From f956554f3a0d27a5d23f2d5b975a387efcf179a6 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Sat, 8 Nov 2025 21:33:26 -0800 Subject: [PATCH 001/125] chore: increment crate versions to v0.13.0 --- CHANGELOG.md | 2 ++ Cargo.lock | 72 +++++++++++++++++++--------------------------------- Cargo.toml | 34 ++++++++++++------------- 3 files changed, 45 insertions(+), 63 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a4fb48200..aeaf7617d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ # Changelog +## v0.13.0 (TBD) + ## v0.12.1 (2025-11-08) - Added support for network transaction service in `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/miden-node/pull/1295)). diff --git a/Cargo.lock b/Cargo.lock index 9bf1ae26c4..6a274f0d94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,16 +2,6 @@ # It is not intended for manual editing. version = 4 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - [[package]] name = "addr2line" version = "0.25.1" @@ -2481,9 +2471,8 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2f53a1c58ced1659a622b784daa5773f70b2aa7f9c4ad1c19b6d74baf2aeb3" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" dependencies = [ "miden-lib", "miden-objects", @@ -2579,11 +2568,9 @@ dependencies = [ [[package]] name = "miden-lib" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44abeb7f67252547fa54605e61e89256fa546e2f25de382ccac78a1ced07ff88" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" dependencies = [ - 
"Inflector", "fs-err", "miden-assembly", "miden-core", @@ -2652,7 +2639,7 @@ dependencies = [ [[package]] name = "miden-network-monitor" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "axum", @@ -2678,7 +2665,7 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "clap 4.5.51", @@ -2698,7 +2685,7 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "assert_matches", @@ -2734,7 +2721,7 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.12.1" +version = "0.13.0" dependencies = [ "quote", "syn 2.0.109", @@ -2742,7 +2729,7 @@ dependencies = [ [[package]] name = "miden-node-ntx-builder" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "futures", @@ -2764,7 +2751,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "fs-err", @@ -2786,7 +2773,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.12.1" +version = "0.13.0" dependencies = [ "fs-err", "miette", @@ -2796,7 +2783,7 @@ dependencies = [ [[package]] name = "miden-node-rpc" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "futures", @@ -2828,7 +2815,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "assert_matches", @@ -2865,7 +2852,7 @@ dependencies = [ [[package]] name = "miden-node-stress-test" -version = "0.12.1" +version = "0.13.0" dependencies = [ "clap 4.5.51", "fs-err", @@ -2895,7 +2882,7 @@ dependencies = [ [[package]] name = "miden-node-utils" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "bytes", @@ -2924,7 +2911,7 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "miden-node-proto", @@ -2940,9 +2927,8 
@@ dependencies = [ [[package]] name = "miden-objects" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef49472012d00be68f68dcbb13b4f28867fb6271c76c014bc555ef6e8e0958a" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" dependencies = [ "bech32", "getrandom 0.3.4", @@ -2999,7 +2985,7 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.12.1" +version = "0.13.0" dependencies = [ "anyhow", "async-trait", @@ -3044,7 +3030,7 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.12.1" +version = "0.13.0" dependencies = [ "getrandom 0.3.4", "miden-node-proto-build", @@ -3079,9 +3065,8 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e64b086dfad33bc235d847cde20959d7b88d46d1da50d48ad2192321dbc2bcf" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3093,31 +3078,26 @@ dependencies = [ "miden-tx-batch-prover", "rand 0.9.2", "rand_chacha 0.9.0", - "thiserror 2.0.17", "winterfell", ] [[package]] name = "miden-tx" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d0ddcbaaa1002b954525212ebfa7ee097e6af9d26847b3d977b3ffa9f3edeb" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" dependencies = [ "miden-lib", "miden-objects", "miden-processor", "miden-prover", "miden-verifier", - "rand 0.9.2", "thiserror 2.0.17", - "tokio", ] [[package]] name = "miden-tx-batch-prover" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3aab74fe8a2227aede2513f8a9dcde5268d3ae4debd5a904cf05992418bbd62d" +version = 
"0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" dependencies = [ "miden-objects", "miden-tx", diff --git a/Cargo.toml b/Cargo.toml index 3e0acedc6d..57a06e7bcc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" rust-version = "1.90" -version = "0.12.1" +version = "0.13.0" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto] @@ -36,25 +36,25 @@ opt-level = 2 [workspace.dependencies] # Workspace crates. -miden-node-block-producer = { path = "crates/block-producer", version = "0.12" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.12" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.12" } -miden-node-proto = { path = "crates/proto", version = "0.12" } -miden-node-proto-build = { path = "proto", version = "0.12" } -miden-node-rpc = { path = "crates/rpc", version = "0.12" } -miden-node-store = { path = "crates/store", version = "0.12" } +miden-node-block-producer = { path = "crates/block-producer", version = "0.13" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.13" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.13" } +miden-node-proto = { path = "crates/proto", version = "0.13" } +miden-node-proto-build = { path = "proto", version = "0.13" } +miden-node-rpc = { path = "crates/rpc", version = "0.13" } +miden-node-store = { path = "crates/store", version = "0.13" } miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.12" } -miden-node-validator = { path = "crates/validator", version = "0.12" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.12" } +miden-node-utils = { path = "crates/utils", version = "0.13" } +miden-node-validator = { path = 
"crates/validator", version = "0.13" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } # miden-base aka protocol dependencies. These should be updated in sync. -miden-block-prover = { version = "0.12" } -miden-lib = { version = "0.12" } -miden-objects = { default-features = false, version = "0.12" } -miden-testing = { version = "0.12" } -miden-tx = { default-features = false, version = "0.12" } -miden-tx-batch-prover = { version = "0.12" } +miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-lib = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-objects = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } +miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } +miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } # Other miden dependencies. These should align with those expected by miden-base. miden-air = { features = ["std", "testing"], version = "0.19" } From 2cd7b4201486ed4597c7ffbc0fbb25a8500cb959 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Sat, 8 Nov 2025 21:35:11 -0800 Subject: [PATCH 002/125] chore: remove release-plz.toml --- .release-plz.toml | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 .release-plz.toml diff --git a/.release-plz.toml b/.release-plz.toml deleted file mode 100644 index c3dfed33db..0000000000 --- a/.release-plz.toml +++ /dev/null @@ -1,6 +0,0 @@ -[workspace] -changelog_update = false # For now we have our own changelog. -release_always = true # Without the tracking PR, it would never trigger unless `true`. 
- -git_release_enable = false -git_tag_enable = false From 704dbae3fc7045a2ba64c15e79175e2fe4d21866 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 11 Nov 2025 14:27:15 -0300 Subject: [PATCH 003/125] chore: rename ProxyWorkerStatus::address to name (#1348) --- CHANGELOG.md | 2 ++ bin/network-monitor/assets/index.css | 2 +- bin/network-monitor/assets/index.html | 2 +- bin/network-monitor/src/status.rs | 4 ++-- .../src/generated/remote_prover.rs | 4 ++-- bin/remote-prover/src/proxy/health_check.rs | 2 +- bin/remote-prover/src/proxy/mod.rs | 6 +++--- bin/remote-prover/src/proxy/worker.rs | 20 +++++++++---------- crates/proto/src/generated/remote_prover.rs | 4 ++-- .../generated/nostd/remote_prover.rs | 4 ++-- .../generated/std/remote_prover.rs | 4 ++-- proto/proto/remote_prover.proto | 4 ++-- 12 files changed, 30 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aeaf7617d1..4670218934 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## v0.13.0 (TBD) +- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). + ## v0.12.1 (2025-11-08) - Added support for network transaction service in `miden-network-monitor` binary ([#1295](https://github.com/0xMiden/miden-node/pull/1295)). 
diff --git a/bin/network-monitor/assets/index.css b/bin/network-monitor/assets/index.css index b375f10e10..722aa61c75 100644 --- a/bin/network-monitor/assets/index.css +++ b/bin/network-monitor/assets/index.css @@ -383,7 +383,7 @@ body { font-family: "DM Mono", monospace; } -.worker-address { +.worker-name { font-weight: 500; color: #333; } diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index ec538d65fe..15d35016c1 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -212,7 +212,7 @@ Workers (${details.RemoteProverStatus.workers.length}): ${details.RemoteProverStatus.workers.map(worker => `
- ${worker.address} - + ${worker.name} - ${worker.version} - ${worker.status}
diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index 9c88b5be3d..2ac897677f 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -130,7 +130,7 @@ pub struct RemoteProverStatusDetails { /// worker service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WorkerStatusDetails { - pub address: String, + pub name: String, pub version: String, pub status: Status, } @@ -175,7 +175,7 @@ impl From for WorkerStatusDetails { proto::remote_prover::WorkerHealthStatus::try_from(value.status).unwrap().into(); Self { - address: value.address, + name: value.name, version: value.version, status, } diff --git a/bin/remote-prover/src/generated/remote_prover.rs b/bin/remote-prover/src/generated/remote_prover.rs index 210b691537..4d8ae80285 100644 --- a/bin/remote-prover/src/generated/remote_prover.rs +++ b/bin/remote-prover/src/generated/remote_prover.rs @@ -28,9 +28,9 @@ pub struct Proof { /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. 
#[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/bin/remote-prover/src/proxy/health_check.rs b/bin/remote-prover/src/proxy/health_check.rs index 51192b7747..b583c09827 100644 --- a/bin/remote-prover/src/proxy/health_check.rs +++ b/bin/remote-prover/src/proxy/health_check.rs @@ -49,7 +49,7 @@ impl BackgroundService for LoadBalancerState { if let Err(ref reason) = status_result { error!( err = %reason, - worker.address = worker.address(), + worker.name = worker.name(), "Worker failed health check" ); } diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs index 2e3d532848..81290d73a9 100644 --- a/bin/remote-prover/src/proxy/mod.rs +++ b/bin/remote-prover/src/proxy/mod.rs @@ -352,7 +352,7 @@ impl RequestContext { /// Set the worker that will process the request fn set_worker(&mut self, worker: Worker) { - WORKER_REQUEST_COUNT.with_label_values(&[&worker.address()]).inc(); + WORKER_REQUEST_COUNT.with_label_values(&[&worker.name()]).inc(); self.worker = Some(worker); } } @@ -495,7 +495,7 @@ impl ProxyHttp for LoadBalancer { // Check if there is an available worker if let Some(worker) = self.0.pop_available_worker().await { - debug!("Worker {} picked up the request with ID: {}", worker.address(), request_id); + debug!("Worker {} picked up the request with ID: {}", worker.name(), request_id); ctx.set_worker(worker); break; } @@ -508,7 +508,7 @@ impl ProxyHttp for LoadBalancer { // Set SNI let mut http_peer = HttpPeer::new( - ctx.worker.clone().expect("Failed to get worker").address(), + ctx.worker.clone().expect("Failed to get worker").name(), false, String::new(), ); diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs index bf181c613a..aa418e8cb3 100644 --- a/bin/remote-prover/src/proxy/worker.rs +++ b/bin/remote-prover/src/proxy/worker.rs @@ -140,14 +140,14 @@ impl Worker { /// - `Ok(())` if the client was successfully created /// - `Err(RemoteProverError)` if the 
client creation failed async fn recreate_status_client(&mut self) -> Result<(), RemoteProverError> { - let address = self.address(); - match create_status_client(&address, self.connection_timeout, self.total_timeout).await { + let name = self.name(); + match create_status_client(&name, self.connection_timeout, self.total_timeout).await { Ok(client) => { self.status_client = Some(client); Ok(()) }, Err(err) => { - error!("Failed to recreate status client for worker {}: {}", address, err); + error!("Failed to recreate status client for worker {}: {}", name, err); Err(err) }, } @@ -170,7 +170,7 @@ impl Worker { if self.status_client.is_none() { match self.recreate_status_client().await { Ok(()) => { - info!("Successfully recreated status client for worker {}", self.address()); + info!("Successfully recreated status client for worker {}", self.name()); }, Err(err) => { return Err(err.as_report_context("failed to recreate status client")); @@ -181,7 +181,7 @@ impl Worker { let worker_status = match self.status_client.as_mut().unwrap().status(()).await { Ok(response) => response.into_inner(), Err(e) => { - error!("Failed to check worker status ({}): {}", self.address(), e); + error!("Failed to check worker status ({}): {}", self.name(), e); return Err(e.message().to_string()); }, }; @@ -198,7 +198,7 @@ impl Worker { let worker_supported_proof_type = ProofType::try_from(worker_status.supported_proof_type) .inspect_err(|err| { - error!(%err, address=%self.address(), "Failed to convert worker supported proof type"); + error!(%err, name=%self.name(), "Failed to convert worker supported proof type"); })?; if supported_proof_type != worker_supported_proof_type { @@ -271,8 +271,8 @@ impl Worker { self.is_available } - /// Returns the worker address. - pub fn address(&self) -> String { + /// Returns the worker name. + pub fn name(&self) -> String { self.backend.addr.to_string() } @@ -325,7 +325,7 @@ impl Worker { } }, WorkerHealthStatus::Unhealthy { .. 
} => { - WORKER_UNHEALTHY.with_label_values(&[&self.address()]).inc(); + WORKER_UNHEALTHY.with_label_values(&[&self.name()]).inc(); self.is_available = false; }, } @@ -349,7 +349,7 @@ impl From<&Worker> for ProxyWorkerStatus { fn from(worker: &Worker) -> Self { use miden_remote_prover::generated::remote_prover::WorkerHealthStatus as ProtoWorkerHealthStatus; Self { - address: worker.address(), + name: worker.name(), version: worker.version().to_string(), status: match worker.health_status() { WorkerHealthStatus::Healthy => ProtoWorkerHealthStatus::Healthy, diff --git a/crates/proto/src/generated/remote_prover.rs b/crates/proto/src/generated/remote_prover.rs index 210b691537..4d8ae80285 100644 --- a/crates/proto/src/generated/remote_prover.rs +++ b/crates/proto/src/generated/remote_prover.rs @@ -28,9 +28,9 @@ pub struct Proof { /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs index 53326a3fb2..65dd724603 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs @@ -28,9 +28,9 @@ pub struct Proof { /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. 
#[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs index 7f33a307f7..fdb3fcccf5 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs @@ -28,9 +28,9 @@ pub struct Proof { /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/proto/proto/remote_prover.proto b/proto/proto/remote_prover.proto index 49132fd6f1..e02a289e29 100644 --- a/proto/proto/remote_prover.proto +++ b/proto/proto/remote_prover.proto @@ -56,8 +56,8 @@ service ProxyStatusApi { // Status of an individual worker in the proxy. message ProxyWorkerStatus { - // The address of the worker. - string address = 1; + // The name of the worker. + string name = 1; // The version of the worker. string version = 2; // The health status of the worker. 
From fff81224eaf35b206604644e9c7a1ca25848af79 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 11 Nov 2025 23:00:59 +0100 Subject: [PATCH 004/125] chore: bump dependencies, make compile again (#1360) --- Cargo.lock | 133 +++++++++++++------------- bin/network-monitor/src/deploy/mod.rs | 7 +- crates/ntx-builder/src/transaction.rs | 8 +- crates/rpc/src/server/validator.rs | 6 +- 4 files changed, 79 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a274f0d94..309b2c04b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -196,7 +196,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -207,7 +207,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -684,7 +684,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -896,7 +896,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -939,7 +939,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -953,7 +953,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -964,7 +964,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -975,7 +975,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1067,7 +1067,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] 
[[package]] @@ -1077,7 +1077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1097,7 +1097,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1127,7 +1127,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1147,7 +1147,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1176,7 +1176,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1202,7 +1202,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1296,7 +1296,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1518,7 +1518,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -1808,9 +1808,9 @@ checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +checksum = "1744436df46f0bde35af3eda22aeaba453aada65d8f1c171cd8a5f59030bd69f" dependencies = [ "atomic-waker", "bytes", @@ -2157,7 +2157,7 @@ checksum = "980af8b43c3ad5d8d349ace167ec8170839f753a42d233ba19e08afe1850fa69" dependencies 
= [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -2344,7 +2344,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -2472,7 +2472,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" dependencies = [ "miden-lib", "miden-objects", @@ -2536,7 +2536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2222f37355ea975f40acd3c098a437574a31a4d8a2c193cf4e9fead2beede577" dependencies = [ "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -2569,7 +2569,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" dependencies = [ "fs-err", "miden-assembly", @@ -2618,7 +2618,7 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.109", + "syn 2.0.110", "terminal_size 0.3.0", "textwrap", "thiserror 2.0.17", @@ -2634,7 +2634,7 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -2724,7 +2724,7 @@ name = "miden-node-grpc-error-macro" version = "0.13.0" dependencies = [ "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -2877,7 +2877,7 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -2928,7 +2928,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" dependencies = [ "bech32", "getrandom 0.3.4", @@ -2938,6 +2938,7 @@ dependencies = [ "miden-crypto", "miden-mast-package", "miden-processor", + "miden-stdlib", "miden-utils-sync", "miden-verifier", "rand 0.9.2", @@ -3066,7 +3067,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3084,7 +3085,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" dependencies = [ "miden-lib", "miden-objects", @@ -3097,7 +3098,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#698363407d9aab564570647d49e87306df75cb52" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" dependencies = [ "miden-objects", "miden-tx", @@ -3187,7 +3188,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -3369,7 +3370,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -3488,7 +3489,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -3630,7 +3631,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -3675,7 +3676,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -4052,7 +4053,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -4105,7 +4106,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", "version_check", "yansi", ] @@ -4187,7 +4188,7 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.109", + "syn 2.0.110", "tempfile", ] @@ -4201,7 +4202,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -4291,9 +4292,9 @@ dependencies = [ [[package]] name = "pulldown-cmark-to-cmark" -version = "21.0.0" +version = "21.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75" +checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" dependencies = [ "pulldown-cmark", ] @@ -4580,7 +4581,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.109", + "syn 2.0.110", "unicode-ident", ] @@ -4862,7 +4863,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -4964,7 +4965,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.109", + "syn 2.0.110", ] [[package]] @@ -5094,9 +5095,9 @@ dependencies = [ [[package]] name = "sqlite-wasm-rs" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e4348c16a3d2e2a45437eff67efc5462b60443de76f61b5d0ed9111c626d9d" +checksum = "35c6d746902bca4ddf16592357eacf0473631ea26b36072f0dd0b31fa5ccd1f4" dependencies = [ "js-sys", "once_cell", @@ -5165,7 +5166,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -5208,9 +5209,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.109" +version = "2.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f17c7e013e88258aa9543dcbe81aca68a667a9ac37cd69c9fbc07858bfe0e2f" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" dependencies = [ "proc-macro2", "quote", @@ -5234,7 +5235,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -5358,7 +5359,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -5369,7 +5370,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -5469,7 +5470,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -5662,7 +5663,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -5700,7 +5701,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.109", + "syn 2.0.110", "tempfile", "tonic-build", ] @@ -5833,7 +5834,7 @@ checksum = 
"81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -6224,7 +6225,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", "wasm-bindgen-shared", ] @@ -6368,7 +6369,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -6379,7 +6380,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -6752,7 +6753,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -6869,7 +6870,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", "synstructure", ] @@ -6890,7 +6891,7 @@ checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] @@ -6910,7 +6911,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", "synstructure", ] @@ -6950,7 +6951,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.109", + "syn 2.0.110", ] [[package]] diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index d9be433a1e..d78828d440 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -283,8 +283,11 @@ impl DataStore for MonitorDataStore { }) } - async fn get_note_script(&self, script_root: 
Word) -> Result { - Err(DataStoreError::NoteScriptNotFound(script_root)) + async fn get_note_script( + &self, + _script_root: Word, + ) -> Result, DataStoreError> { + Ok(None) } } diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/transaction.rs index 8d5b1909f6..c0d89bc0da 100644 --- a/crates/ntx-builder/src/transaction.rs +++ b/crates/ntx-builder/src/transaction.rs @@ -388,7 +388,7 @@ impl DataStore for NtxDataStore { fn get_note_script( &self, script_root: Word, - ) -> impl FutureMaybeSend> { + ) -> impl FutureMaybeSend, DataStoreError>> { let store = self.store.clone(); let cache = self.script_cache.clone(); @@ -398,7 +398,7 @@ impl DataStore for NtxDataStore { let mut cache_guard = cache.lock().await; cache_guard.get(&script_root).cloned() } { - return Ok(cached_script); + return Ok(Some(cached_script)); } // Retrieve the script from the store. @@ -417,11 +417,11 @@ impl DataStore for NtxDataStore { cache_guard.put(script_root, script.clone()); } // Return script. - Ok(script) + Ok(Some(script)) }, None => { // Response did not contain the note script. 
- Err(DataStoreError::NoteScriptNotFound(script_root)) + Ok(None) }, } } diff --git a/crates/rpc/src/server/validator.rs b/crates/rpc/src/server/validator.rs index 2b6719c32a..b0e8e082ac 100644 --- a/crates/rpc/src/server/validator.rs +++ b/crates/rpc/src/server/validator.rs @@ -136,9 +136,9 @@ impl DataStore for TransactionInputsDataStore { fn get_note_script( &self, - script_root: Word, - ) -> impl FutureMaybeSend> { - async move { Err(DataStoreError::NoteScriptNotFound(script_root)) } + _script_root: Word, + ) -> impl FutureMaybeSend, DataStoreError>> { + async move { Ok(None) } } } From e693d57e3b9086e64b84dd54cae47e48efcbff26 Mon Sep 17 00:00:00 2001 From: igamigo Date: Wed, 12 Nov 2025 04:53:05 -0300 Subject: [PATCH 005/125] feat: fetch commitments only (#1347) --- crates/store/src/db/mod.rs | 11 +++--- crates/store/src/db/models/queries/notes.rs | 44 ++++++++++++--------- crates/store/src/state.rs | 7 +--- 3 files changed, 33 insertions(+), 29 deletions(-) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index a7701a0608..5bbe0be4a4 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::ops::RangeInclusive; use std::path::PathBuf; @@ -455,15 +455,14 @@ impl Db { .await } - /// Loads all the [`NoteRecord`]s matching a certain note commitment from the - /// database. + /// Returns all note commitments from the DB that match the provided ones. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_notes_by_commitment( + pub async fn select_existing_note_commitments( &self, note_commitments: Vec, - ) -> Result> { + ) -> Result> { self.transact("note by commitment", move |conn| { - queries::select_notes_by_commitment(conn, note_commitments.as_slice()) + queries::select_existing_note_commitments(conn, note_commitments.as_slice()) }) .await } diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index 01c981dc18..a5c2ffc2da 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -3,7 +3,7 @@ reason = "We will not approach the item count where i64 and usize cause issues" )] -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::ops::RangeInclusive; use diesel::prelude::{ @@ -217,26 +217,34 @@ pub(crate) fn select_notes_by_id( Ok(records) } -pub(crate) fn select_notes_by_commitment( +/// Select the subset of note commitments that already exist in the notes table +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// notes.note_commitment +/// FROM notes +/// WHERE note_commitment IN (?1) +/// ``` +pub(crate) fn select_existing_note_commitments( conn: &mut SqliteConnection, note_commitments: &[Word], -) -> Result, DatabaseError> { +) -> Result, DatabaseError> { + QueryParamNoteCommitmentLimit::check(note_commitments.len())?; + let note_commitments = serialize_vec(note_commitments.iter()); - let q = schema::notes::table - .left_join( - schema::note_scripts::table - .on(schema::notes::script_root.eq(schema::note_scripts::script_root.nullable())), - ) - .filter(schema::notes::note_commitment.eq_any(¬e_commitments)); - let raw: Vec<_> = SelectDsl::select( - q, - (NoteRecordRawRow::as_select(), schema::note_scripts::script.nullable()), - ) - .load::<(NoteRecordRawRow, Option>)>(conn)?; - let records = 
vec_raw_try_into::( - raw.into_iter().map(NoteRecordWithScriptRawJoined::from), - )?; - Ok(records) + + let raw_commitments = SelectDsl::select(schema::notes::table, schema::notes::note_commitment) + .filter(schema::notes::note_commitment.eq_any(¬e_commitments)) + .load::>(conn)?; + + let commitments = raw_commitments + .into_iter() + .map(|commitment| Word::read_from_bytes(&commitment[..])) + .collect::, _>>()?; + + Ok(commitments) } /// Select all notes from the DB using the given [`SqliteConnection`]. diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index b94e57bdb2..9c600c61ed 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -892,11 +892,8 @@ impl State { let found_unauthenticated_notes = self .db - .select_notes_by_commitment(unauthenticated_note_commitments) - .await? - .into_iter() - .map(|note| note.note_commitment) - .collect(); + .select_existing_note_commitments(unauthenticated_note_commitments) + .await?; Ok(TransactionInputs { account_commitment, From 548710ddc8cd2c9b0a150a9adbb6519743fca701 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Wed, 12 Nov 2025 01:40:31 -0800 Subject: [PATCH 006/125] chore: refresh Cargo.lock file --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 309b2c04b0..b18dd152b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2263,9 +2263,9 @@ dependencies = [ [[package]] name = "libz-ng-sys" -version = "1.1.22" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7118c2c2a3c7b6edc279a8b19507672b9c4d716f95e671172dfa4e23f9fd824" +checksum = "7bf914b7dd154ca9193afec311d8e39345c1bd93b48b3faa77329f0db8f553c0" dependencies = [ "cmake", "libc", @@ -2472,7 +2472,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" +source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" dependencies = [ "miden-lib", "miden-objects", @@ -2569,7 +2569,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" dependencies = [ "fs-err", "miden-assembly", @@ -2928,7 +2928,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" dependencies = [ "bech32", "getrandom 0.3.4", @@ -3067,7 +3067,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3085,7 +3085,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" dependencies = [ "miden-lib", "miden-objects", @@ -3098,7 +3098,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#c532ae91fdf711ac2f8b253342c253ee2eac0bed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" dependencies = [ "miden-objects", "miden-tx", From 3eef7f70154035c1206571f58e38548d348e6378 
Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 12 Nov 2025 13:40:02 +0100 Subject: [PATCH 007/125] refactor: get rid of `trait AccountTreeStorage` (#1361) --- CHANGELOG.md | 1 + .../store/benches/account_tree_historical.rs | 6 +- crates/store/src/accounts/mod.rs | 88 ++----------------- crates/store/src/accounts/tests.rs | 3 +- crates/store/src/lib.rs | 7 +- crates/store/src/state.rs | 6 +- 6 files changed, 14 insertions(+), 97 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4670218934..6e536294e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## v0.13.0 (TBD) - [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). +- Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). ## v0.12.1 (2025-11-08) diff --git a/crates/store/benches/account_tree_historical.rs b/crates/store/benches/account_tree_historical.rs index dbb538d5a4..e038933639 100644 --- a/crates/store/benches/account_tree_historical.rs +++ b/crates/store/benches/account_tree_historical.rs @@ -1,7 +1,7 @@ use std::hint::black_box; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; -use miden_node_store::{AccountTreeWithHistory, InMemoryAccountTree}; +use miden_node_store::AccountTreeWithHistory; use miden_objects::Word; use miden_objects::account::AccountId; use miden_objects::block::BlockNumber; @@ -70,7 +70,7 @@ fn setup_vanilla_account_tree( fn setup_account_tree_with_history( num_accounts: usize, num_blocks: usize, -) -> (AccountTreeWithHistory, Vec) { +) -> (AccountTreeWithHistory, Vec) { let mut seed = [0u8; 32]; let storage = setup_storage(); let smt = LargeSmt::with_entries(storage, std::iter::empty()) @@ -164,7 +164,7 @@ fn bench_historical_access(c: &mut Criterion) { for &num_accounts in &account_counts { for &block_depth in &block_depths { - if block_depth > AccountTreeWithHistory::::MAX_HISTORY { + if block_depth > 
AccountTreeWithHistory::::MAX_HISTORY { continue; } diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index bf18b815a0..71131a6151 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -27,78 +27,6 @@ mod tests; /// Convenience for an in-memory-only account tree. pub type InMemoryAccountTree = AccountTree>; -// ACCOUNT TREE STORAGE TRAIT -// ================================================================================================ - -/// Trait abstracting operations over different account tree backends. -pub trait AccountTreeStorage { - /// Returns the root hash of the tree. - fn root(&self) -> Word; - - /// Returns the number of accounts in the tree. - fn num_accounts(&self) -> usize; - - /// Opens an account and returns its witness. - fn open(&self, account_id: AccountId) -> AccountWitness; - - /// Gets the account state commitment. - fn get(&self, account_id: AccountId) -> Word; - - /// Computes mutations for applying account updates. - fn compute_mutations( - &self, - accounts: impl IntoIterator, - ) -> Result; - - /// Applies mutations with reversion data. - fn apply_mutations_with_reversion( - &mut self, - mutations: AccountMutationSet, - ) -> Result; - - /// Checks if the tree contains an account with the given prefix. 
- fn contains_account_id_prefix(&self, prefix: AccountIdPrefix) -> bool; -} - -impl AccountTreeStorage for AccountTree> -where - S: SmtStorage, -{ - fn root(&self) -> Word { - self.root() - } - - fn num_accounts(&self) -> usize { - self.num_accounts() - } - - fn open(&self, account_id: AccountId) -> AccountWitness { - self.open(account_id) - } - - fn get(&self, account_id: AccountId) -> Word { - self.get(account_id) - } - - fn compute_mutations( - &self, - accounts: impl IntoIterator, - ) -> Result { - self.compute_mutations(accounts) - } - - fn apply_mutations_with_reversion( - &mut self, - mutations: AccountMutationSet, - ) -> Result { - self.apply_mutations_with_reversion(mutations) - } - - fn contains_account_id_prefix(&self, prefix: AccountIdPrefix) -> bool { - self.contains_account_id_prefix(prefix) - } -} - // HISTORICAL ERROR TYPES // ================================================================================================ @@ -178,23 +106,17 @@ impl HistoricalOverlay { /// This structure maintains a sliding window of historical account states by storing /// reversion data (mutations that undo changes). Historical witnesses are reconstructed /// by starting from the latest state and applying reversion overlays backwards in time. -#[derive(Debug, Clone)] -pub struct AccountTreeWithHistory -where - S: AccountTreeStorage, -{ +#[derive(Debug)] +pub struct AccountTreeWithHistory { /// The current block number (latest state). block_number: BlockNumber, /// The latest account tree state. - latest: S, + latest: AccountTree>, /// Historical overlays indexed by block number, storing reversion data. overlays: BTreeMap, } -impl AccountTreeWithHistory -where - S: AccountTreeStorage, -{ +impl AccountTreeWithHistory { /// Maximum number of historical blocks to maintain. 
pub const MAX_HISTORY: usize = 33; @@ -202,7 +124,7 @@ where // -------------------------------------------------------------------------------------------- /// Creates a new historical tree starting at the given block number. - pub fn new(account_tree: S, block_number: BlockNumber) -> Self { + pub fn new(account_tree: AccountTree>, block_number: BlockNumber) -> Self { Self { block_number, latest: account_tree, diff --git a/crates/store/src/accounts/tests.rs b/crates/store/src/accounts/tests.rs index 9d90e975e0..fff57902a1 100644 --- a/crates/store/src/accounts/tests.rs +++ b/crates/store/src/accounts/tests.rs @@ -84,8 +84,7 @@ mod account_tree_with_history_tests { #[test] fn test_history_limits() { - const MAX_HIST: u32 = - AccountTreeWithHistory::>>::MAX_HISTORY as u32; + const MAX_HIST: u32 = AccountTreeWithHistory::::MAX_HISTORY as u32; use assert_matches::assert_matches; let id = AccountIdBuilder::new().build_with_seed([30; 32]); diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index d50f124f73..ce49564703 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -8,12 +8,7 @@ pub mod genesis; mod server; pub mod state; -pub use accounts::{ - AccountTreeStorage, - AccountTreeWithHistory, - HistoricalError, - InMemoryAccountTree, -}; +pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; pub use genesis::GenesisState; pub use server::{DataDirectory, Store}; diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 9c600c61ed..11c5be1c12 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -77,7 +77,7 @@ use crate::errors::{ StateInitializationError, StateSyncError, }; -use crate::{AccountTreeWithHistory, COMPONENT, DataDirectory, InMemoryAccountTree}; +use crate::{AccountTreeWithHistory, COMPONENT, DataDirectory}; // STRUCTURES // ================================================================================================ @@ -97,7 +97,7 @@ where { nullifier_tree: 
NullifierTree, blockchain: Blockchain, - account_tree: AccountTreeWithHistory>>, + account_tree: AccountTreeWithHistory, } impl InnerState @@ -1127,7 +1127,7 @@ async fn load_mmr(db: &mut Db) -> Result { async fn load_account_tree( db: &mut Db, block_number: BlockNumber, -) -> Result, StateInitializationError> { +) -> Result, StateInitializationError> { let account_data = db.select_all_account_commitments().await?.into_iter().collect::>(); // Convert account_data to use account_id_to_smt_key From ab85e38c776b9a4bff6d7d147c3ae77d40a8d25f Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Sat, 15 Nov 2025 20:37:18 -0800 Subject: [PATCH 008/125] chore: refresh Cargo.lock --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eab6872402..650f50fbd1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2472,7 +2472,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" dependencies = [ "miden-lib", "miden-objects", @@ -2569,7 +2569,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" dependencies = [ "fs-err", "miden-assembly", @@ -2929,7 +2929,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" dependencies = [ "bech32", "getrandom 0.3.4", @@ -3068,7 +3068,7 @@ dependencies = [ 
[[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3086,7 +3086,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" dependencies = [ "miden-lib", "miden-objects", @@ -3099,7 +3099,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#31a1f22bc5ccb17c1e1a4c55a892a4b700ca4a3e" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" dependencies = [ "miden-objects", "miden-tx", From 2b00852f81534f47ce45953f43f8407835a78870 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Mon, 17 Nov 2025 03:44:25 -0300 Subject: [PATCH 009/125] chore(breaking): sync transactions follow ups (#1357) - Normalized response size limits to 4MB - Standardized naming in protobuf schema --- CHANGELOG.md | 2 + crates/proto/src/generated/rpc_store.rs | 6 +-- crates/proto/src/generated/transaction.rs | 2 +- crates/store/src/constants.rs | 16 +++++++ .../db/migrations/2025062000000_setup/up.sql | 2 +- crates/store/src/db/mod.rs | 8 ++-- .../store/src/db/models/queries/accounts.rs | 3 +- .../store/src/db/models/queries/nullifiers.rs | 3 +- .../src/db/models/queries/transactions.rs | 18 +++---- crates/store/src/db/schema.rs | 2 +- crates/store/src/lib.rs | 1 + crates/store/src/server/rpc_api.rs | 47 ++++++++++++------- proto/proto/store/rpc.proto | 4 +- 
proto/proto/types/transaction.proto | 2 +- 14 files changed, 72 insertions(+), 44 deletions(-) create mode 100644 crates/store/src/constants.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index b4e3e63737..d58a3f080f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## v0.13.0 (TBD) +- [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). +- Normalize response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). - [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). diff --git a/crates/proto/src/generated/rpc_store.rs b/crates/proto/src/generated/rpc_store.rs index 187f559ef0..9feea358c3 100644 --- a/crates/proto/src/generated/rpc_store.rs +++ b/crates/proto/src/generated/rpc_store.rs @@ -449,7 +449,7 @@ pub struct SyncTransactionsResponse { pub pagination_info: ::core::option::Option, /// List of transaction records. #[prost(message, repeated, tag = "2")] - pub transaction_records: ::prost::alloc::vec::Vec, + pub transactions: ::prost::alloc::vec::Vec, } /// Represents a transaction record. #[derive(Clone, PartialEq, ::prost::Message)] @@ -459,9 +459,7 @@ pub struct TransactionRecord { pub block_num: u32, /// A transaction header. #[prost(message, optional, tag = "2")] - pub transaction_header: ::core::option::Option< - super::transaction::TransactionHeader, - >, + pub header: ::core::option::Option, } /// Generated client implementations. 
pub mod rpc_client { diff --git a/crates/proto/src/generated/transaction.rs b/crates/proto/src/generated/transaction.rs index e02a636365..3b9f27a40b 100644 --- a/crates/proto/src/generated/transaction.rs +++ b/crates/proto/src/generated/transaction.rs @@ -52,7 +52,7 @@ pub struct TransactionHeader { pub final_state_commitment: ::core::option::Option, /// Nullifiers of the input notes of the transaction. #[prost(message, repeated, tag = "4")] - pub input_notes: ::prost::alloc::vec::Vec, + pub nullifiers: ::prost::alloc::vec::Vec, /// Output notes of the transaction. #[prost(message, repeated, tag = "5")] pub output_notes: ::prost::alloc::vec::Vec, diff --git a/crates/store/src/constants.rs b/crates/store/src/constants.rs new file mode 100644 index 0000000000..ba2dc620fd --- /dev/null +++ b/crates/store/src/constants.rs @@ -0,0 +1,16 @@ +//! Constants used for pagination and size limits across the store. + +/// Maximum number of account IDs that can be requested in a single query. +pub const MAX_ACCOUNT_IDS: usize = 100; + +/// Maximum number of nullifiers that can be requested in a single query. +pub const MAX_NULLIFIERS: usize = 100; + +/// Maximum number of note tags that can be requested in a single query. +pub const MAX_NOTE_TAGS: usize = 100; + +/// Maximum number of note IDs that can be requested in a single query. +pub const MAX_NOTE_IDS: usize = 100; + +/// Maximum payload size for all paginated endpoints (4 MB). +pub const MAX_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 7235ad1bea..0b8d0744d2 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -126,7 +126,7 @@ CREATE TABLE transactions ( block_num INTEGER NOT NULL, -- Block number in which the transaction was included. 
initial_state_commitment BLOB NOT NULL, -- State of the account before applying the transaction. final_state_commitment BLOB NOT NULL, -- State of the account after applying the transaction. - input_notes BLOB NOT NULL, -- Serialized vector with the Nullifier of the input notes. + nullifiers BLOB NOT NULL, -- Serialized vector with the Nullifier of the input notes. output_notes BLOB NOT NULL, -- Serialized vector with the NoteId of the output notes. size_in_bytes INTEGER NOT NULL, -- Estimated size of the row in bytes, considering the size of the input and output notes. diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 5bbe0be4a4..8515327f61 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -100,8 +100,8 @@ pub struct TransactionRecord { pub account_id: AccountId, pub initial_state_commitment: Word, pub final_state_commitment: Word, - pub input_notes: Vec, // Store nullifiers for input notes - pub output_notes: Vec, // Store note IDs for output notes + pub nullifiers: Vec, // Store nullifiers for input notes + pub output_notes: Vec, // Store note IDs for output notes } impl TransactionRecord { @@ -116,11 +116,11 @@ impl TransactionRecord { note_records.into_iter().map(Into::into).collect(); proto::rpc_store::TransactionRecord { - transaction_header: Some(proto::transaction::TransactionHeader { + header: Some(proto::transaction::TransactionHeader { account_id: Some(self.account_id.into()), initial_state_commitment: Some(self.initial_state_commitment.into()), final_state_commitment: Some(self.final_state_commitment.into()), - input_notes: self.input_notes.into_iter().map(From::from).collect(), + nullifiers: self.nullifiers.into_iter().map(From::from).collect(), output_notes, }), block_num: self.block_num.as_u32(), diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index b658d92752..a1e4452b1e 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ 
b/crates/store/src/db/models/queries/accounts.rs @@ -35,6 +35,7 @@ use miden_objects::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_objects::block::{BlockAccountUpdate, BlockNumber}; use miden_objects::{Felt, Word}; +use crate::constants::MAX_PAYLOAD_BYTES; use crate::db::models::conv::{ SqlTypeConvert, nonce_to_raw_sql, @@ -211,7 +212,6 @@ pub(crate) fn select_account_vault_assets( use schema::account_vault_assets as t; // TODO: These limits should be given by the protocol. // See miden-base/issues/1770 for more details - const MAX_PAYLOAD_BYTES: usize = 2 * 1024 * 1024; // 2 MB const ROW_OVERHEAD_BYTES: usize = 2 * size_of::() + size_of::(); // key + asset + block_num const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; @@ -426,7 +426,6 @@ pub(crate) fn select_account_storage_map_values( // TODO: These limits should be given by the protocol. // See miden-base/issues/1770 for more details - pub const MAX_PAYLOAD_BYTES: usize = 2 * 1024 * 1024; // 2 MB pub const ROW_OVERHEAD_BYTES: usize = 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx pub const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index a81ca050e2..95e77ab46f 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -21,6 +21,7 @@ use miden_objects::block::BlockNumber; use miden_objects::note::Nullifier; use super::DatabaseError; +use crate::constants::MAX_PAYLOAD_BYTES; use crate::db::models::conv::{SqlTypeConvert, nullifier_prefix_to_raw_sql}; use crate::db::models::utils::{get_nullifier_prefix, vec_raw_try_into}; use crate::db::{NullifierInfo, schema}; @@ -65,8 +66,6 @@ pub(crate) fn select_nullifiers_by_prefix( block_range: RangeInclusive, ) -> Result<(Vec, BlockNumber), DatabaseError> { // Size calculation: max 2^16 nullifiers per block × 36 bytes 
per nullifier = ~2.25MB - // We use 2.5MB to provide a safety margin for the unlikely case of hitting the maximum - pub const MAX_PAYLOAD_BYTES: usize = 2_500_000; // 2.5 MB - allows for max block size of ~2.25MB pub const NULLIFIER_BYTES: usize = 32; // digest size (nullifier) pub const BLOCK_NUM_BYTES: usize = 4; // 32 bits per block number pub const ROW_OVERHEAD_BYTES: usize = NULLIFIER_BYTES + BLOCK_NUM_BYTES; // 36 bytes diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index b011c2d72d..7b5caf5e73 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -93,7 +93,7 @@ pub struct TransactionRecordRaw { transaction_id: Vec, initial_state_commitment: Vec, final_state_commitment: Vec, - input_notes: Vec, + nullifiers: Vec, output_notes: Vec, size_in_bytes: i64, } @@ -117,11 +117,11 @@ impl TryInto for TransactionRecordRaw { let initial_state_commitment = self.initial_state_commitment; let final_state_commitment = self.final_state_commitment; - let input_notes_binary = self.input_notes; + let nullifiers_binary = self.nullifiers; let output_notes_binary = self.output_notes; // Deserialize input notes as nullifiers and output notes as note IDs - let input_notes: Vec = Deserializable::read_from_bytes(&input_notes_binary)?; + let nullifiers: Vec = Deserializable::read_from_bytes(&nullifiers_binary)?; let output_notes: Vec = Deserializable::read_from_bytes(&output_notes_binary)?; Ok(crate::db::TransactionRecord { @@ -130,7 +130,7 @@ impl TryInto for TransactionRecordRaw { transaction_id: TransactionId::read_from_bytes(&self.transaction_id[..])?, initial_state_commitment: Word::read_from_bytes(&initial_state_commitment)?, final_state_commitment: Word::read_from_bytes(&final_state_commitment)?, - input_notes, + nullifiers, output_notes, }) } @@ -171,7 +171,7 @@ pub struct TransactionSummaryRowInsert { block_num: i64, 
initial_state_commitment: Vec, final_state_commitment: Vec, - input_notes: Vec, + nullifiers: Vec, output_notes: Vec, size_in_bytes: i64, } @@ -190,7 +190,7 @@ impl TransactionSummaryRowInsert { const HEADER_BASE_SIZE: usize = 4 + 32 + 16 + 64; // block_num + tx_id + account_id + commitments // Serialize input notes using binary format (store nullifiers) - let input_notes_binary = transaction_header.input_notes().to_bytes(); + let nullifiers_binary = transaction_header.input_notes().to_bytes(); // Serialize output notes using binary format (store note IDs) let output_notes_binary = transaction_header.output_notes().to_bytes(); @@ -206,9 +206,9 @@ impl TransactionSummaryRowInsert { // // Note: 500 bytes per output note is an over-estimate but ensures we don't // exceed memory limits when these transactions are later converted to proto records. - let input_notes_size = (transaction_header.input_notes().num_notes() * 32) as usize; + let nullifiers_size = (transaction_header.input_notes().num_notes() * 32) as usize; let output_notes_size = transaction_header.output_notes().len() * 500; - let size_in_bytes = (HEADER_BASE_SIZE + input_notes_size + output_notes_size) as i64; + let size_in_bytes = (HEADER_BASE_SIZE + nullifiers_size + output_notes_size) as i64; Self { transaction_id: transaction_header.id().to_bytes(), @@ -216,7 +216,7 @@ impl TransactionSummaryRowInsert { block_num: block_num.to_raw_sql(), initial_state_commitment: transaction_header.initial_state_commitment().to_bytes(), final_state_commitment: transaction_header.final_state_commitment().to_bytes(), - input_notes: input_notes_binary, + nullifiers: nullifiers_binary, output_notes: output_notes_binary, size_in_bytes, } diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 9fadd01751..a00f63c98f 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -93,7 +93,7 @@ diesel::table! 
{ block_num -> BigInt, initial_state_commitment -> Binary, final_state_commitment -> Binary, - input_notes -> Binary, + nullifiers -> Binary, output_notes -> Binary, size_in_bytes -> BigInt, } diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index ce49564703..a9974fa7ca 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -2,6 +2,7 @@ use std::time::Duration; mod accounts; mod blocks; +mod constants; mod db; mod errors; pub mod genesis; diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index e1c923e27f..54df4a70d1 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -9,6 +9,7 @@ use tonic::{Request, Response, Status}; use tracing::{debug, info, instrument}; use crate::COMPONENT; +use crate::constants::{MAX_ACCOUNT_IDS, MAX_NOTE_IDS, MAX_NOTE_TAGS, MAX_NULLIFIERS}; use crate::errors::{ CheckNullifiersError, GetBlockByNumberError, @@ -31,14 +32,6 @@ use crate::server::api::{ validate_nullifiers, }; -// CONSTANTS -// ================================================================================================ - -const MAX_ACCOUNT_IDS: usize = 100; -const MAX_NULLIFIERS: usize = 100; -const MAX_NOTE_TAGS: usize = 100; -const MAX_NOTE_IDS: usize = 100; - // CLIENT ENDPOINTS // ================================================================================================ @@ -566,20 +559,40 @@ impl rpc_server::Rpc for StoreApi { .await .map_err(SyncTransactionsError::from)?; + // Collect all note IDs from all transactions to make a single query + let all_notes_ids = transaction_records_db + .iter() + .flat_map(|tx| tx.output_notes.iter()) + .copied() + .collect::>(); + + // Retrieve all note data in a single query + let all_note_records = self + .state + .get_notes_by_id(all_notes_ids) + .await + .map_err(SyncTransactionsError::from)?; + + // Create a map from note ID to note record for efficient lookup + let note_map: std::collections::HashMap<_, _> = 
all_note_records + .into_iter() + .map(|note_record| (note_record.note_id, note_record)) + .collect(); + // Convert database TransactionRecord to proto TransactionRecord - let mut transaction_records = Vec::with_capacity(transaction_records_db.len()); + let mut transactions = Vec::with_capacity(transaction_records_db.len()); for tx_header in transaction_records_db { - // Retrieve full note data for output notes from the database - let note_records = self - .state - .get_notes_by_id(tx_header.output_notes.clone()) - .await - .map_err(SyncTransactionsError::from)?; + // Get note records for this transaction's output notes + let note_records: Vec<_> = tx_header + .output_notes + .iter() + .filter_map(|note_id| note_map.get(¬e_id.into()).cloned()) + .collect(); // Convert to proto using the helper method let proto_record = tx_header.into_proto_with_note_records(note_records); - transaction_records.push(proto_record); + transactions.push(proto_record); } Ok(Response::new(proto::rpc_store::SyncTransactionsResponse { @@ -587,7 +600,7 @@ impl rpc_server::Rpc for StoreApi { chain_tip: chain_tip.as_u32(), block_num: last_block_included.as_u32(), }), - transaction_records, + transactions, })) } } diff --git a/proto/proto/store/rpc.proto b/proto/proto/store/rpc.proto index 1fc3e1936e..6f78444c78 100644 --- a/proto/proto/store/rpc.proto +++ b/proto/proto/store/rpc.proto @@ -496,7 +496,7 @@ message SyncTransactionsResponse { PaginationInfo pagination_info = 1; // List of transaction records. - repeated TransactionRecord transaction_records = 2; + repeated TransactionRecord transactions = 2; } // Represents a transaction record. @@ -505,5 +505,5 @@ message TransactionRecord { fixed32 block_num = 1; // A transaction header. 
- transaction.TransactionHeader transaction_header = 2; + transaction.TransactionHeader header = 2; } diff --git a/proto/proto/types/transaction.proto b/proto/proto/types/transaction.proto index a600e6327a..86955e83b9 100644 --- a/proto/proto/types/transaction.proto +++ b/proto/proto/types/transaction.proto @@ -54,7 +54,7 @@ message TransactionHeader { primitives.Digest final_state_commitment = 3; // Nullifiers of the input notes of the transaction. - repeated primitives.Digest input_notes = 4; + repeated primitives.Digest nullifiers = 4; // Output notes of the transaction. repeated note.NoteSyncRecord output_notes = 5; From f94ff09a0b59b104df3fee289cd9ad9b9e67e7ab Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 17 Nov 2025 09:00:55 -0300 Subject: [PATCH 010/125] test: add a regression test for #1312 (#1359) --- Cargo.lock | 1 + crates/ntx-builder/Cargo.toml | 3 + crates/ntx-builder/src/state/account.rs | 1 + crates/ntx-builder/src/state/mod.rs | 25 ++++++++ crates/ntx-builder/src/state/tests.rs | 84 +++++++++++++++++++++++++ 5 files changed, 114 insertions(+) create mode 100644 crates/ntx-builder/src/state/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 650f50fbd1..27dcc64731 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2735,6 +2735,7 @@ dependencies = [ "anyhow", "futures", "lru 0.16.2", + "miden-lib", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 7eefab8e49..1a2f0d9e1e 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -30,5 +30,8 @@ tracing = { workspace = true } url = { workspace = true } [dev-dependencies] +miden-lib = { workspace = true } miden-node-test-macro = { path = "../test-macro" } +miden-node-utils = { features = ["testing"], workspace = true } +miden-objects = { default-features = true, features = ["testing"], workspace = true } rstest = { workspace = true } diff --git 
a/crates/ntx-builder/src/state/account.rs b/crates/ntx-builder/src/state/account.rs index 56af83b427..7ca410fb87 100644 --- a/crates/ntx-builder/src/state/account.rs +++ b/crates/ntx-builder/src/state/account.rs @@ -197,6 +197,7 @@ impl AccountState { // in case it's transaction wasn't available in the first place. // It shouldn't happen practically, since we skip them if the // relevant account cannot be retrieved via `fetch`. + let _ = self.nullified_notes.remove(&nullifier); } diff --git a/crates/ntx-builder/src/state/mod.rs b/crates/ntx-builder/src/state/mod.rs index fbdb8ea563..beb27b141a 100644 --- a/crates/ntx-builder/src/state/mod.rs +++ b/crates/ntx-builder/src/state/mod.rs @@ -20,6 +20,9 @@ use crate::store::{StoreClient, StoreError}; mod account; +#[cfg(test)] +mod tests; + // CONSTANTS // ================================================================================================= @@ -130,6 +133,28 @@ impl State { Ok(state) } + /// Creates a new State for testing purposes with minimal setup. + /// + /// This bypasses the need for a real store connection and provides a mock state + /// that can be used in unit tests. + #[cfg(test)] + pub(crate) fn new_for_testing( + chain_tip_header: BlockHeader, + chain_mmr: PartialBlockchain, + store: StoreClient, + ) -> Self { + Self { + chain_tip_header, + chain_mmr, + store, + accounts: HashMap::default(), + queue: VecDeque::default(), + in_progress: HashSet::default(), + inflight_txs: BTreeMap::default(), + nullifier_idx: BTreeMap::default(), + } + } + /// Selects the next candidate network transaction. 
/// /// Note that this marks the candidate account as in-progress and that it cannot be selected diff --git a/crates/ntx-builder/src/state/tests.rs b/crates/ntx-builder/src/state/tests.rs new file mode 100644 index 0000000000..d41a322b27 --- /dev/null +++ b/crates/ntx-builder/src/state/tests.rs @@ -0,0 +1,84 @@ +use std::collections::HashSet; + +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_utils::fee::test_fee_params; +use miden_objects::Word; +use miden_objects::block::BlockHeader; +use miden_objects::note::Nullifier; +use miden_objects::transaction::{PartialBlockchain, TransactionId}; + +use crate::state::State; +use crate::store::StoreClient; + +/// Helper function to create a mock State for testing without needing a real store. +fn create_mock_state() -> State { + // Create a minimal genesis block header + let chain_tip_header = BlockHeader::new( + 1_u8.into(), // version + Word::default(), // prev_hash + 0_u32.into(), // block_num (genesis) + Word::default(), // chain_root + Word::default(), // account_root + Word::default(), // nullifier_root + Word::default(), // note_root + Word::default(), // tx_hash + Word::default(), // kernel_root + Word::default(), // proof_hash + test_fee_params(), // fee_parameters + 0_u32, // timestamp + ); + + // Create an empty partial blockchain + let chain_mmr = PartialBlockchain::default(); + // Create a mock store client (it won't be used in this test) + let store = StoreClient::new("http://localhost:9999".parse().unwrap()); + + State::new_for_testing(chain_tip_header, chain_mmr, store) +} + +/// Regression test for issue #1312 +/// +/// This test verifies that the `NtxBuilder`'s state handling correctly processes transactions +/// that contain nullifiers without corresponding network notes. 
This scenario can occur when: +/// - A transaction consumes a non-network note (e.g., a private note) +/// - The nullifier is included in the transaction but is not tracked by the `NtxBuilder` +/// +/// The test ensures... +/// 1. such transactions are accepted +/// 2. the state remains consistent after processing +/// 3. the nullifier is skipped, since it has no corresponding note +/// 4. subsequent operations continue to work correctly +#[tokio::test] +async fn issue_1312_nullifier_without_note() { + let mut state = create_mock_state(); + + let initial_chain_tip = state.chain_tip(); + + let tx_id = + TransactionId::new(Word::default(), Word::default(), Word::default(), Word::default()); + let nullifier = + Nullifier::new(Word::default(), Word::default(), Word::default(), Word::default()); + + // Add transaction with nullifier but no network notes. + let add_event = MempoolEvent::TransactionAdded { + id: tx_id, + nullifiers: vec![nullifier], + network_notes: vec![], + account_delta: None, + }; + + state.mempool_update(add_event).await.unwrap(); + + assert_eq!(state.chain_tip(), initial_chain_tip); + + // Verify state integrity. + let candidate = state.select_candidate(std::num::NonZeroUsize::new(10).unwrap()); + assert!(candidate.is_none()); + + // Revert transaction. 
+ let revert_event = + MempoolEvent::TransactionsReverted(std::iter::once(tx_id).collect::>()); + state.mempool_update(revert_event).await.unwrap(); + + assert_eq!(state.chain_tip(), initial_chain_tip); +} From 07bf3ca50304682e878acb9e9fd5445ed6676d9b Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Wed, 19 Nov 2025 12:53:06 -0300 Subject: [PATCH 011/125] docs: pin mdbooks version (#1372) --- .github/workflows/book.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index f93d105e1a..2806838def 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -42,7 +42,7 @@ jobs: - name: Install mdbook and plugins uses: taiki-e/install-action@v2 with: - tool: mdbook, mdbook-linkcheck, mdbook-alerts, mdbook-katex + tool: mdbook@0.4, mdbook-linkcheck@0.7, mdbook-alerts@0.8, mdbook-katex@0.9 - name: Build book run: mdbook build docs/internal/ From 4738a50d7fedda7f01ebcb19abbbb8028daa649f Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 20 Nov 2025 09:57:26 +0200 Subject: [PATCH 012/125] fix: gRPC client ignores genesis if version is unset (#1370) This fixes an issue with the gRPC client builder where the content negotiation header was only set if the RPC version was specified. This meant that if the version was unset but the genesis commitment was set, that no header was written and the genesis commitment was ignored. 
--- .github/workflows/test.yml | 3 + CHANGELOG.md | 6 + bin/network-monitor/src/counter.rs | 5 +- bin/network-monitor/src/deploy/mod.rs | 5 +- bin/network-monitor/src/monitor/tasks.rs | 12 +- bin/network-monitor/src/remote_prover.rs | 1 + bin/network-monitor/src/status.rs | 12 +- crates/block-producer/src/store/mod.rs | 5 +- crates/ntx-builder/src/block_producer.rs | 9 +- crates/ntx-builder/src/store.rs | 5 +- crates/proto/src/clients/mod.rs | 432 ++++++++++++++--------- crates/rpc/src/server/accept.rs | 2 +- crates/rpc/src/server/api.rs | 14 +- crates/rpc/src/tests.rs | 8 +- 14 files changed, 311 insertions(+), 208 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 662fd3d440..5ff13ee38b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -31,3 +31,6 @@ jobs: - uses: taiki-e/install-action@nextest - name: Run tests run: make test + - name: Doc tests + run: cargo test --doc --workspace --all-features + diff --git a/CHANGELOG.md b/CHANGELOG.md index d58a3f080f..a6dc3eba32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,17 @@ ## v0.13.0 (TBD) +### Changes + - [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). - Normalize response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). - [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). +### Fixes + +- RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). + ## v0.12.3 (2025-11-15) - Added configurable timeout support to `RemoteBatchProver`, `RemoteBlockProver`, and `RemoteTransactionProver` clients ([#1365](https://github.com/0xMiden/miden-node/pull/1365)). 
diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 0fcde622c5..10c9a85463 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -10,7 +10,7 @@ use anyhow::{Context, Result}; use miden_lib::AuthScheme; use miden_lib::account::interface::AccountInterface; use miden_lib::utils::ScriptBuilder; -use miden_node_proto::clients::{Builder, Rpc, RpcClient}; +use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; use miden_objects::account::auth::AuthSecretKey; @@ -70,7 +70,8 @@ async fn create_rpc_client(config: &MonitorConfig) -> Result { .with_timeout(config.request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect::() + .without_otel_context_injection() + .connect() .await } diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index d78828d440..ff653d8f1f 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -9,7 +9,7 @@ use std::time::Duration; use anyhow::{Context, Result}; use miden_lib::transaction::TransactionKernel; -use miden_node_proto::clients::{Builder, Rpc, RpcClient}; +use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; use miden_objects::account::{Account, AccountId, PartialAccount, PartialStorage}; @@ -96,7 +96,8 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> .with_timeout(Duration::from_secs(5)) .without_metadata_version() .without_metadata_genesis() - .connect::() + .without_otel_context_injection() + .connect() .await .context("Failed to connect to RPC server")?; diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index 
91b3105335..77aac5b50b 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -4,7 +4,11 @@ use std::collections::HashMap; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use anyhow::Result; -use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverProxy, Rpc}; +use miden_node_proto::clients::{ + Builder as ClientBuilder, + RemoteProverProxyStatusClient, + RpcClient, +}; use tokio::sync::watch; use tokio::sync::watch::Receiver; use tokio::task::{Id, JoinSet}; @@ -54,7 +58,8 @@ impl Tasks { .with_timeout(config.request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let current_time = current_unix_timestamp_secs(); let initial_rpc_status = check_rpc_status(&mut rpc, current_time).await; @@ -92,7 +97,8 @@ impl Tasks { .with_timeout(config.request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let current_time = current_unix_timestamp_secs(); diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index c58b418112..a9cee796ec 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -103,6 +103,7 @@ pub async fn run_remote_prover_test_task( .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() + .without_otel_context_injection() .connect_lazy::(); let mut interval = tokio::time::interval(test_interval); diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index 48d9035f8a..968a263abc 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -5,7 +5,11 @@ use std::time::Duration; -use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverProxy, Rpc}; +use miden_node_proto::clients::{ + Builder as ClientBuilder, + 
RemoteProverProxyStatusClient, + RpcClient, +}; use miden_node_proto::generated as proto; use miden_node_proto::generated::block_producer::BlockProducerStatus; use miden_node_proto::generated::rpc::RpcStatus; @@ -241,7 +245,8 @@ pub async fn run_rpc_status_task( .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let mut interval = tokio::time::interval(status_check_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); @@ -334,7 +339,8 @@ pub async fn run_remote_prover_status_task( .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let mut interval = tokio::time::interval(status_check_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index df2972a8ed..fad738641c 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -3,7 +3,7 @@ use std::fmt::{Display, Formatter}; use std::num::NonZeroU32; use itertools::Itertools; -use miden_node_proto::clients::{Builder, StoreBlockProducer, StoreBlockProducerClient}; +use miden_node_proto::clients::{Builder, StoreBlockProducerClient}; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::errors::{ConversionError, MissingFieldHelper}; use miden_node_proto::{AccountState, generated as proto}; @@ -133,7 +133,8 @@ impl StoreClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { client: store } } diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index a29b61295c..cd0f0c1aed 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ 
b/crates/ntx-builder/src/block_producer.rs @@ -1,11 +1,7 @@ use std::time::Duration; use futures::{TryStream, TryStreamExt}; -use miden_node_proto::clients::{ - BlockProducer, - BlockProducerClient as InnerBlockProducerClient, - Builder, -}; +use miden_node_proto::clients::{BlockProducerClient as InnerBlockProducerClient, Builder}; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::generated::{self as proto}; use miden_node_utils::FlattenResult; @@ -40,7 +36,8 @@ impl BlockProducerClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { client: block_producer } } diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 9222752769..4329107882 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use miden_node_proto::clients::{Builder, StoreNtxBuilder, StoreNtxBuilderClient}; +use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_node_proto::domain::note::NetworkNote; use miden_node_proto::errors::ConversionError; @@ -39,7 +39,8 @@ impl StoreClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { inner: store } } diff --git a/crates/proto/src/clients/mod.rs b/crates/proto/src/clients/mod.rs index 4fb6b622c0..12f316b790 100644 --- a/crates/proto/src/clients/mod.rs +++ b/crates/proto/src/clients/mod.rs @@ -5,31 +5,34 @@ //! //! # Examples //! -//! ```rust,no_run -//! use miden_node_proto::clients::{Builder, WantsTls, StoreNtxBuilderClient, StoreNtxBuilder}; +//! ```rust +//! # use miden_node_proto::clients::{Builder, WantsTls, StoreNtxBuilderClient}; +//! # use url::Url; //! //! # async fn example() -> anyhow::Result<()> { //! 
// Create a store client with OTEL and TLS -//! let client: StoreNtxBuilderClient = Builder::new("https://store.example.com")? -//! .with_tls()? // or `.without_tls()` -//! .without_timeout() // or `.with_timeout(Duration::from_secs(10))` -//! .without_metadata_version() // or `.with_metadata_version("1.0".into())` -//! .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` -//! .connect::() +//! let url = Url::parse("https://example.com:8080")?; +//! let client: StoreNtxBuilderClient = Builder::new(url) +//! .with_tls()? // or `.without_tls()` +//! .without_timeout() // or `.with_timeout(Duration::from_secs(10))` +//! .without_metadata_version() // or `.with_metadata_version("1.0".into())` +//! .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` +//! .with_otel_context_injection() // or `.without_otel_context_injection()` +//! .connect::() //! .await?; //! # Ok(()) //! # } //! ``` -use std::collections::HashMap; -use std::fmt::Write; use std::marker::PhantomData; +use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use std::time::Duration; use anyhow::{Context, Result}; +use http::header::ACCEPT; use miden_node_utils::tracing::grpc::OtelInterceptor; use tonic::metadata::AsciiMetadataValue; -use tonic::service::Interceptor; use tonic::service::interceptor::InterceptedService; use tonic::transport::{Channel, ClientTlsConfig, Endpoint}; use tonic::{Request, Status}; @@ -37,206 +40,255 @@ use url::Url; use crate::generated; -// METADATA INTERCEPTOR -// ================================================================================================ - -/// Interceptor designed to inject required metadata into all RPC requests. -#[derive(Default, Clone)] -pub struct MetadataInterceptor { - metadata: HashMap<&'static str, AsciiMetadataValue>, +#[derive(Clone)] +pub struct Interceptor { + otel: Option, + accept: AsciiMetadataValue, } -impl MetadataInterceptor { - /// Adds or overwrites HTTP ACCEPT metadata to the interceptor. 
- /// - /// Provided version string must be ASCII. - pub fn with_accept_metadata( - mut self, - version: &str, - genesis: Option<&str>, - ) -> Result { - let mut accept_value = format!("application/vnd.miden; version={version}"); - if let Some(genesis) = genesis { - write!(accept_value, "; genesis={genesis}")?; +impl Default for Interceptor { + fn default() -> Self { + Self { + otel: None, + accept: AsciiMetadataValue::from_static(Self::MEDIA_TYPE), } - self.metadata.insert("accept", AsciiMetadataValue::try_from(accept_value)?); - Ok(self) } } -// COMBINED INTERCEPTOR (OTEL + METADATA) -// ================================================================================================ -#[derive(Clone)] -pub struct OtelAndMetadataInterceptor { - otel: OtelInterceptor, - metadata: MetadataInterceptor, -} +impl Interceptor { + const MEDIA_TYPE: &str = "application/vnd.miden"; + const VERSION: &str = "version"; + const GENESIS: &str = "genesis"; -impl OtelAndMetadataInterceptor { - pub fn new(otel: OtelInterceptor, metadata: MetadataInterceptor) -> Self { - Self { otel, metadata } - } -} + fn new(enable_otel: bool, version: Option<&str>, genesis: Option<&str>) -> Self { + if let Some(version) = version + && !version.is_ascii() + { + panic!("version contains non-ascii values: {version}"); + } + + if let Some(genesis) = genesis + && !genesis.is_ascii() + { + panic!("genesis contains non-ascii values: {genesis}"); + } -impl Interceptor for OtelAndMetadataInterceptor { - fn call(&mut self, request: Request<()>) -> Result, Status> { - // Apply OTEL first so tracing context propagates, then attach metadata headers - let req = self.otel.call(request)?; - self.metadata.call(req) + let accept = match (version, genesis) { + (None, None) => Self::MEDIA_TYPE.to_string(), + (None, Some(genesis)) => format!("{}; {}={genesis}", Self::MEDIA_TYPE, Self::GENESIS), + (Some(version), None) => format!("{}; {}={version}", Self::MEDIA_TYPE, Self::VERSION), + (Some(version), Some(genesis)) => 
format!( + "{}; {}={version}, {}={genesis}", + Self::MEDIA_TYPE, + Self::VERSION, + Self::GENESIS + ), + }; + Self { + otel: enable_otel.then_some(OtelInterceptor), + // SAFETY: we checked that all values are ascii at the top of the function. + accept: AsciiMetadataValue::from_str(&accept).unwrap(), + } } } -impl Interceptor for MetadataInterceptor { - fn call(&mut self, request: Request<()>) -> Result, Status> { - let mut request = request; - for (key, value) in &self.metadata { - request.metadata_mut().insert(*key, value.clone()); +impl tonic::service::Interceptor for Interceptor { + fn call(&mut self, mut request: tonic::Request<()>) -> Result, Status> { + if let Some(mut otel) = self.otel { + request = otel.call(request)?; } + + request.metadata_mut().insert(ACCEPT.as_str(), self.accept.clone()); + Ok(request) } } -// TYPE ALIASES FOR INSTRUMENTED CLIENTS +// TYPE ALIASES TO AID LEGIBILITY // ================================================================================================ -pub type RpcClient = - generated::rpc::api_client::ApiClient>; -pub type BlockProducerClient = - generated::block_producer::api_client::ApiClient>; -pub type StoreNtxBuilderClient = generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient< - InterceptedService, ->; -pub type StoreBlockProducerClient = - generated::block_producer_store::block_producer_client::BlockProducerClient< - InterceptedService, - >; -pub type StoreRpcClient = - generated::rpc_store::rpc_client::RpcClient>; - -pub type RemoteProverProxyStatusClient = - generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient< - InterceptedService, - >; - -pub type RemoteProverClient = - generated::remote_prover::api_client::ApiClient>; - -// GRPC CLIENT BUILDER TRAIT +type InterceptedChannel = InterceptedService; +type GeneratedRpcClient = generated::rpc::api_client::ApiClient; +type GeneratedBlockProducerClient = + generated::block_producer::api_client::ApiClient; +type 
GeneratedStoreClientForNtxBuilder = + generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient; +type GeneratedStoreClientForBlockProducer = + generated::block_producer_store::block_producer_client::BlockProducerClient; +type GeneratedStoreClientForRpc = generated::rpc_store::rpc_client::RpcClient; +type GeneratedProxyStatusClient = + generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient; +type GeneratedProverClient = generated::remote_prover::api_client::ApiClient; + +// gRPC CLIENTS // ================================================================================================ -/// Configuration for gRPC clients. -/// -/// This struct contains the configuration for gRPC clients, including the metadata version and -/// genesis commitment. -pub struct ClientConfig { - pub metadata_version: Option, - pub metadata_genesis: Option, +#[derive(Debug, Clone)] +pub struct RpcClient(GeneratedRpcClient); +#[derive(Debug, Clone)] +pub struct BlockProducerClient(GeneratedBlockProducerClient); +#[derive(Debug, Clone)] +pub struct StoreNtxBuilderClient(GeneratedStoreClientForNtxBuilder); +#[derive(Debug, Clone)] +pub struct StoreBlockProducerClient(GeneratedStoreClientForBlockProducer); +#[derive(Debug, Clone)] +pub struct StoreRpcClient(GeneratedStoreClientForRpc); +#[derive(Debug, Clone)] +pub struct RemoteProverProxyStatusClient(GeneratedProxyStatusClient); +#[derive(Debug, Clone)] +pub struct RemoteProverClient(GeneratedProverClient); + +impl DerefMut for RpcClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } -/// Trait for building gRPC clients from a common [`Builder`] configuration. -/// -/// This trait provides a standardized way to create different gRPC clients with consistent -/// configuration options like TLS, OTEL interceptors, and connection types. 
-pub trait GrpcClientBuilder { - type Service; +impl Deref for RpcClient { + type Target = GeneratedRpcClient; - fn with_interceptor(channel: Channel, config: &ClientConfig) -> Self::Service; + fn deref(&self) -> &Self::Target { + &self.0 + } } -// CLIENT BUILDER MARKERS -// ================================================================================================ +impl DerefMut for BlockProducerClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct Rpc; +impl Deref for BlockProducerClient { + type Target = GeneratedBlockProducerClient; -#[derive(Copy, Clone, Debug)] -pub struct BlockProducer; + fn deref(&self) -> &Self::Target { + &self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct StoreNtxBuilder; +impl DerefMut for StoreNtxBuilderClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct StoreBlockProducer; +impl Deref for StoreNtxBuilderClient { + type Target = GeneratedStoreClientForNtxBuilder; -#[derive(Copy, Clone, Debug)] -pub struct StoreRpc; + fn deref(&self) -> &Self::Target { + &self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct RemoteProverProxy; +impl DerefMut for StoreBlockProducerClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -// CLIENT BUILDER IMPLEMENTATIONS -// ================================================================================================ +impl Deref for StoreBlockProducerClient { + type Target = GeneratedStoreClientForBlockProducer; -impl GrpcClientBuilder for Rpc { - type Service = RpcClient; + fn deref(&self) -> &Self::Target { + &self.0 + } +} - fn with_interceptor(channel: Channel, config: &ClientConfig) -> Self::Service { - // Include Accept header only if version was explicitly provided; still combine with OTEL. 
- let mut metadata = MetadataInterceptor::default(); - if let Some(version) = config.metadata_version.as_deref() { - metadata = metadata - .with_accept_metadata(version, config.metadata_genesis.as_deref()) - .expect("Failed to create metadata interceptor"); - } - let combined = OtelAndMetadataInterceptor::new(OtelInterceptor, metadata); - generated::rpc::api_client::ApiClient::with_interceptor(channel, combined) +impl DerefMut for StoreRpcClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl GrpcClientBuilder for BlockProducer { - type Service = BlockProducerClient; +impl Deref for StoreRpcClient { + type Target = GeneratedStoreClientForRpc; - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::block_producer::api_client::ApiClient::with_interceptor(channel, OtelInterceptor) + fn deref(&self) -> &Self::Target { + &self.0 } } -impl GrpcClientBuilder for StoreNtxBuilder { - type Service = StoreNtxBuilderClient; +impl DerefMut for RemoteProverProxyStatusClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient::with_interceptor( - channel, - OtelInterceptor, - ) +impl Deref for RemoteProverProxyStatusClient { + type Target = GeneratedProxyStatusClient; + + fn deref(&self) -> &Self::Target { + &self.0 } } -impl GrpcClientBuilder for StoreBlockProducer { - type Service = StoreBlockProducerClient; +impl DerefMut for RemoteProverClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::block_producer_store::block_producer_client::BlockProducerClient::with_interceptor( - channel, - OtelInterceptor, - ) +impl Deref for RemoteProverClient { + type Target = GeneratedProverClient; + + fn deref(&self) -> &Self::Target { + 
&self.0 } } -impl GrpcClientBuilder for StoreRpc { - type Service = StoreRpcClient; +// GRPC CLIENT BUILDER TRAIT +// ================================================================================================ + +/// Trait for building gRPC clients from a common [`Builder`] configuration. +pub trait GrpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self; +} + +impl GrpcClient for RpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedRpcClient::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::rpc_store::rpc_client::RpcClient::with_interceptor(channel, OtelInterceptor) +impl GrpcClient for BlockProducerClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedBlockProducerClient::new(InterceptedService::new(channel, interceptor))) } } -impl GrpcClientBuilder for RemoteProverProxy { - type Service = RemoteProverProxyStatusClient; +impl GrpcClient for StoreNtxBuilderClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForNtxBuilder::new(InterceptedService::new( + channel, + interceptor, + ))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient::with_interceptor( +impl GrpcClient for StoreBlockProducerClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForBlockProducer::new(InterceptedService::new( channel, - OtelInterceptor, - ) + interceptor, + ))) } } -impl GrpcClientBuilder for RemoteProverClient { - type Service = RemoteProverClient; +impl GrpcClient for StoreRpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + 
Self(GeneratedStoreClientForRpc::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::remote_prover::api_client::ApiClient::with_interceptor(channel, OtelInterceptor) +impl GrpcClient for RemoteProverProxyStatusClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedProxyStatusClient::new(InterceptedService::new(channel, interceptor))) + } +} + +impl GrpcClient for RemoteProverClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedProverClient::new(InterceptedService::new(channel, interceptor))) } } @@ -251,17 +303,20 @@ impl GrpcClientBuilder for RemoteProverClient { /// /// Usage example: /// -/// ```rust,no_run -/// use miden_node_proto::clients::{Builder, WantsTls, Rpc, RpcClient}; -/// use std::time::Duration; +/// ```rust +/// # use miden_node_proto::clients::{Builder, WantsTls, RpcClient}; +/// # use url::Url; +/// # use std::time::Duration; /// /// # async fn example() -> anyhow::Result<()> { -/// let client: RpcClient = Builder::new("https://rpc.example.com:8080")? -/// .with_tls()? // or `.without_tls()` +/// let url = Url::parse("https://rpc.example.com:8080")?; +/// let client: RpcClient = Builder::new(url) +/// .with_tls()? 
// or `.without_tls()` /// .with_timeout(Duration::from_secs(5)) // or `.without_timeout()` -/// .with_metadata_version("1.0".into()) // or `.without_metadata_version()` +/// .with_metadata_version("1.0".into()) // or `.without_metadata_version()` /// .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` -/// .connect::() +/// .with_otel_context_injection() // or `.without_otel_context_injection()` +/// .connect::() /// .await?; /// # Ok(()) /// # } @@ -271,6 +326,7 @@ pub struct Builder { endpoint: Endpoint, metadata_version: Option, metadata_genesis: Option, + enable_otel: bool, _state: PhantomData, } @@ -283,6 +339,8 @@ pub struct WantsVersion; #[derive(Copy, Clone, Debug)] pub struct WantsGenesis; #[derive(Copy, Clone, Debug)] +pub struct WantsOTel; +#[derive(Copy, Clone, Debug)] pub struct WantsConnection; impl Builder { @@ -292,6 +350,7 @@ impl Builder { endpoint: self.endpoint, metadata_version: self.metadata_version, metadata_genesis: self.metadata_genesis, + enable_otel: self.enable_otel, _state: PhantomData::, } } @@ -308,6 +367,7 @@ impl Builder { endpoint, metadata_version: None, metadata_genesis: None, + enable_otel: false, _state: PhantomData, } } @@ -357,42 +417,64 @@ impl Builder { impl Builder { /// Do not include genesis commitment in request metadata. - pub fn without_metadata_genesis(mut self) -> Builder { + pub fn without_metadata_genesis(mut self) -> Builder { self.metadata_genesis = None; self.next_state() } /// Include a specific genesis commitment string in request metadata. - pub fn with_metadata_genesis(mut self, genesis: String) -> Builder { + pub fn with_metadata_genesis(mut self, genesis: String) -> Builder { self.metadata_genesis = Some(genesis); self.next_state() } } +impl Builder { + /// Enables OpenTelemetry context propagation via gRPC. + /// + /// This is used to by OpenTelemetry to connect traces across network boundaries. 
The server on + /// the other end must be configured to receive and use the injected trace context. + pub fn with_otel_context_injection(mut self) -> Builder { + self.enable_otel = true; + self.next_state() + } + + /// Disables OpenTelemetry context propagation. This should be disabled when interfacing with + /// external third party gRPC servers. + pub fn without_otel_context_injection(mut self) -> Builder { + self.enable_otel = false; + self.next_state() + } +} + impl Builder { /// Establish an eager connection and return a fully configured client. - pub async fn connect(self) -> Result + pub async fn connect(self) -> Result where - T: GrpcClientBuilder, + T: GrpcClient, { let channel = self.endpoint.connect().await?; - let cfg = ClientConfig { - metadata_version: self.metadata_version, - metadata_genesis: self.metadata_genesis, - }; - Ok(T::with_interceptor(channel, &cfg)) + Ok(self.connect_with_channel::(channel)) } /// Establish a lazy connection and return a client that will connect on first use. - pub fn connect_lazy(self) -> T::Service + pub fn connect_lazy(self) -> T where - T: GrpcClientBuilder, + T: GrpcClient, { let channel = self.endpoint.connect_lazy(); - let cfg = ClientConfig { - metadata_version: self.metadata_version, - metadata_genesis: self.metadata_genesis, - }; - T::with_interceptor(channel, &cfg) + self.connect_with_channel::(channel) + } + + fn connect_with_channel(self, channel: Channel) -> T + where + T: GrpcClient, + { + let interceptor = Interceptor::new( + self.enable_otel, + self.metadata_version.as_deref(), + self.metadata_genesis.as_deref(), + ); + T::with_interceptor(channel, interceptor) } } diff --git a/crates/rpc/src/server/accept.rs b/crates/rpc/src/server/accept.rs index 5ea5650afb..10b6c54117 100644 --- a/crates/rpc/src/server/accept.rs +++ b/crates/rpc/src/server/accept.rs @@ -29,7 +29,7 @@ use tower::{Layer, Service}; /// /// Parameters are optional and order is not important. 
/// -/// ``` +/// ```text /// application/vnd.miden; version=; genesis=0x1234 /// ``` #[derive(Clone)] diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index a75341f56b..a788f40bf9 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -2,13 +2,7 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; -use miden_node_proto::clients::{ - BlockProducer, - BlockProducerClient, - Builder, - StoreRpc, - StoreRpcClient, -}; +use miden_node_proto::clients::{BlockProducerClient, Builder, StoreRpcClient}; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::api_server::{self, Api}; use miden_node_proto::generated::{self as proto}; @@ -59,7 +53,8 @@ impl RpcService { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::() + .with_otel_context_injection() + .connect_lazy::() }; let block_producer = block_producer_url.map(|block_producer_url| { @@ -73,7 +68,8 @@ impl RpcService { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::() + .with_otel_context_injection() + .connect_lazy::() }); Self { diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 6eaec910a3..c7e4ba390b 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -4,7 +4,7 @@ use std::time::Duration; use http::header::{ACCEPT, CONTENT_TYPE}; use http::{HeaderMap, HeaderValue}; use miden_lib::account::wallets::BasicWallet; -use miden_node_proto::clients::{Builder, Rpc as RpcClientMarker, RpcClient}; +use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::rpc::api_client::ApiClient as ProtoClient; use miden_node_proto::generated::{self as proto}; use miden_node_store::Store; @@ -91,7 +91,8 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { .with_timeout(Duration::from_secs(10)) .with_metadata_version(version.to_string()) .without_metadata_genesis() - 
.connect::() + .without_otel_context_injection() + .connect::() .await .unwrap(); @@ -338,7 +339,8 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) .with_timeout(Duration::from_secs(10)) .without_metadata_version() .without_metadata_genesis() - .connect::() + .without_otel_context_injection() + .connect::() .await .expect("Failed to build client"); From 81dc8c47109ab1ca69fd49c9cc62e33c9596d991 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 20 Nov 2025 11:57:07 +0200 Subject: [PATCH 013/125] chore(clippy): enforce `fs-err` instead of `std::fs` (#1375) --- Cargo.lock | 2 ++ bin/node/src/commands/store.rs | 4 ++-- clippy.toml | 31 ++++++++++++++++++++++++++ crates/remote-prover-client/Cargo.toml | 1 + crates/remote-prover-client/build.rs | 2 +- crates/store/Cargo.toml | 1 + crates/store/src/blocks.rs | 8 +++---- crates/store/src/server/mod.rs | 2 +- crates/utils/Cargo.toml | 1 + crates/utils/src/version/mod.rs | 2 +- 10 files changed, 45 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27dcc64731..8de1f79612 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2889,6 +2889,7 @@ dependencies = [ "anyhow", "bytes", "figment", + "fs-err", "http", "http-body-util", "itertools 0.14.0", @@ -3035,6 +3036,7 @@ dependencies = [ name = "miden-remote-prover-client" version = "0.13.0" dependencies = [ + "fs-err", "getrandom 0.3.4", "miden-node-proto-build", "miden-objects", diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 7cc0fb0408..c4c39d92be 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -180,8 +180,8 @@ impl StoreCommand { // Create directories if they do not already exist. for directory in &[accounts_directory, data_directory] { - if directory.exists() { - let is_empty = directory.read_dir()?.next().is_none(); + if fs_err::exists(directory)? 
{ + let is_empty = fs_err::read_dir(directory)?.next().is_none(); // If the directory exists and is empty, we store the files there if !is_empty { anyhow::bail!(format!("{} exists but it is not empty.", directory.display())); diff --git a/clippy.toml b/clippy.toml index 3523592a20..2a5815cec4 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1,32 @@ doc-valid-idents = ["..", "SQLite"] + +disallowed-methods = [ + # Use fs_errr functions, so the filename is available in the error message + { path = "std::fs::canonicalize", replacement = "fs_err::canonicalize" }, + { path = "std::fs::copy", replacement = "fs_err::copy" }, + { path = "std::fs::create_dir", replacement = "fs_err::create_dir" }, + { path = "std::fs::create_dir_all", replacement = "fs_err::create_dir_all" }, + { path = "std::fs::exists", replacement = "fs_err::exists" }, + { path = "std::fs::hard_link", replacement = "fs_err::hard_link" }, + { path = "std::fs::metadata", replacement = "fs_err::metadata" }, + { path = "std::fs::read", replacement = "fs_err::read" }, + { path = "std::fs::read_dir", replacement = "fs_err::read_dir" }, + { path = "std::fs::read_link", replacement = "fs_err::read_link" }, + { path = "std::fs::read_to_string", replacement = "fs_err::read_to_string" }, + { path = "std::fs::remove_dir", replacement = "fs_err::remove_dir" }, + { path = "std::fs::remove_dir_all", replacement = "fs_err::remove_dir_all" }, + { path = "std::fs::remove_file", replacement = "fs_err::remove_file" }, + { path = "std::fs::rename", replacement = "fs_err::rename" }, + { path = "std::fs::set_permissions", replacement = "fs_err::set_permissions" }, + { path = "std::fs::soft_link", replacement = "fs_err::soft_link" }, + { path = "std::fs::symlink_metadata", replacement = "fs_err::symlink_metadata" }, + { path = "std::fs::write", replacement = "fs_err::write" }, + + # Use fs_err::path::PathExt methods, so the filename is available in the error message + { path = "std::path::Path::canonicalize", reason = "Use 
fs_err::path::PathExt methods" }, + { path = "std::path::Path::metadata", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::read_dir", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::read_link", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::symlink_metadata", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::try_exists", reason = "Use fs_err::path::PathExt methods" }, +] diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 049fb16217..32cc07be8b 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -46,6 +46,7 @@ tokio = { default-features = false, features = ["sync"], optional = true tonic-prost = { workspace = true } [build-dependencies] +fs-err = { workspace = true } miden-node-proto-build = { workspace = true } miette = { features = ["fancy"], version = "7.5" } tonic-prost-build = { workspace = true } diff --git a/crates/remote-prover-client/build.rs b/crates/remote-prover-client/build.rs index 4a6c5e2541..ffd9b2e711 100644 --- a/crates/remote-prover-client/build.rs +++ b/crates/remote-prover-client/build.rs @@ -56,7 +56,7 @@ fn build_tonic_from_descriptor( /// Replaces std references with core and alloc for nostd compatibility fn convert_to_nostd(file_path: &str) -> miette::Result<()> { - let file_content = fs::read_to_string(file_path).into_diagnostic()?; + let file_content = fs_err::read_to_string(file_path).into_diagnostic()?; let updated_content = file_content .replace("std::result", "core::result") .replace("std::marker", "core::marker") diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index f490cf4b74..8eca0c71d1 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -21,6 +21,7 @@ deadpool-diesel = { features = ["sqlite"], version = "0.6" } deadpool-sync = { version = "0.1" } diesel = { features = ["numeric", "sqlite"], 
version = "2.2" } diesel_migrations = { features = ["sqlite"], version = "2.2" } +fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } miden-lib = { workspace = true } diff --git a/crates/store/src/blocks.rs b/crates/store/src/blocks.rs index 5c9570252a..8d7d2c216a 100644 --- a/crates/store/src/blocks.rs +++ b/crates/store/src/blocks.rs @@ -31,7 +31,7 @@ impl BlockStore { fields(path = %store_dir.display()), )] pub fn bootstrap(store_dir: PathBuf, genesis_block: &GenesisBlock) -> std::io::Result { - std::fs::create_dir(&store_dir)?; + fs_err::create_dir(&store_dir)?; let block_store = Self { store_dir }; block_store.save_block_blocking(BlockNumber::GENESIS, &genesis_block.inner().to_bytes())?; @@ -55,7 +55,7 @@ impl BlockStore { /// /// See also: [`std::fs::metadata`]. pub fn load(store_dir: PathBuf) -> std::io::Result { - let meta = std::fs::metadata(&store_dir)?; + let meta = fs_err::metadata(&store_dir)?; if meta.is_dir().not() { return Err(ErrorKind::NotADirectory.into()); } @@ -101,10 +101,10 @@ impl BlockStore { ) -> Result<(), std::io::Error> { let (epoch_path, block_path) = self.epoch_block_path(block_num)?; if !epoch_path.exists() { - std::fs::create_dir_all(epoch_path)?; + fs_err::create_dir_all(epoch_path)?; } - std::fs::write(block_path, data) + fs_err::write(block_path, data) } // HELPER FUNCTIONS diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 2dd41fca0d..91a2fbc368 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -183,7 +183,7 @@ impl DataDirectory { /// Creates a new [`DataDirectory`], ensuring that the directory exists and is accessible /// insofar as is possible. 
pub fn load(path: PathBuf) -> std::io::Result { - let meta = std::fs::metadata(&path)?; + let meta = fs_err::metadata(&path)?; if meta.is_dir().not() { return Err(std::io::ErrorKind::NotConnected.into()); } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 1c28ce8fb5..db641928e7 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -24,6 +24,7 @@ testing = ["dep:tokio", "miden-objects/testing"] anyhow = { workspace = true } bytes = { version = "1.10" } figment = { features = ["env", "toml"], version = "0.10" } +fs-err = { workspace = true } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } diff --git a/crates/utils/src/version/mod.rs b/crates/utils/src/version/mod.rs index 03ff66249f..7d378558c5 100644 --- a/crates/utils/src/version/mod.rs +++ b/crates/utils/src/version/mod.rs @@ -123,7 +123,7 @@ mod vergen { let cargo_vcs_info = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(".cargo_vcs_info.json"); if cargo_vcs_info.exists() { // The file is small so reading to string is acceptable. - let contents = std::fs::read_to_string(cargo_vcs_info).context("Reading vcs info")?; + let contents = fs_err::read_to_string(cargo_vcs_info).context("Reading vcs info")?; // File format: // { From 9dcf580aebcc7c73db8eabcd8ac09addd9b44e08 Mon Sep 17 00:00:00 2001 From: radik878 Date: Thu, 20 Nov 2025 14:41:41 +0200 Subject: [PATCH 014/125] feat: accept header genesis parameter is now required for write RPC methods (#1298) Write RPC requests are now required to contain the network's genesis commitment as part of their ACCEPT header. 
--- CHANGELOG.md | 1 + crates/rpc/src/server/accept.rs | 120 +++++++++++++++++++++++++++++--- crates/rpc/src/server/mod.rs | 6 +- crates/rpc/src/tests.rs | 115 +++++++++++++++++++++++++----- 4 files changed, 211 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6dc3eba32..8efc3eac8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - Normalize response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). - [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). +- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). ### Fixes diff --git a/crates/rpc/src/server/accept.rs b/crates/rpc/src/server/accept.rs index 10b6c54117..4e0e1d06d9 100644 --- a/crates/rpc/src/server/accept.rs +++ b/crates/rpc/src/server/accept.rs @@ -10,6 +10,12 @@ use miden_objects::{Word, WordError}; use semver::{Comparator, Version, VersionReq}; use tower::{Layer, Service}; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum GenesisNegotiation { + Optional, + Mandatory, +} + /// Performs content negotiation by rejecting requests which don't match our RPC version or network. /// Clients can specify these as parameters in our `application/vnd.miden` accept media range. /// @@ -36,6 +42,11 @@ use tower::{Layer, Service}; pub struct AcceptHeaderLayer { supported_versions: VersionReq, genesis_commitment: Word, + /// RPC method names for which the `genesis` parameter is mandatory. + /// + /// These should be gRPC method names (e.g. `SubmitProvenTransaction`), + /// matched against the end of the request path like "/rpc.Api/". 
+ require_genesis_methods: Vec<&'static str>, } #[derive(Debug, thiserror::Error)] @@ -71,7 +82,17 @@ impl AcceptHeaderLayer { }], }; - AcceptHeaderLayer { supported_versions, genesis_commitment } + AcceptHeaderLayer { + supported_versions, + genesis_commitment, + require_genesis_methods: Vec::new(), + } + } + + /// Mark a gRPC method as requiring a `genesis` parameter in the Accept header. + pub fn with_genesis_enforced_method(mut self, method: &'static str) -> Self { + self.require_genesis_methods.push(method); + self } } @@ -89,13 +110,21 @@ impl AcceptHeaderLayer { const GRPC: Name<'static> = Name::new_unchecked("grpc"); /// Parses the `Accept` header's contents, searching for any media type compatible with our - /// RPC version and genesis commitment. - fn negotiate(&self, accept: &str) -> Result<(), AcceptHeaderError> { + /// RPC version and genesis commitment, controlling whether `genesis` is optional or mandatory. + fn negotiate( + &self, + accept: &str, + genesis_mode: GenesisNegotiation, + ) -> Result<(), AcceptHeaderError> { let mut media_types = mediatype::MediaTypeList::new(accept).peekable(); // Its debatable whether an empty header value is valid. Let's err on the side of being // gracious if the client want's to be weird. if media_types.peek().is_none() { + // If there are no media types provided and genesis is required, reject. + if matches!(genesis_mode, GenesisNegotiation::Mandatory) { + return Err(AcceptHeaderError::NoSupportedMediaRange); + } return Ok(()); } @@ -150,16 +179,16 @@ impl AcceptHeaderLayer { continue; } - // Skip if the genesis commitment does not match. + // Skip if the genesis commitment does not match, or if it is required but missing. 
let genesis = media_type .get_param(Self::GENESIS) .map(|value| Word::try_from(value.unquoted_str().as_ref())) .transpose() .map_err(AcceptHeaderError::InvalidGenesis)?; - if let Some(genesis) = genesis - && genesis != self.genesis_commitment - { - continue; + match (genesis_mode, genesis) { + (_, Some(value)) if value != self.genesis_commitment => continue, + (GenesisNegotiation::Mandatory, None) => continue, + _ => {}, } // All preconditions met, this is a valid media type that we can serve. @@ -195,14 +224,36 @@ where } fn call(&mut self, request: http::Request) -> Self::Future { + // Determine if this RPC method requires the `genesis` parameter. + let path = request.uri().path(); + let method_name = path.rsplit('/').next().unwrap_or_default(); + let requires_genesis = self.verifier.require_genesis_methods.contains(&method_name); + + dbg!(request.headers()); + + // If `genesis` is required but the header is missing entirely, reject early. let Some(header) = request.headers().get(ACCEPT) else { + if requires_genesis { + let response = tonic::Status::invalid_argument( + "Accept header with 'genesis' parameter is required for write RPC methods", + ) + .into_http(); + return futures::future::ready(Ok(response)).boxed(); + } return self.inner.call(request).boxed(); }; let result = header .to_str() .map_err(AcceptHeaderError::InvalidUtf8) - .map(|header| self.verifier.negotiate(header)) + .map(|header| { + let mode = if requires_genesis { + GenesisNegotiation::Mandatory + } else { + GenesisNegotiation::Optional + }; + self.verifier.negotiate(header, mode) + }) .flatten_result(); match result { @@ -342,7 +393,9 @@ mod tests { #[case::quoted_network(r#"application/vnd.miden; genesis="0x00000000000000000000000000000000000000000000000000000000deadbeef""#)] #[test] fn request_should_pass(#[case] accept: &'static str) { - AcceptHeaderLayer::for_tests().negotiate(accept).unwrap(); + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Optional) + 
.unwrap(); } #[rstest::rstest] @@ -356,7 +409,52 @@ mod tests { #[case::wildcard_subtype("application/*")] #[test] fn request_should_be_rejected(#[case] accept: &'static str) { - AcceptHeaderLayer::for_tests().negotiate(accept).unwrap_err(); + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Optional) + .unwrap_err(); + } + + #[test] + fn write_requires_genesis_param_missing_or_empty_or_mismatch() { + let layer = AcceptHeaderLayer::for_tests(); + + // Missing genesis parameter + assert!( + layer + .negotiate("application/vnd.miden", super::GenesisNegotiation::Mandatory) + .is_err() + ); + + // Empty header value + assert!(layer.negotiate("", super::GenesisNegotiation::Mandatory).is_err()); + + // Present but mismatched genesis parameter + let mismatched = "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeee"; + assert!(layer.negotiate(mismatched, super::GenesisNegotiation::Mandatory).is_err()); + } + + #[rstest::rstest] + #[case::matching_network( + "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeef" + )] + #[case::matching_network_and_version( + "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeef; version=0.2.3" + )] + #[test] + fn request_with_mandadory_genesis_should_pass(#[case] accept: &'static str) { + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Mandatory) + .unwrap(); + } + + #[rstest::rstest] + #[case::missing_network("application/vnd.miden;")] + #[case::missing_network_wildcard("*/*")] + #[test] + fn request_with_mandadory_genesis_should_be_rejected(#[case] accept: &'static str) { + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Mandatory) + .unwrap_err(); } #[rstest::rstest] diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index c6b6349be8..71ef163c29 100644 --- a/crates/rpc/src/server/mod.rs +++ 
b/crates/rpc/src/server/mod.rs @@ -80,7 +80,11 @@ impl Rpc { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .layer(HealthCheckLayer) - .layer(AcceptHeaderLayer::new(&rpc_version, genesis.commitment())) + .layer( + AcceptHeaderLayer::new(&rpc_version, genesis.commitment()) + .with_genesis_enforced_method("SubmitProvenTransaction") + .with_genesis_enforced_method("SubmitProvenBatch"), + ) .layer(cors_for_grpc_web_layer()) // Enables gRPC-web support. .layer(GrpcWebLayer::new()) diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index c7e4ba390b..192aac20a8 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -36,7 +36,7 @@ use crate::Rpc; async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Override the client so that the ACCEPT header is not set. let mut rpc_client = { @@ -63,7 +63,7 @@ async fn rpc_server_accepts_requests_without_accept_header() { async fn rpc_server_accepts_requests_with_accept_header() { // Start the RPC. let (mut rpc_client, _, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Send any request to the RPC. let response = send_request(&mut rpc_client).await; @@ -80,7 +80,7 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { for version in ["1.9.0", "0.8.1", "0.8.0", "0.999.0", "99.0.0"] { // Start the RPC. 
let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Recreate the RPC client with an invalid version. let url = rpc_addr.to_string(); @@ -122,7 +122,7 @@ async fn rpc_startup_is_robust_to_network_failures() { assert!(response.is_err()); // Start the store. - let (store_runtime, data_directory) = start_store(store_addr).await; + let (store_runtime, data_directory, _genesis) = start_store(store_addr).await; // Test: send request against RPC api and should succeed let response = send_request(&mut rpc_client).await; @@ -160,7 +160,7 @@ async fn rpc_startup_is_robust_to_network_failures() { async fn rpc_server_has_web_support() { // Start server let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Send a status request let client = reqwest::Client::new(); @@ -203,14 +203,17 @@ async fn rpc_server_has_web_support() { async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; // Override the client so that the ACCEPT header is not set. 
- let mut rpc_client = { - let endpoint = tonic::transport::Endpoint::try_from(format!("http://{rpc_addr}")).unwrap(); - - ProtoClient::connect(endpoint).await.unwrap() - }; + let mut rpc_client = + miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) + .without_tls() + .with_timeout(Duration::from_secs(5)) + .without_metadata_version() + .with_metadata_genesis(genesis.to_hex()) + .without_otel_context_injection() + .connect_lazy::(); let account_id = AccountId::dummy( [0; 15], @@ -272,13 +275,83 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { assert!(response.is_err()); // Assert that the error is due to the invalid account delta commitment. + let err = response.as_ref().unwrap_err().message(); + assert!( + err.contains("failed to validate account delta in transaction account update"), + "expected error message to contain delta commitment error but got: {err}" + ); + + // Shutdown to avoid runtime drop error. + store_runtime.shutdown_background(); +} + +#[tokio::test] +async fn rpc_server_rejects_tx_submissions_without_genesis() { + // Start the RPC. + let (_, rpc_addr, store_addr) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + + // Override the client so that the ACCEPT header is not set. 
+ let mut rpc_client = + miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) + .without_tls() + .with_timeout(Duration::from_secs(5)) + .without_metadata_version() + .without_metadata_genesis() + .without_otel_context_injection() + .connect_lazy::(); + + let account_id = AccountId::dummy( + [0; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let account = AccountBuilder::new([0; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_assets(vec![]) + .with_component(BasicWallet) + .with_auth_component(NoopAuthComponent) + .build_existing() + .unwrap(); + + let account_delta: AccountDelta = account.clone().try_into().unwrap(); + + // Send any request to the RPC. + let tx = ProvenTransactionBuilder::new( + account_id, + [8; 32].try_into().unwrap(), + account.commitment(), + account_delta.clone().to_commitment(), // delta commitment + 0.into(), + Word::default(), + test_fee(), + u32::MAX.into(), + ExecutionProof::new_dummy(), + ) + .account_update_details(AccountUpdateDetails::Delta(account_delta)) + .build() + .unwrap(); + + let request = proto::transaction::ProvenTransaction { + transaction: tx.to_bytes(), + transaction_inputs: None, + }; + + let response = rpc_client.submit_proven_transaction(request).await; + + // Assert that the server rejected our request. + assert!(response.is_err()); + + // Assert that the error is due to the invalid account delta commitment. + let err = response.as_ref().unwrap_err().message(); assert!( - response - .as_ref() - .err() - .unwrap() - .message() - .contains("failed to validate account delta in transaction account update"), + err.contains( + "server does not support any of the specified application/vnd.miden content types" + ), + "expected error message to reference incompatible content media types but got: {err:?}" ); // Shutdown to avoid runtime drop error. 
@@ -347,7 +420,7 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) (rpc_client, rpc_addr, store_addr) } -async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir) { +async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { // Start the store. let data_directory = tempfile::tempdir().expect("tempdir should be created"); @@ -377,5 +450,9 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir) { .await .expect("store should start serving"); }); - (store_runtime, data_directory) + ( + store_runtime, + data_directory, + genesis_state.into_block().unwrap().inner().commitment(), + ) } From cf59454c7b693edca4d575de00264fa1b8a9bb12 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 24 Nov 2025 19:30:30 +0100 Subject: [PATCH 015/125] feat: integrate `AccountTreeWithHistory` (#1333) --- CHANGELOG.md | 1 + crates/ntx-builder/src/transaction.rs | 20 +--- .../db/migrations/2025062000000_setup/up.sql | 48 +++++--- crates/store/src/db/mod.rs | 13 +++ .../store/src/db/models/queries/accounts.rs | 107 ++++++++++++++---- crates/store/src/db/schema.rs | 12 +- crates/store/src/errors.rs | 4 + crates/store/src/state.rs | 55 +++++---- 8 files changed, 185 insertions(+), 75 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8efc3eac8d..3b7d43be4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution (#[1278](https://github.com/0xMiden/miden-node/pull/1278)). - Added `validator` crate with initial protobuf, gRPC server, and sub-command (#[1293](https://github.com/0xMiden/miden-node/pull/1293)). - [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/miden-node/pull/1292)). +- [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/miden-node/pull/1333)). 
- Implement `DataStore::get_note_script()` for `NtxDataStore` (#[1332](https://github.com/0xMiden/miden-node/pull/1332)). - Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/miden-node/pull/1338)). diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/transaction.rs index c0d89bc0da..6969e0e88c 100644 --- a/crates/ntx-builder/src/transaction.rs +++ b/crates/ntx-builder/src/transaction.rs @@ -409,20 +409,12 @@ impl DataStore for NtxDataStore { } })?; // Handle response. - match maybe_script { - Some(script) => { - // Cache the retrieved script. - { - let mut cache_guard = cache.lock().await; - cache_guard.put(script_root, script.clone()); - } - // Return script. - Ok(Some(script)) - }, - None => { - // Response did not contain the note script. - Ok(None) - }, + if let Some(script) = maybe_script { + let mut cache_guard = cache.lock().await; + cache_guard.put(script_root, script.clone()); + Ok(Some(script)) + } else { + Ok(None) } } } diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 0b8d0744d2..75cc90146b 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -21,10 +21,9 @@ CREATE TABLE accounts ( storage BLOB, vault BLOB, nonce INTEGER, + is_latest BOOLEAN NOT NULL DEFAULT 0, -- Indicates if this is the latest state for this account_id - PRIMARY KEY (account_id), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num), - FOREIGN KEY (code_commitment) REFERENCES account_codes(code_commitment), + PRIMARY KEY (account_id, block_num), CONSTRAINT all_null_or_none_null CHECK ( (code_commitment IS NOT NULL AND storage IS NOT NULL AND vault IS NOT NULL AND nonce IS NOT NULL) @@ -34,6 +33,12 @@ CREATE TABLE accounts ( ) WITHOUT ROWID; CREATE INDEX idx_accounts_network_prefix ON 
accounts(network_account_id_prefix) WHERE network_account_id_prefix IS NOT NULL; +CREATE INDEX idx_accounts_id_block ON accounts(account_id, block_num DESC); +CREATE INDEX idx_accounts_latest ON accounts(account_id, is_latest) WHERE is_latest = 1; +-- Index for joining with block_headers +CREATE INDEX idx_accounts_block_num ON accounts(block_num); +-- Index for joining with account_codes +CREATE INDEX idx_accounts_code_commitment ON accounts(code_commitment) WHERE code_commitment IS NOT NULL; CREATE TABLE notes ( committed_at INTEGER NOT NULL, -- Block number when the note was committed @@ -56,9 +61,6 @@ CREATE TABLE notes ( serial_num BLOB, PRIMARY KEY (committed_at, batch_index, note_index), - FOREIGN KEY (committed_at) REFERENCES block_headers(block_num), - FOREIGN KEY (sender) REFERENCES accounts(account_id), - FOREIGN KEY (script_root) REFERENCES note_scripts(script_root), CONSTRAINT notes_type_in_enum CHECK (note_type BETWEEN 1 AND 3), CONSTRAINT notes_execution_mode_in_enum CHECK (execution_mode BETWEEN 0 AND 1), CONSTRAINT notes_consumed_at_is_u32 CHECK (consumed_at BETWEEN 0 AND 0xFFFFFFFF), @@ -72,6 +74,12 @@ CREATE INDEX idx_notes_sender ON notes(sender, committed_at); CREATE INDEX idx_notes_tag ON notes(tag, committed_at); CREATE INDEX idx_notes_nullifier ON notes(nullifier); CREATE INDEX idx_unconsumed_network_notes ON notes(execution_mode, consumed_at); +-- Index for joining with block_headers on committed_at +CREATE INDEX idx_notes_committed_at ON notes(committed_at); +-- Index for joining with note_scripts +CREATE INDEX idx_notes_script_root ON notes(script_root) WHERE script_root IS NOT NULL; +-- Index for joining with block_headers on consumed_at +CREATE INDEX idx_notes_consumed_at ON notes(consumed_at) WHERE consumed_at IS NOT NULL; CREATE TABLE note_scripts ( script_root BLOB NOT NULL, @@ -86,25 +94,33 @@ CREATE TABLE account_storage_map_values ( slot INTEGER NOT NULL, key BLOB NOT NULL, value BLOB NOT NULL, - is_latest_update BOOLEAN NOT NULL, 
+ is_latest BOOLEAN NOT NULL, PRIMARY KEY (account_id, block_num, slot, key), - CONSTRAINT slot_is_u8 CHECK (slot BETWEEN 0 AND 0xFF) + CONSTRAINT slot_is_u8 CHECK (slot BETWEEN 0 AND 0xFF), + FOREIGN KEY (account_id, block_num) REFERENCES accounts(account_id, block_num) ON DELETE CASCADE ) WITHOUT ROWID; -CREATE INDEX asm_latest_by_acct_block_slot_key ON account_storage_map_values(account_id, block_num); +-- Index for joining with accounts table on compound key +CREATE INDEX idx_account_storage_account_block ON account_storage_map_values(account_id, block_num); +-- Index for querying latest values +CREATE INDEX idx_account_storage_latest ON account_storage_map_values(account_id, is_latest) WHERE is_latest = 1; CREATE TABLE account_vault_assets ( account_id BLOB NOT NULL, block_num INTEGER NOT NULL, vault_key BLOB NOT NULL, asset BLOB, - is_latest_update BOOLEAN NOT NULL, + is_latest BOOLEAN NOT NULL, - PRIMARY KEY (account_id, block_num, vault_key) + PRIMARY KEY (account_id, block_num, vault_key), + FOREIGN KEY (account_id, block_num) REFERENCES accounts(account_id, block_num) ON DELETE CASCADE ) WITHOUT ROWID; -CREATE INDEX idx_vault_assets_id_block ON account_vault_assets (account_id, block_num); +-- Index for joining with accounts table on compound key +CREATE INDEX idx_vault_assets_account_block ON account_vault_assets(account_id, block_num); +-- Index for querying latest assets +CREATE INDEX idx_vault_assets_latest ON account_vault_assets(account_id, is_latest) WHERE is_latest = 1; CREATE TABLE nullifiers ( nullifier BLOB NOT NULL, @@ -112,12 +128,12 @@ CREATE TABLE nullifiers ( block_num INTEGER NOT NULL, PRIMARY KEY (nullifier), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num), CONSTRAINT nullifiers_nullifier_is_digest CHECK (length(nullifier) = 32), CONSTRAINT nullifiers_nullifier_prefix_is_u16 CHECK (nullifier_prefix BETWEEN 0 AND 0xFFFF) ) WITHOUT ROWID; CREATE INDEX idx_nullifiers_prefix ON nullifiers(nullifier_prefix); +-- Index for 
joining with block_headers CREATE INDEX idx_nullifiers_block_num ON nullifiers(block_num); CREATE TABLE transactions ( @@ -130,10 +146,10 @@ CREATE TABLE transactions ( output_notes BLOB NOT NULL, -- Serialized vector with the NoteId of the output notes. size_in_bytes INTEGER NOT NULL, -- Estimated size of the row in bytes, considering the size of the input and output notes. - PRIMARY KEY (transaction_id), - FOREIGN KEY (account_id) REFERENCES accounts(account_id), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num) + PRIMARY KEY (transaction_id) ) WITHOUT ROWID; +-- Index for joining with accounts (note: account may not exist in accounts table) CREATE INDEX idx_transactions_account_id ON transactions(account_id); +-- Index for joining with block_headers CREATE INDEX idx_transactions_block_num ON transactions(block_num); diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 8515327f61..94c8ae3580 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -408,6 +408,19 @@ impl Db { .await } + /// Loads account details at a specific block number from the DB. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_historical_account_at( + &self, + id: AccountId, + block_num: BlockNumber, + ) -> Result { + self.transact("Get historical account details", move |conn| { + queries::select_historical_account_at(conn, id, block_num) + }) + .await + } + /// Loads public account details from the DB based on the account ID's prefix. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_network_account_by_prefix( diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index a1e4452b1e..52be3ee84f 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -72,6 +72,7 @@ use crate::errors::DatabaseError; /// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// account_id = ?1 +/// AND is_latest = 1 /// ``` pub(crate) fn select_account( conn: &mut SqliteConnection, @@ -84,6 +85,57 @@ pub(crate) fn select_account( (AccountRaw::as_select(), schema::account_codes::code.nullable()), ) .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result::<(AccountRaw, Option>)>(conn) + .optional()? + .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; + let info = AccountWithCodeRawJoined::from(raw).try_into()?; + Ok(info) +} + +/// Select account details at a specific block number from the DB using the given +/// [`SqliteConnection`]. +/// +/// # Returns +/// +/// The account details at the specified block, or an error. 
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// accounts.account_id, +/// accounts.account_commitment, +/// accounts.block_num, +/// accounts.storage, +/// accounts.vault, +/// accounts.nonce, +/// accounts.code_commitment, +/// account_codes.code +/// FROM +/// accounts +/// LEFT JOIN +/// account_codes ON accounts.code_commitment = account_codes.code_commitment +/// WHERE +/// account_id = ?1 +/// AND block_num = ?2 +/// ``` +pub(crate) fn select_historical_account_at( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result { + let raw = SelectDsl::select( + schema::accounts::table.left_join(schema::account_codes::table.on( + schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), + )), + (AccountRaw::as_select(), schema::account_codes::code.nullable()), + ) + .filter( + schema::accounts::account_id + .eq(account_id.to_bytes()) + .and(schema::accounts::block_num.eq(block_num.to_raw_sql())), + ) .get_result::<(AccountRaw, Option>)>(conn) .optional()? 
.ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; @@ -130,6 +182,7 @@ pub(crate) fn select_account_by_id_prefix( )), (AccountRaw::as_select(), schema::account_codes::code.nullable()), ) + .filter(schema::accounts::is_latest.eq(true)) .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) .get_result::<(AccountRaw, Option>)>(conn) .optional() @@ -157,6 +210,8 @@ pub(crate) fn select_account_by_id_prefix( /// account_commitment /// FROM /// accounts +/// WHERE +/// is_latest = 1 /// ORDER BY /// block_num ASC /// ``` @@ -167,6 +222,7 @@ pub(crate) fn select_all_account_commitments( schema::accounts::table, (schema::accounts::account_id, schema::accounts::account_commitment), ) + .filter(schema::accounts::is_latest.eq(true)) .order_by(schema::accounts::block_num.asc()) .load::<(Vec, Vec)>(conn)?; @@ -341,6 +397,7 @@ pub(crate) fn select_all_accounts( )), (AccountRaw::as_select(), schema::account_codes::code.nullable()), ) + .filter(schema::accounts::is_latest.eq(true)) .load::<(AccountRaw, Option>)>(conn)?; let account_infos = vec_raw_try_into::( accounts_raw.into_iter().map(AccountWithCodeRawJoined::from), @@ -380,7 +437,7 @@ impl StorageMapValue { /// /// # Returns /// -/// A vector of tuples containing `(slot, key, value, is_latest_update)` for the given account. +/// A vector of tuples containing `(slot, key, value, is_latest)` for the given account. /// Each row contains one of: /// /// - the historical value for a slot and key specifically on block `block_to` @@ -593,8 +650,8 @@ impl TryInto for AccountSummaryRaw { /// Insert an account vault asset row into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest_update=true` for the new row and update any existing -/// row with the same `(account_id, vault_key)` tuple to `is_latest_update=false`. 
+/// This function will set `is_latest=true` for the new row and update any existing +/// row with the same `(account_id, vault_key)` tuple to `is_latest=false`. /// /// # Returns /// @@ -610,16 +667,16 @@ pub(crate) fn insert_account_vault_asset( diesel::Connection::transaction(conn, |conn| { // First, update any existing rows with the same (account_id, vault_key) to set - // is_latest_update=false + // is_latest=false let vault_key: Word = vault_key.into(); let update_count = diesel::update(schema::account_vault_assets::table) .filter( schema::account_vault_assets::account_id .eq(&account_id.to_bytes()) .and(schema::account_vault_assets::vault_key.eq(&vault_key.to_bytes())) - .and(schema::account_vault_assets::is_latest_update.eq(true)), + .and(schema::account_vault_assets::is_latest.eq(true)), ) - .set(schema::account_vault_assets::is_latest_update.eq(false)) + .set(schema::account_vault_assets::is_latest.eq(false)) .execute(conn)?; // Insert the new latest row @@ -633,8 +690,8 @@ pub(crate) fn insert_account_vault_asset( /// Insert an account storage map value into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest_update=true` for the new row and update any existing -/// row with the same `(account_id, slot, key)` tuple to `is_latest_update=false`. +/// This function will set `is_latest=true` for the new row and update any existing +/// row with the same `(account_id, slot, key)` tuple to `is_latest=false`. 
/// /// # Returns /// @@ -659,9 +716,9 @@ pub(crate) fn insert_account_storage_map_value( .eq(&account_id) .and(schema::account_storage_map_values::slot.eq(slot)) .and(schema::account_storage_map_values::key.eq(&key)) - .and(schema::account_storage_map_values::is_latest_update.eq(true)), + .and(schema::account_storage_map_values::is_latest.eq(true)), ) - .set(schema::account_storage_map_values::is_latest_update.eq(false)) + .set(schema::account_storage_map_values::is_latest.eq(false)) .execute(conn)?; let record = AccountStorageMapRowInsert { @@ -670,7 +727,7 @@ pub(crate) fn insert_account_storage_map_value( value, slot, block_num, - is_latest_update: true, + is_latest: true, }; let insert_count = diesel::insert_into(schema::account_storage_map_values::table) .values(record) @@ -701,6 +758,7 @@ pub(crate) fn upsert_accounts( (AccountRaw::as_select(), schema::account_codes::code.nullable()), ) .filter(schema::accounts::account_id.eq(account_id)) + .filter(schema::accounts::is_latest.eq(true)) .get_results::<(AccountRaw, Option>)>(conn)?; // SELECT .. 
FROM accounts LEFT JOIN account_codes @@ -844,16 +902,22 @@ pub(crate) fn upsert_accounts( code_commitment: full_account .as_ref() .map(|account| account.code().commitment().to_bytes()), + is_latest: true, }; - let v = account_value.clone(); - let inserted = diesel::insert_into(schema::accounts::table) - .values(&v) - .on_conflict(schema::accounts::account_id) - .do_update() - .set(account_value) + // Update any existing rows for this account_id to set is_latest = false + diesel::update(schema::accounts::table) + .filter( + schema::accounts::account_id + .eq(&account_id.to_bytes()) + .and(schema::accounts::is_latest.eq(true)), + ) + .set(schema::accounts::is_latest.eq(false)) .execute(conn)?; + let v = account_value.clone(); + let inserted = diesel::insert_into(schema::accounts::table).values(&v).execute(conn)?; + debug_assert_eq!(inserted, 1); count += inserted; @@ -899,6 +963,7 @@ pub(crate) struct AccountRowInsert { pub(crate) storage: Option>, pub(crate) vault: Option>, pub(crate) nonce: Option, + pub(crate) is_latest: bool, } #[derive(Insertable, AsChangeset, Debug, Clone)] @@ -908,7 +973,7 @@ pub(crate) struct AccountAssetRowInsert { pub(crate) block_num: i64, pub(crate) vault_key: Vec, pub(crate) asset: Option>, - pub(crate) is_latest_update: bool, + pub(crate) is_latest: bool, } impl AccountAssetRowInsert { @@ -917,7 +982,7 @@ impl AccountAssetRowInsert { vault_key: &AssetVaultKey, block_num: BlockNumber, asset: Option, - is_latest_update: bool, + is_latest: bool, ) -> Self { let account_id = account_id.to_bytes(); let vault_key: Word = (*vault_key).into(); @@ -929,7 +994,7 @@ impl AccountAssetRowInsert { block_num, vault_key, asset, - is_latest_update, + is_latest, } } } @@ -942,5 +1007,5 @@ pub(crate) struct AccountStorageMapRowInsert { pub(crate) slot: i32, pub(crate) key: Vec, pub(crate) value: Vec, - pub(crate) is_latest_update: bool, + pub(crate) is_latest: bool, } diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 
a00f63c98f..f269aee3b4 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -7,7 +7,7 @@ diesel::table! { slot -> Integer, key -> Binary, value -> Binary, - is_latest_update -> Bool, + is_latest -> Bool, } } @@ -17,12 +17,12 @@ diesel::table! { block_num -> BigInt, vault_key -> Binary, asset -> Nullable, - is_latest_update -> Bool, + is_latest -> Bool, } } diesel::table! { - accounts (account_id) { + accounts (account_id, block_num) { account_id -> Binary, network_account_id_prefix -> Nullable, account_commitment -> Binary, @@ -31,6 +31,7 @@ diesel::table! { vault -> Nullable, nonce -> Nullable, block_num -> BigInt, + is_latest -> Bool, } } @@ -101,11 +102,12 @@ diesel::table! { diesel::joinable!(accounts -> account_codes (code_commitment)); diesel::joinable!(accounts -> block_headers (block_num)); -diesel::joinable!(notes -> accounts (sender)); +// Note: Cannot use diesel::joinable! with accounts table due to composite primary key +// diesel::joinable!(notes -> accounts (sender)); +// diesel::joinable!(transactions -> accounts (account_id)); diesel::joinable!(notes -> block_headers (committed_at)); diesel::joinable!(notes -> note_scripts (script_root)); diesel::joinable!(nullifiers -> block_headers (block_num)); -diesel::joinable!(transactions -> accounts (account_id)); diesel::joinable!(transactions -> block_headers (block_num)); diesel::allow_tables_to_appear_in_same_query!( diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 6a17c60c53..4ca54020bf 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -98,6 +98,8 @@ pub enum DatabaseError { AccountNotFoundInDb(AccountId), #[error("account {0} state at block height {1} not found")] AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber), + #[error("historical block {block_num} not available: {reason}")] + HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String }, #[error("accounts {0:?} not found")] AccountsNotFoundInDb(Vec), 
#[error("account {0} is not on the chain")] @@ -169,6 +171,8 @@ impl From for Status { #[derive(Error, Debug)] pub enum StateInitializationError { + #[error("account tree IO error: {0}")] + AccountTreeIoError(String), #[error("database error")] DatabaseError(#[from] DatabaseError), #[error("failed to create nullifier tree")] diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 11c5be1c12..fc8ce87dff 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -150,11 +150,6 @@ impl State { let chain_mmr = load_mmr(&mut db).await?; let block_headers = db.select_all_block_headers().await?; - // TODO: Account tree loading synchronization - // Currently `load_account_tree` loads all account commitments from the DB. This could - // potentially lead to inconsistency if the DB contains account states from blocks beyond - // `latest_block_num`, though in practice the DB writes are transactional and this - // should not occur. let latest_block_num = block_headers .last() .map_or(BlockNumber::GENESIS, miden_objects::block::BlockHeader::block_num); @@ -919,27 +914,39 @@ impl State { /// Returns the respective account proof with optional details, such as asset and storage /// entries. /// - /// Note: The `block_num` parameter in the request is currently ignored and will always - /// return the current state. Historical block support will be implemented in a future update. + /// When `block_num` is provided, this method will return the account state at that specific + /// block using both the historical account tree witness and historical database state. 
#[allow(clippy::too_many_lines)] pub async fn get_account_proof( &self, account_request: AccountProofRequest, ) -> Result { let AccountProofRequest { block_num, account_id, details } = account_request; - let _ = block_num.ok_or_else(|| { - DatabaseError::NotImplemented( - "Handling of historical/past block numbers is not implemented yet".to_owned(), - ) - }); // Lock inner state for the whole operation. We need to hold this lock to prevent the // database, account tree and latest block number from changing during the operation, // because changing one of them would lead to inconsistent state. let inner_state = self.inner.read().await; - let block_num = inner_state.account_tree.block_number_latest(); - let witness = inner_state.account_tree.open_latest(account_id); + // Determine which block to query + let (block_num, witness) = if let Some(requested_block) = block_num { + // Historical query: use the account tree with history + let witness = inner_state + .account_tree + .open_at(account_id, requested_block) + .ok_or_else(|| DatabaseError::HistoricalBlockNotAvailable { + block_num: requested_block, + reason: "Block is either in the future or has been pruned from history" + .to_string(), + })?; + (requested_block, witness) + } else { + // Latest query: use the latest state + let block_num = inner_state.account_tree.block_number_latest(); + let witness = inner_state.account_tree.open_latest(account_id); + (block_num, witness) + }; + drop(inner_state); let account_details = if let Some(AccountDetailRequest { code_commitment, @@ -947,7 +954,7 @@ impl State { storage_requests, }) = details { - let account_info = self.db.select_account(account_id).await?; + let account_info = self.db.select_historical_account_at(account_id, block_num).await?; // if we get a query for a _private_ account _with_ details requested, we'll error out let Some(account) = account_info.details else { @@ -1130,14 +1137,24 @@ async fn load_account_tree( ) -> Result, StateInitializationError> { let 
account_data = db.select_all_account_commitments().await?.into_iter().collect::>(); - // Convert account_data to use account_id_to_smt_key let smt_entries = account_data .into_iter() .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(MemoryStorage::default(), smt_entries) - .expect("Failed to create LargeSmt from database account data"); + let smt = + LargeSmt::with_entries(MemoryStorage::default(), smt_entries).map_err(|e| match e { + miden_objects::crypto::merkle::LargeSmtError::Merkle(merkle_error) => { + StateInitializationError::DatabaseError(DatabaseError::MerkleError(merkle_error)) + }, + miden_objects::crypto::merkle::LargeSmtError::Storage(err) => { + // large_smt::StorageError is not `Sync` and hence `context` cannot be called + // which we want to and do + StateInitializationError::AccountTreeIoError(err.as_report()) + }, + })?; + + let account_tree = + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree)?; - let account_tree = AccountTree::new(smt).expect("Failed to create AccountTree"); Ok(AccountTreeWithHistory::new(account_tree, block_number)) } From 890b6e6fe0ce974daec48f9589183665a24f0481 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 25 Nov 2025 17:27:26 -0300 Subject: [PATCH 016/125] feat(stresstest): add sync transaction benchmark (#1294) --- CHANGELOG.md | 1 + bin/stress-test/README.md | 18 +++ bin/stress-test/src/main.rs | 31 ++++- bin/stress-test/src/store/mod.rs | 189 +++++++++++++++++++++++++++++++ 4 files changed, 238 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b7d43be4d..a2e8155689 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). - Normalize response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). 
- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). +- Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). diff --git a/bin/stress-test/README.md b/bin/stress-test/README.md index dee33fa9d3..4d8c283c6e 100644 --- a/bin/stress-test/README.md +++ b/bin/stress-test/README.md @@ -23,6 +23,7 @@ The endpoints that you can test are: - `sync_state` - `sync_notes` - `sync_nullifiers` +- `sync_transactions` Most benchmarks accept options to control the number of iterations and concurrency level. The `load_state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. @@ -153,5 +154,22 @@ P99.9 request latency: 2.289709ms Average nullifiers per response: 21.0348 ``` +- sync-transactions +``` bash +$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-transactions --accounts 5 --block-range 100 + +Average request latency: 1.61454ms +P50 request latency: 1.439584ms +P95 request latency: 3.195001ms +P99 request latency: 4.068709ms +P99.9 request latency: 6.888542ms +Average transactions per response: 1.547 +Pagination statistics: + Total runs: 10000 + Runs triggering pagination: 9971 + Pagination rate: 99.71% + Average pages per run: 2.00 +``` + ## License This project is [MIT licensed](../../LICENSE). 
diff --git a/bin/stress-test/src/main.rs b/bin/stress-test/src/main.rs index 62b5ddc6dd..095b04caf1 100644 --- a/bin/stress-test/src/main.rs +++ b/bin/stress-test/src/main.rs @@ -3,7 +3,13 @@ use std::path::PathBuf; use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; use seeding::seed_store; -use store::{bench_sync_notes, bench_sync_nullifiers, bench_sync_state, load_state}; +use store::{ + bench_sync_notes, + bench_sync_nullifiers, + bench_sync_state, + bench_sync_transactions, + load_state, +}; mod seeding; mod store; @@ -58,13 +64,26 @@ pub enum Command { #[derive(Subcommand, Clone, Copy)] pub enum Endpoint { + #[command(name = "sync-nullifiers")] SyncNullifiers { /// Number of prefixes to send in each request. #[arg(short, long, value_name = "PREFIXES", default_value = "10")] prefixes: usize, }, + #[command(name = "sync-state")] SyncState, + #[command(name = "sync-notes")] SyncNotes, + #[command(name = "sync-transactions")] + SyncTransactions { + /// Number of accounts to sync transactions for in each request. + #[arg(short, long, value_name = "ACCOUNTS", default_value = "5")] + accounts: usize, + /// Block range size for each request (number of blocks to query). 
+ #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "100")] + block_range: u32, + }, + #[command(name = "load-state")] LoadState, } @@ -98,6 +117,16 @@ async fn main() { Endpoint::SyncNotes => { bench_sync_notes(data_directory, iterations, concurrency).await; }, + Endpoint::SyncTransactions { accounts, block_range } => { + bench_sync_transactions( + data_directory, + iterations, + concurrency, + accounts, + block_range, + ) + .await; + }, Endpoint::LoadState => { load_state(&data_directory).await; }, diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index d218265212..a0adb87ab5 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -9,6 +9,8 @@ use miden_node_utils::tracing::grpc::OtelInterceptor; use miden_objects::account::AccountId; use miden_objects::note::{NoteDetails, NoteTag}; use miden_objects::utils::{Deserializable, Serializable}; +use rand::Rng; +use rand::seq::SliceRandom; use tokio::fs; use tokio::time::sleep; use tonic::service::interceptor::InterceptedService; @@ -292,6 +294,193 @@ async fn sync_nullifiers( (start.elapsed(), response.into_inner()) } +// SYNC TRANSACTIONS +// ================================================================================================ + +/// Sends multiple `sync_transactions` requests to the store and prints the performance. +/// +/// Arguments: +/// - `data_directory`: directory that contains the database dump file and the accounts ids dump +/// file. +/// - `iterations`: number of requests to send. +/// - `concurrency`: number of requests to send in parallel. +/// - `accounts_per_request`: number of accounts to sync transactions for in each request. 
+pub async fn bench_sync_transactions( + data_directory: PathBuf, + iterations: usize, + concurrency: usize, + accounts_per_request: usize, + block_range_size: u32, +) { + // load accounts from the dump file + let accounts_file = data_directory.join(ACCOUNTS_FILENAME); + let accounts = fs::read_to_string(&accounts_file) + .await + .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); + let mut account_ids: Vec = accounts + .lines() + .map(|a| AccountId::from_hex(a).expect("invalid account id")) + .collect(); + // Shuffle once so the cycling iterator starts in a random order. + let mut rng = rand::rng(); + account_ids.shuffle(&mut rng); + let mut account_ids = account_ids.into_iter().cycle(); + + let (store_client, _) = start_store(data_directory).await; + + wait_for_store(&store_client).await.unwrap(); + + // Get the latest block number to determine the range + let status = store_client.clone().status(()).await.unwrap().into_inner(); + let chain_tip = status.chain_tip; + + // each request will have `accounts_per_request` account ids and will query a range of blocks + let request = |_| { + let mut client = store_client.clone(); + let account_batch: Vec = + account_ids.by_ref().take(accounts_per_request).collect(); + + // Pick a random window of size `block_range_size` that fits before `chain_tip`. 
+ let max_start = chain_tip.saturating_sub(block_range_size); + let start_block = rand::rng().random_range(0..=max_start); + let end_block = start_block.saturating_add(block_range_size).min(chain_tip); + + tokio::spawn(async move { + sync_transactions_paginated(&mut client, account_batch, start_block, end_block).await + }) + }; + + // create a stream of tasks to send sync_transactions requests + let results = stream::iter(0..iterations) + .map(request) + .buffer_unordered(concurrency) + .map(|res| res.unwrap()) + .collect::>() + .await; + + let timers_accumulator: Vec = results.iter().map(|r| r.duration).collect(); + let responses: Vec = + results.iter().map(|r| r.response.clone()).collect(); + + print_summary(&timers_accumulator); + + #[allow(clippy::cast_precision_loss)] + let average_transactions_per_response = if responses.is_empty() { + 0.0 + } else { + responses.iter().map(|r| r.transactions.len()).sum::() as f64 + / responses.len() as f64 + }; + println!("Average transactions per response: {average_transactions_per_response}"); + + // Calculate pagination statistics + let total_runs = results.len(); + let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); + #[allow(clippy::cast_precision_loss)] + let pagination_rate = if total_runs > 0 { + (paginated_runs as f64 / total_runs as f64) * 100.0 + } else { + 0.0 + }; + #[allow(clippy::cast_precision_loss)] + let avg_pages = if total_runs > 0 { + results.iter().map(|r| r.pages as f64).sum::() / total_runs as f64 + } else { + 0.0 + }; + + println!("Pagination statistics:"); + println!(" Total runs: {total_runs}"); + println!(" Runs triggering pagination: {paginated_runs}"); + println!(" Pagination rate: {pagination_rate:.2}%"); + println!(" Average pages per run: {avg_pages:.2}"); +} + +/// Sends a single `sync_transactions` request to the store and returns a tuple with: +/// - the elapsed time. +/// - the response. 
+pub async fn sync_transactions( + api_client: &mut RpcClient>, + account_ids: Vec, + block_from: u32, + block_to: u32, +) -> (Duration, proto::rpc_store::SyncTransactionsResponse) { + let account_ids = account_ids + .iter() + .map(|id| proto::account::AccountId { id: id.to_bytes() }) + .collect::>(); + + let sync_request = proto::rpc_store::SyncTransactionsRequest { + block_range: Some(proto::rpc_store::BlockRange { block_from, block_to: Some(block_to) }), + account_ids, + }; + + let start = Instant::now(); + let response = api_client.sync_transactions(sync_request).await.unwrap(); + (start.elapsed(), response.into_inner()) +} + +#[derive(Clone)] +struct SyncTransactionsRun { + duration: Duration, + response: proto::rpc_store::SyncTransactionsResponse, + pages: usize, +} + +async fn sync_transactions_paginated( + api_client: &mut RpcClient>, + account_ids: Vec, + block_from: u32, + block_to: u32, +) -> SyncTransactionsRun { + let mut total_duration = Duration::default(); + let mut aggregated_records = Vec::new(); + let mut next_block_from = block_from; + let mut target_block_to = block_to; + let mut pages = 0usize; + let mut final_pagination_info = None; + + loop { + if next_block_from > target_block_to { + break; + } + + let (elapsed, response) = + sync_transactions(api_client, account_ids.clone(), next_block_from, target_block_to) + .await; + total_duration += elapsed; + pages += 1; + + let info = response.pagination_info.unwrap_or(proto::rpc_store::PaginationInfo { + chain_tip: target_block_to, + block_num: target_block_to, + }); + + aggregated_records.extend(response.transactions.into_iter()); + let reached_block = info.block_num; + let chain_tip = info.chain_tip; + final_pagination_info = + Some(proto::rpc_store::PaginationInfo { chain_tip, block_num: reached_block }); + + if reached_block >= chain_tip { + break; + } + + // Request the remaining range up to the reported chain tip + next_block_from = reached_block; + target_block_to = chain_tip; + } + + 
SyncTransactionsRun { + duration: total_duration, + response: proto::rpc_store::SyncTransactionsResponse { + pagination_info: final_pagination_info, + transactions: aggregated_records, + }, + pages, + } +} + // LOAD STATE // ================================================================================================ From 5d498b0585f56455ef9edd8935c8e019dd45c341 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 26 Nov 2025 11:09:44 +0100 Subject: [PATCH 017/125] store: refactor/simplify get_account* (#1379) --- crates/store/src/errors.rs | 2 + crates/store/src/state.rs | 159 +++++++++++++++++++++---------------- 2 files changed, 93 insertions(+), 68 deletions(-) diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 4ca54020bf..88a5583b0b 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -104,6 +104,8 @@ pub enum DatabaseError { AccountsNotFoundInDb(Vec), #[error("account {0} is not on the chain")] AccountNotPublic(AccountId), + #[error("account {0} details missing")] + AccountDetailsMissing(AccountId), #[error("invalid block parameters: block_from ({from}) > block_to ({to})")] InvalidBlockRange { from: BlockNumber, to: BlockNumber }, #[error("data corrupted: {0}")] diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index fc8ce87dff..37af8afd11 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -916,16 +916,36 @@ impl State { /// /// When `block_num` is provided, this method will return the account state at that specific /// block using both the historical account tree witness and historical database state. - #[allow(clippy::too_many_lines)] pub async fn get_account_proof( &self, account_request: AccountProofRequest, ) -> Result { let AccountProofRequest { block_num, account_id, details } = account_request; - // Lock inner state for the whole operation. 
We need to hold this lock to prevent the - // database, account tree and latest block number from changing during the operation, - // because changing one of them would lead to inconsistent state. + if details.is_some() && !account_id.is_public() { + return Err(DatabaseError::AccountNotPublic(account_id)); + } + + let (block_num, witness) = self.get_block_witness(block_num, account_id).await?; + + let details = if let Some(request) = details { + Some(self.fetch_public_account_details(account_id, block_num, request).await?) + } else { + None + }; + + Ok(AccountProofResponse { block_num, witness, details }) + } + + /// Gets the block witness (account tree proof) for the specified account + /// + /// If `block_num` is provided, returns the witness at that historical block, + /// if not present, returns the witness at the latest block. + async fn get_block_witness( + &self, + block_num: Option, + account_id: AccountId, + ) -> Result<(BlockNumber, AccountWitness), DatabaseError> { let inner_state = self.inner.read().await; // Determine which block to query @@ -946,86 +966,89 @@ impl State { let witness = inner_state.account_tree.open_latest(account_id); (block_num, witness) }; - drop(inner_state); - let account_details = if let Some(AccountDetailRequest { + Ok((block_num, witness)) + } + + /// Fetches the account details (code, vault, storage) for a public account at the specified + /// block. + /// + /// This method queries the database to fetch the account state and processes the detail + /// request to return only the requested information. 
+ async fn fetch_public_account_details( + &self, + account_id: AccountId, + block_num: BlockNumber, + detail_request: AccountDetailRequest, + ) -> Result { + let AccountDetailRequest { code_commitment, asset_vault_commitment, storage_requests, - }) = details - { - let account_info = self.db.select_historical_account_at(account_id, block_num).await?; - - // if we get a query for a _private_ account _with_ details requested, we'll error out - let Some(account) = account_info.details else { - return Err(DatabaseError::AccountNotPublic(account_id)); - }; + } = detail_request; - let storage_header = account.storage().to_header(); + let account_info = self.db.select_historical_account_at(account_id, block_num).await?; - let mut storage_map_details = - Vec::::with_capacity(storage_requests.len()); + // If we get a query for a public account but the details are missing from the database, + // it indicates an inconsistent state in the database. + let Some(account) = account_info.details else { + return Err(DatabaseError::AccountDetailsMissing(account_id)); + }; - for StorageMapRequest { slot_index, slot_data } in storage_requests { - let Some(StorageSlot::Map(storage_map)) = - account.storage().slots().get(slot_index as usize) - else { - return Err(AccountError::StorageSlotNotMap(slot_index).into()); - }; - let details = AccountStorageMapDetails::new(slot_index, slot_data, storage_map); - storage_map_details.push(details); - } + let storage_header = account.storage().to_header(); - // Only include unknown account code blobs, which is equal to a account code digest - // mismatch. If `None` was requested, don't return any. 
- let account_code = code_commitment - .is_some_and(|code_commitment| code_commitment != account.code().commitment()) - .then(|| account.code().to_bytes()); + let mut storage_map_details = + Vec::::with_capacity(storage_requests.len()); - // storage details - let storage_details = AccountStorageDetails { - header: storage_header, - map_details: storage_map_details, + for StorageMapRequest { slot_index, slot_data } in storage_requests { + let Some(StorageSlot::Map(storage_map)) = + account.storage().slots().get(slot_index as usize) + else { + return Err(AccountError::StorageSlotNotMap(slot_index).into()); }; + let details = AccountStorageMapDetails::new(slot_index, slot_data, storage_map); + storage_map_details.push(details); + } - // Handle vault details based on the `asset_vault_commitment`. - // Similar to `code_commitment`, if the provided commitment matches, we don't return - // vault data. If no commitment is provided or it doesn't match, we return - // the vault data. If the number of vault contained assets are exceeding a - // limit, we signal this back in the response and the user must handle that - // in follow-up request. - let vault_details = match asset_vault_commitment { - Some(commitment) if commitment == account.vault().root() => { - // The client already has the correct vault data - AccountVaultDetails::empty() - }, - Some(_) => { - // The commitment doesn't match, so return vault data - AccountVaultDetails::new(account.vault()) - }, - None => { - // No commitment provided, so don't return vault data - AccountVaultDetails::empty() - }, - }; + // Only include unknown account code blobs, which is equal to a account code digest + // mismatch. If `None` was requested, don't return any. 
+ let account_code = code_commitment + .is_some_and(|code_commitment| code_commitment != account.code().commitment()) + .then(|| account.code().to_bytes()); - Some(AccountDetails { - account_header: AccountHeader::from(account), - account_code, - vault_details, - storage_details, - }) - } else { - None + // storage details + let storage_details = AccountStorageDetails { + header: storage_header, + map_details: storage_map_details, }; - let response = AccountProofResponse { - block_num, - witness, - details: account_details, + // Handle vault details based on the `asset_vault_commitment`. + // Similar to `code_commitment`, if the provided commitment matches, we don't return + // vault data. If no commitment is provided or it doesn't match, we return + // the vault data. If the number of vault contained assets are exceeding a + // limit, we signal this back in the response and the user must handle that + // in follow-up request. + let vault_details = match asset_vault_commitment { + Some(commitment) if commitment == account.vault().root() => { + // The client already has the correct vault data + AccountVaultDetails::empty() + }, + Some(_) => { + // The commitment doesn't match, so return vault data + AccountVaultDetails::new(account.vault()) + }, + None => { + // No commitment provided, so don't return vault data + AccountVaultDetails::empty() + }, }; - Ok(response) + Ok(AccountDetails { + account_header: AccountHeader::from(account), + account_code, + vault_details, + storage_details, + }) } /// Returns storage map values for syncing within a block range. 
From 8d76576ef63b1e06402b0c729e836a149507c456 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 27 Nov 2025 04:43:49 -0300 Subject: [PATCH 018/125] feat: add timeout support for WASM clients (#1383) --- CHANGELOG.md | 1 + .../remote-prover-client/src/remote_prover/batch_prover.rs | 7 ++++++- .../remote-prover-client/src/remote_prover/block_prover.rs | 7 ++++++- crates/remote-prover-client/src/remote_prover/tx_prover.rs | 7 ++++++- 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 993c2babaf..e0fa6c5553 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). +- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). 
### Fixes diff --git a/crates/remote-prover-client/src/remote_prover/batch_prover.rs b/crates/remote-prover-client/src/remote_prover/batch_prover.rs index 3c75097c26..ed9c7a382e 100644 --- a/crates/remote-prover-client/src/remote_prover/batch_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/batch_prover.rs @@ -71,7 +71,12 @@ impl RemoteBatchProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let mut fetch_options = + tonic_web_wasm_client::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index 694f11b371..c533bef4d6 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -72,7 +72,12 @@ impl RemoteBlockProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let mut fetch_options = + tonic_web_wasm_client::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; diff --git a/crates/remote-prover-client/src/remote_prover/tx_prover.rs b/crates/remote-prover-client/src/remote_prover/tx_prover.rs index bf6239646d..b94c9a9ea8 100644 --- a/crates/remote-prover-client/src/remote_prover/tx_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/tx_prover.rs @@ -72,7 +72,12 @@ impl RemoteTransactionProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let mut fetch_options = + 
tonic_web_wasm_client::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; From db22698df343e98695232c8346249ec5ffc8ea6c Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Fri, 28 Nov 2025 23:57:19 +1300 Subject: [PATCH 019/125] feat: Validate and prove blocks (#1381) --- CHANGELOG.md | 1 + Cargo.lock | 31 ++-- bin/node/.env | 1 + bin/node/src/commands/block_producer.rs | 9 ++ bin/node/src/commands/bundled.rs | 3 + bin/node/src/commands/mod.rs | 1 + bin/remote-prover/Cargo.toml | 1 + bin/remote-prover/src/api/prover.rs | 20 +-- .../src/generated/conversions.rs | 6 +- .../src/generated/remote_prover.rs | 4 +- bin/stress-test/src/seeding/mod.rs | 9 +- .../block-producer/src/block_builder/mod.rs | 132 +++++++++++++++--- crates/block-producer/src/errors.rs | 25 +++- crates/block-producer/src/lib.rs | 1 + crates/block-producer/src/server/mod.rs | 6 +- crates/block-producer/src/validator/mod.rs | 85 +++++++++++ crates/ntx-builder/src/state/mod.rs | 2 +- crates/proto/src/clients/mod.rs | 23 +++ crates/proto/src/domain/mod.rs | 1 + crates/proto/src/domain/proof_request.rs | 39 ++++++ crates/proto/src/generated/blockchain.rs | 16 +++ crates/proto/src/generated/remote_prover.rs | 4 +- crates/proto/src/generated/validator.rs | 91 ++++++++++++ crates/proto/src/lib.rs | 1 + crates/remote-prover-client/Cargo.toml | 15 +- .../src/remote_prover/block_prover.rs | 67 +++------ .../generated/nostd/remote_prover.rs | 4 +- .../generated/std/remote_prover.rs | 4 +- crates/rpc/src/tests.rs | 2 +- crates/store/Cargo.toml | 27 ++-- crates/store/src/db/mod.rs | 10 +- crates/store/src/genesis/mod.rs | 17 ++- crates/store/src/server/block_producer.rs | 8 +- crates/store/src/state.rs | 17 ++- crates/validator/Cargo.toml | 2 + crates/validator/src/server/mod.rs | 38 ++++- proto/proto/remote_prover.proto | 4 +- 
proto/proto/types/blockchain.proto | 17 +++ proto/proto/validator.proto | 15 ++ 39 files changed, 605 insertions(+), 154 deletions(-) create mode 100644 crates/block-producer/src/validator/mod.rs create mode 100644 crates/proto/src/domain/proof_request.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index e0fa6c5553..d6509aac7c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). +- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). ### Fixes diff --git a/Cargo.lock b/Cargo.lock index 5d94053303..333522f338 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1346,7 +1346,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2111,7 +2111,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2486,9 +2486,8 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" dependencies = [ - "miden-lib", "miden-objects", "thiserror 2.0.17", ] @@ -2583,7 +2582,7 @@ dependencies = [ [[package]] name = "miden-lib" version = 
"0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" dependencies = [ "fs-err", "miden-assembly", @@ -2931,9 +2930,11 @@ name = "miden-node-validator" version = "0.13.0" dependencies = [ "anyhow", + "miden-lib", "miden-node-proto", "miden-node-proto-build", "miden-node-utils", + "miden-objects", "tokio", "tokio-stream", "tonic", @@ -2945,7 +2946,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" dependencies = [ "bech32", "getrandom 0.3.4", @@ -3014,6 +3015,7 @@ dependencies = [ "humantime", "miden-block-prover", "miden-lib", + "miden-node-proto", "miden-node-proto-build", "miden-node-utils", "miden-objects", @@ -3052,6 +3054,7 @@ version = "0.13.0" dependencies = [ "fs-err", "getrandom 0.3.4", + "miden-node-proto", "miden-node-proto-build", "miden-objects", "miden-tx", @@ -3085,7 +3088,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3103,7 +3106,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" dependencies = [ "miden-lib", "miden-objects", @@ -3116,7 +3119,7 @@ dependencies = [ [[package]] name = 
"miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#8592b0367c6ddb57a65b0773e348c3ea35c7bf0a" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" dependencies = [ "miden-objects", "miden-tx", @@ -3338,7 +3341,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -4660,7 +4663,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5293,7 +5296,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5302,7 +5305,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2111ef44dae28680ae9752bb89409e7310ca33a8c621ebe7b106cf5c928b3ac0" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -6311,7 +6314,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/bin/node/.env b/bin/node/.env index 75d0bbbbd0..29001ba361 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -9,6 +9,7 @@ MIDEN_NODE_NTX_PROVER_URL= MIDEN_NODE_STORE_RPC_URL= MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= +MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index 6820a38511..f65097e346 100644 --- a/bin/node/src/commands/block_producer.rs +++ 
b/bin/node/src/commands/block_producer.rs @@ -12,6 +12,7 @@ use crate::commands::{ BlockProducerConfig, DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, + ENV_VALIDATOR_BLOCK_PRODUCER_URL, duration_to_human_readable_string, }; @@ -27,6 +28,10 @@ pub enum BlockProducerCommand { #[arg(long = "store.url", env = ENV_STORE_BLOCK_PRODUCER_URL)] store_url: Url, + /// The validator's service gRPC url. + #[arg(long = "validator.url", env = ENV_VALIDATOR_BLOCK_PRODUCER_URL)] + validator_url: Url, + #[command(flatten)] block_producer: BlockProducerConfig, @@ -55,6 +60,7 @@ impl BlockProducerCommand { let Self::Start { url, store_url, + validator_url, block_producer, enable_otel: _, grpc_timeout, @@ -80,6 +86,7 @@ impl BlockProducerCommand { BlockProducer { block_producer_address, store_url, + validator_url, batch_prover_url: block_producer.batch_prover_url, block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, @@ -115,6 +122,7 @@ mod tests { let cmd = BlockProducerCommand::Start { url: dummy_url(), store_url: dummy_url(), + validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, block_prover_url: None, @@ -137,6 +145,7 @@ mod tests { let cmd = BlockProducerCommand::Start { url: dummy_url(), store_url: dummy_url(), + validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, block_prover_url: None, diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 71c6f5ac11..1ea4129d35 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -200,10 +200,13 @@ impl BundledCommand { let checkpoint = Arc::clone(&checkpoint); let store_url = Url::parse(&format!("http://{store_block_producer_address}")) .context("Failed to parse URL")?; + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; async move { BlockProducer { block_producer_address, store_url, + validator_url, 
batch_prover_url: block_producer.batch_prover_url, block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 90c91ccfc0..9d99a46ac2 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -18,6 +18,7 @@ const ENV_RPC_URL: &str = "MIDEN_NODE_RPC_URL"; const ENV_STORE_RPC_URL: &str = "MIDEN_NODE_STORE_RPC_URL"; const ENV_STORE_NTX_BUILDER_URL: &str = "MIDEN_NODE_STORE_NTX_BUILDER_URL"; const ENV_STORE_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_STORE_BLOCK_PRODUCER_URL"; +const ENV_VALIDATOR_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL"; const ENV_DATA_DIRECTORY: &str = "MIDEN_NODE_DATA_DIRECTORY"; const ENV_ENABLE_OTEL: &str = "MIDEN_NODE_ENABLE_OTEL"; const ENV_GENESIS_CONFIG_FILE: &str = "MIDEN_GENESIS_CONFIG_FILE"; diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 0bcc98a2ed..d3b28797d1 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -31,6 +31,7 @@ clap = { features = ["env"], workspace = true } http = { workspace = true } humantime = { workspace = true } miden-block-prover = { workspace = true } +miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-objects = { features = ["std"], workspace = true } miden-tx = { features = ["std"], workspace = true } diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index 2f4cbf6a73..b03c06aeae 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -1,8 +1,8 @@ use miden_block_prover::LocalBlockProver; +use miden_node_proto::BlockProofRequest; use miden_node_utils::ErrorReport; use miden_objects::MIN_PROOF_SECURITY_LEVEL; use miden_objects::batch::ProposedBatch; -use miden_objects::block::ProposedBlock; use miden_objects::transaction::TransactionInputs; use miden_objects::utils::Serializable; use 
miden_tx::LocalTransactionProver; @@ -165,24 +165,24 @@ impl ProverRpcApi { )] pub fn prove_block( &self, - proposed_block: ProposedBlock, + proof_request: BlockProofRequest, request_id: &str, ) -> Result, tonic::Status> { let Prover::Block(prover) = &self.prover else { return Err(Status::unimplemented("Block prover is not enabled")); }; - - let proven_block = prover + let BlockProofRequest { tx_batches, block_header, block_inputs } = proof_request; + let block_proof = prover .try_lock() .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? - .prove(proposed_block) + .prove(tx_batches, block_header.clone(), block_inputs) .map_err(internal_error)?; - // Record the commitment of the block in the current tracing span - let block_id = proven_block.commitment(); + // Record the commitment of the block in the current tracing span. + let block_id = block_header.commitment(); tracing::Span::current().record("block_id", tracing::field::display(&block_id)); - Ok(Response::new(proto::remote_prover::Proof { payload: proven_block.to_bytes() })) + Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) } } @@ -225,8 +225,8 @@ impl ProverApi for ProverRpcApi { self.prove_batch(proposed_batch, &request_id) }, proto::remote_prover::ProofType::Block => { - let proposed_block = proof_request.try_into().map_err(invalid_argument)?; - self.prove_block(proposed_block, &request_id) + let proof_request = proof_request.try_into().map_err(invalid_argument)?; + self.prove_block(proof_request, &request_id) }, } } diff --git a/bin/remote-prover/src/generated/conversions.rs b/bin/remote-prover/src/generated/conversions.rs index 885d1e06b7..8e4f933fd9 100644 --- a/bin/remote-prover/src/generated/conversions.rs +++ b/bin/remote-prover/src/generated/conversions.rs @@ -1,8 +1,8 @@ // CONVERSIONS // ================================================================================================ +use miden_node_proto::BlockProofRequest; use 
miden_objects::batch::ProposedBatch; -use miden_objects::block::ProposedBlock; use miden_objects::transaction::{ProvenTransaction, TransactionInputs}; use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; @@ -39,11 +39,11 @@ impl TryFrom for ProposedBatch { } } -impl TryFrom for ProposedBlock { +impl TryFrom for BlockProofRequest { type Error = DeserializationError; fn try_from(request: proto::ProofRequest) -> Result { - ProposedBlock::read_from_bytes(&request.payload) + BlockProofRequest::read_from_bytes(&request.payload) } } diff --git a/bin/remote-prover/src/generated/remote_prover.rs b/bin/remote-prover/src/generated/remote_prover.rs index 4d8ae80285..b504804c3e 100644 --- a/bin/remote-prover/src/generated/remote_prover.rs +++ b/bin/remote-prover/src/generated/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,7 +21,7 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. 
#[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index c35a6dd910..225001a3bb 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -9,6 +9,7 @@ use miden_block_prover::LocalBlockProver; use miden_lib::account::auth::AuthRpoFalcon512; use miden_lib::account::faucets::BasicFungibleFaucet; use miden_lib::account::wallets::BasicWallet; +use miden_lib::block::build_block; use miden_lib::note::create_p2id_note; use miden_lib::utils::Serializable; use miden_node_block_producer::store::StoreClient; @@ -245,8 +246,12 @@ async fn apply_block( store_client: &StoreClient, metrics: &mut SeedingMetrics, ) -> ProvenBlock { - let proposed_block = ProposedBlock::new(block_inputs, batches).unwrap(); - let proven_block = LocalBlockProver::new(0).prove_dummy(proposed_block).unwrap(); + let proposed_block = ProposedBlock::new(block_inputs.clone(), batches).unwrap(); + let (header, body) = build_block(proposed_block.clone()).unwrap(); + let block_proof = LocalBlockProver::new(0) + .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) + .unwrap(); + let proven_block = ProvenBlock::new_unchecked(header, body, block_proof); let block_size: usize = proven_block.to_bytes().len(); let start = Instant::now(); diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index 6a5cf53efa..ca742904c0 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -6,10 +6,18 @@ use futures::never::Never; use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_objects::MIN_PROOF_SECURITY_LEVEL; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock}; +use miden_objects::batch::{OrderedBatches, ProvenBatch}; +use 
miden_objects::block::{ + BlockBody, + BlockHeader, + BlockInputs, + BlockNumber, + BlockProof, + ProposedBlock, + ProvenBlock, +}; use miden_objects::note::NoteHeader; -use miden_objects::transaction::TransactionHeader; +use miden_objects::transaction::{OrderedTransactionHeaders, TransactionHeader}; use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; use rand::Rng; use tokio::time::Duration; @@ -19,6 +27,7 @@ use url::Url; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; use crate::store::StoreClient; +use crate::validator::BlockProducerValidatorClient; use crate::{COMPONENT, TelemetryInjectorExt}; // BLOCK BUILDER @@ -36,6 +45,8 @@ pub struct BlockBuilder { pub store: StoreClient, + pub validator: BlockProducerValidatorClient, + /// The prover used to prove a proposed block into a proven block. pub block_prover: BlockProver, } @@ -46,6 +57,7 @@ impl BlockBuilder { /// If the block prover URL is not set, the block builder will use the local block prover. pub fn new( store: StoreClient, + validator: BlockProducerValidatorClient, block_prover_url: Option, block_interval: Duration, ) -> Self { @@ -61,6 +73,7 @@ impl BlockBuilder { failure_rate: 0.0, block_prover, store, + validator, } } /// Starts the [`BlockBuilder`], infinitely producing blocks at the configured interval. 
@@ -112,8 +125,12 @@ impl BlockBuilder { self.get_block_inputs(selected) .inspect_ok(BlockBatchesAndInputs::inject_telemetry) .and_then(|inputs| self.propose_block(inputs)) - .inspect_ok(ProposedBlock::inject_telemetry) - .and_then(|inputs| self.prove_block(inputs)) + .inspect_ok(|(proposed_block, _)| { + ProposedBlock::inject_telemetry(proposed_block); + }) + .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) + .and_then(|(proposed_block, inputs, header, body)| self.prove_block(proposed_block, inputs, header, body)) + .and_then(|(proposed_block, header, body, block_proof)| self.construct_proven_block(proposed_block, header, body, block_proof)) .inspect_ok(ProvenBlock::inject_telemetry) // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot // handle errors after it considers the process complete (which makes sense). @@ -197,31 +214,68 @@ impl BlockBuilder { async fn propose_block( &self, batches_inputs: BlockBatchesAndInputs, - ) -> Result { + ) -> Result<(ProposedBlock, BlockInputs), BuildBlockError> { let BlockBatchesAndInputs { batches, inputs } = batches_inputs; let batches = batches.into_iter().map(Arc::unwrap_or_clone).collect(); - let proposed_block = - ProposedBlock::new(inputs, batches).map_err(BuildBlockError::ProposeBlockFailed)?; + let proposed_block = ProposedBlock::new(inputs.clone(), batches) + .map_err(BuildBlockError::ProposeBlockFailed)?; - Ok(proposed_block) + Ok((proposed_block, inputs)) + } + + #[instrument(target = COMPONENT, name = "block_builder.validate_block", skip_all, err)] + async fn validate_block( + &self, + proposed_block: ProposedBlock, + block_inputs: BlockInputs, + ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, BlockBody), BuildBlockError> { + let response = self + .validator + .validate_block(proposed_block.clone()) + .await + .map_err(BuildBlockError::ValidateBlockFailed)?; + + // TODO: Check that the returned header and body match the 
proposed block. + + let (ordered_batches, ..) = proposed_block.into_parts(); + Ok((ordered_batches, block_inputs, response.header, response.body)) } #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] async fn prove_block( &self, - proposed_block: ProposedBlock, - ) -> Result { - let proven_block = self.block_prover.prove(proposed_block).await?; + ordered_batches: OrderedBatches, + block_inputs: BlockInputs, + header: BlockHeader, + body: BlockBody, + ) -> Result<(OrderedBatches, BlockHeader, BlockBody, BlockProof), BuildBlockError> { + // Prove block using header and body from validator. + let block_proof = self + .block_prover + .prove(ordered_batches.clone(), header.clone(), block_inputs) + .await?; + self.simulate_proving().await; + + Ok((ordered_batches, header, body, block_proof)) + } + #[instrument(target = COMPONENT, name = "block_builder.construct_proven_block", skip_all, err)] + async fn construct_proven_block( + &self, + ordered_batches: OrderedBatches, + header: BlockHeader, + body: BlockBody, + block_proof: BlockProof, + ) -> Result { + let proven_block = ProvenBlock::new_unchecked(header, body, block_proof); if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { return Err(BuildBlockError::SecurityLevelTooLow( proven_block.proof_security_level(), MIN_PROOF_SECURITY_LEVEL, )); } - - self.simulate_proving().await; + validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; Ok(proven_block) } @@ -388,15 +442,55 @@ impl BlockProver { } #[instrument(target = COMPONENT, skip_all, err)] - async fn prove(&self, proposed_block: ProposedBlock) -> Result { + async fn prove( + &self, + tx_batches: OrderedBatches, + block_header: BlockHeader, + block_inputs: BlockInputs, + ) -> Result { match self { - Self::Local(prover) => { - prover.prove(proposed_block).map_err(BuildBlockError::ProveBlockFailed) - }, + Self::Local(prover) => prover + .prove(tx_batches, block_header, block_inputs) + 
.map_err(BuildBlockError::ProveBlockFailed), Self::Remote(prover) => prover - .prove(proposed_block) + .prove(tx_batches, block_header, block_inputs) .await .map_err(BuildBlockError::RemoteProverClientError), } } } } + +/// Validates that the proven block's transaction headers are consistent with the transactions +/// passed in the proposed block. +/// +/// This expects that transactions from the proposed block and proven block are in the same +/// order, as defined by [`OrderedTransactionHeaders`]. +fn validate_tx_headers( + proven_block: &ProvenBlock, + proposed_txs: &OrderedTransactionHeaders, +) -> Result<(), BuildBlockError> { + if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { + return Err(BuildBlockError::other(format!( + "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", + proven_block.body().transactions().as_slice().len(), + proposed_txs.as_slice().len() + ))); + } + + // Because we checked the length matches we can zip the iterators up. + // We expect the transaction headers to be in the same order. 
+ for (proposed_header, proven_header) in proposed_txs + .as_slice() + .iter() + .zip(proven_block.body().transactions().as_slice()) + { + if proposed_header != proven_header { + return Err(BuildBlockError::other(format!( + "transaction header with id {} does not match header of the transaction in the proposed block", + proposed_header.id() + ))); + } + } + + Ok(()) +} diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 9d55617a22..0a0aa176cd 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,4 +1,6 @@ -use miden_block_prover::ProvenBlockError; +use core::error::Error as CoreError; + +use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_objects::account::AccountId; use miden_objects::block::BlockNumber; @@ -9,6 +11,8 @@ use miden_remote_prover_client::RemoteProverClientError; use thiserror::Error; use tokio::task::JoinError; +use crate::validator::ValidatorError; + // Block-producer errors // ================================================================================================= @@ -204,8 +208,10 @@ pub enum BuildBlockError { GetBlockInputsFailed(#[source] StoreError), #[error("failed to propose block")] ProposeBlockFailed(#[source] ProposedBlockError), + #[error("failed to validate block")] + ValidateBlockFailed(#[source] ValidatorError), #[error("failed to prove block")] - ProveBlockFailed(#[source] ProvenBlockError), + ProveBlockFailed(#[source] BlockProverError), /// We sometimes randomly inject errors into the batch building process to test our failure /// responses. #[error("nothing actually went wrong, failure was injected on purpose")] @@ -214,6 +220,21 @@ pub enum BuildBlockError { RemoteProverClientError(#[source] RemoteProverClientError), #[error("block proof security level is too low: {0} < {1}")] SecurityLevelTooLow(u32, u32), + /// Custom error variant for errors not covered by the other variants. 
+ #[error("{error_msg}")] + Other { + error_msg: Box, + source: Option>, + }, +} + +impl BuildBlockError { + /// Creates a custom error using the [`BuildBlockError::Other`] variant from an + /// error message. + pub fn other(message: impl Into) -> Self { + let message: String = message.into(); + Self::Other { error_msg: message.into(), source: None } + } } // Store errors diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 503e29cc16..3e30b25fab 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -9,6 +9,7 @@ mod block_builder; mod domain; mod mempool; pub mod store; +pub mod validator; #[cfg(feature = "testing")] pub mod errors; diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 39753fe831..4bedc31e2c 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -36,6 +36,7 @@ use crate::errors::{ }; use crate::mempool::{BatchBudget, BlockBudget, Mempool, MempoolConfig, SharedMempool}; use crate::store::StoreClient; +use crate::validator::BlockProducerValidatorClient; use crate::{COMPONENT, SERVER_NUM_BATCH_BUILDERS}; /// The block producer server. @@ -49,6 +50,8 @@ pub struct BlockProducer { pub block_producer_address: SocketAddr, /// The address of the store component. pub store_url: Url, + /// The address of the validator component. + pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option, /// The address of the block prover component. @@ -81,6 +84,7 @@ impl BlockProducer { pub async fn serve(self) -> anyhow::Result<()> { info!(target: COMPONENT, endpoint=?self.block_producer_address, store=%self.store_url, "Initializing server"); let store = StoreClient::new(self.store_url.clone()); + let validator = BlockProducerValidatorClient::new(self.validator_url.clone()); // Retry fetching the chain tip from the store until it succeeds. 
let mut retries_counter = 0; @@ -118,7 +122,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); let block_builder = - BlockBuilder::new(store.clone(), self.block_prover_url, self.block_interval); + BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, diff --git a/crates/block-producer/src/validator/mod.rs b/crates/block-producer/src/validator/mod.rs new file mode 100644 index 0000000000..49b72185c8 --- /dev/null +++ b/crates/block-producer/src/validator/mod.rs @@ -0,0 +1,85 @@ +use miden_node_proto::clients::{Builder, ValidatorClient}; +use miden_node_proto::generated as proto; +use miden_objects::block::{BlockBody, BlockHeader, ProposedBlock}; +use miden_objects::utils::{Deserializable, Serializable}; +use thiserror::Error; +use tracing::{info, instrument}; +use url::Url; + +use crate::COMPONENT; + +// VALIDATOR ERROR +// ================================================================================================ + +#[derive(Debug, Error)] +pub enum ValidatorError { + #[error("gRPC transport error: {0}")] + Transport(#[from] tonic::Status), + #[error("Failed to convert header: {0}")] + HeaderConversion(String), + #[error("Failed to deserialize body: {0}")] + BodyDeserialization(String), +} + +// VALIDATE BLOCK RESPONSE +// ================================================================================================ + +#[derive(Debug, Clone)] +pub struct ValidateBlockResponse { + pub header: BlockHeader, + pub body: BlockBody, +} + +// VALIDATOR CLIENT +// ================================================================================================ + +/// Interface to the validator's block-producer gRPC API. +/// +/// Essentially just a thin wrapper around the generated gRPC client which improves type safety. 
+#[derive(Clone, Debug)] +pub struct BlockProducerValidatorClient { + client: ValidatorClient, +} + +impl BlockProducerValidatorClient { + /// Creates a new validator client with a lazy connection. + pub fn new(validator_url: Url) -> Self { + info!(target: COMPONENT, validator_endpoint = %validator_url, "Initializing validator client"); + + let validator = Builder::new(validator_url) + .without_tls() + .without_timeout() + .without_metadata_version() + .without_metadata_genesis() + .with_otel_context_injection() + .connect_lazy::(); + + Self { client: validator } + } + + #[instrument(target = COMPONENT, name = "validator.client.validate_block", skip_all, err)] + pub async fn validate_block( + &self, + proposed_block: ProposedBlock, + ) -> Result { + // Send request and receive response. + let message = proto::blockchain::ProposedBlock { + proposed_block: proposed_block.to_bytes(), + }; + let request = tonic::Request::new(message); + let response = self.client.clone().validate_block(request).await?; + let response = response.into_inner(); + + // Extract header from response (should always be present). + let header_proto = response.header.expect("validator always returns a header"); + let header = BlockHeader::try_from(header_proto) + .map_err(|err| ValidatorError::HeaderConversion(err.to_string()))?; + + // Extract body from response (should always be present). + let body_proto = response.body.expect("validator always returns a body"); + let body = BlockBody::read_from_bytes(&body_proto.block_body) + .map_err(|err| ValidatorError::BodyDeserialization(err.to_string()))?; + + Ok(ValidateBlockResponse { header, body }) + } +} diff --git a/crates/ntx-builder/src/state/mod.rs b/crates/ntx-builder/src/state/mod.rs index beb27b141a..363f58f837 100644 --- a/crates/ntx-builder/src/state/mod.rs +++ b/crates/ntx-builder/src/state/mod.rs @@ -234,7 +234,7 @@ impl State { /// Blocks in the MMR are pruned if the block count exceeds the maximum. 
fn update_chain_tip(&mut self, tip: BlockHeader) { // Update MMR which lags by one block. - self.chain_mmr.add_block(self.chain_tip_header.clone(), true); + self.chain_mmr.add_block(&self.chain_tip_header, true); // Set the new tip. self.chain_tip_header = tip; diff --git a/crates/proto/src/clients/mod.rs b/crates/proto/src/clients/mod.rs index 12f316b790..3388d7875e 100644 --- a/crates/proto/src/clients/mod.rs +++ b/crates/proto/src/clients/mod.rs @@ -119,6 +119,7 @@ type GeneratedStoreClientForRpc = generated::rpc_store::rpc_client::RpcClient; type GeneratedProverClient = generated::remote_prover::api_client::ApiClient; +type GeneratedValidatorClient = generated::validator::api_client::ApiClient; // gRPC CLIENTS // ================================================================================================ @@ -137,6 +138,8 @@ pub struct StoreRpcClient(GeneratedStoreClientForRpc); pub struct RemoteProverProxyStatusClient(GeneratedProxyStatusClient); #[derive(Debug, Clone)] pub struct RemoteProverClient(GeneratedProverClient); +#[derive(Debug, Clone)] +pub struct ValidatorClient(GeneratedValidatorClient); impl DerefMut for RpcClient { fn deref_mut(&mut self) -> &mut Self::Target { @@ -236,6 +239,20 @@ impl Deref for RemoteProverClient { } } +impl DerefMut for ValidatorClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Deref for ValidatorClient { + type Target = GeneratedValidatorClient; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + // GRPC CLIENT BUILDER TRAIT // ================================================================================================ @@ -292,6 +309,12 @@ impl GrpcClient for RemoteProverClient { } } +impl GrpcClient for ValidatorClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedValidatorClient::new(InterceptedService::new(channel, interceptor))) + } +} + // STRICT TYPE-SAFE BUILDER (NO DEFAULTS) // 
================================================================================================ diff --git a/crates/proto/src/domain/mod.rs b/crates/proto/src/domain/mod.rs index f70c8f738a..b078655532 100644 --- a/crates/proto/src/domain/mod.rs +++ b/crates/proto/src/domain/mod.rs @@ -6,6 +6,7 @@ pub mod mempool; pub mod merkle; pub mod note; pub mod nullifier; +pub mod proof_request; pub mod transaction; // UTILITIES diff --git a/crates/proto/src/domain/proof_request.rs b/crates/proto/src/domain/proof_request.rs new file mode 100644 index 0000000000..16ef1ca409 --- /dev/null +++ b/crates/proto/src/domain/proof_request.rs @@ -0,0 +1,39 @@ +// PROOF REQUEST +// ================================================================================================ + +use miden_objects::batch::OrderedBatches; +use miden_objects::block::{BlockHeader, BlockInputs}; +use miden_objects::utils::{ + ByteReader, + ByteWriter, + Deserializable, + DeserializationError, + Serializable, +}; + +pub struct BlockProofRequest { + pub tx_batches: OrderedBatches, + pub block_header: BlockHeader, + pub block_inputs: BlockInputs, +} + +impl Serializable for BlockProofRequest { + fn write_into(&self, target: &mut W) { + let Self { tx_batches, block_header, block_inputs } = self; + tx_batches.write_into(target); + block_header.write_into(target); + block_inputs.write_into(target); + } +} + +impl Deserializable for BlockProofRequest { + fn read_from(source: &mut R) -> Result { + let block = Self { + tx_batches: OrderedBatches::read_from(source)?, + block_header: BlockHeader::read_from(source)?, + block_inputs: BlockInputs::read_from(source)?, + }; + + Ok(block) + } +} diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 1f11528964..41afe7ec73 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -7,6 +7,14 @@ pub struct Block { #[prost(bytes = "vec", tag = "1")] pub block: 
::prost::alloc::vec::Vec, } +/// Represents a proposed block. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ProposedBlock { + /// Block data encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::block::ProposedBlock\]. + #[prost(bytes = "vec", tag = "1")] + pub proposed_block: ::prost::alloc::vec::Vec, +} /// Represents a block or nothing. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct MaybeBlock { @@ -79,3 +87,11 @@ pub struct FeeParameters { #[prost(fixed32, tag = "2")] pub verification_base_fee: u32, } +/// Represents a block body. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockBody { + /// Block body data encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::block::BlockBody\]. + #[prost(bytes = "vec", tag = "1")] + pub block_body: ::prost::alloc::vec::Vec, +} diff --git a/crates/proto/src/generated/remote_prover.rs b/crates/proto/src/generated/remote_prover.rs index 4d8ae80285..b504804c3e 100644 --- a/crates/proto/src/generated/remote_prover.rs +++ b/crates/proto/src/generated/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,7 +21,7 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. 
#[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } diff --git a/crates/proto/src/generated/validator.rs b/crates/proto/src/generated/validator.rs index a5a31a35fd..3c3ec8a571 100644 --- a/crates/proto/src/generated/validator.rs +++ b/crates/proto/src/generated/validator.rs @@ -9,6 +9,16 @@ pub struct ValidatorStatus { #[prost(string, tag = "2")] pub status: ::prost::alloc::string::String, } +/// Response message for ValidateBlock RPC. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ValidateBlockResponse { + /// The block header (required - always present). + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + /// The block body (required - always present). + #[prost(message, optional, tag = "2")] + pub body: ::core::option::Option, +} /// Generated client implementations. pub mod api_client { #![allow( @@ -147,6 +157,31 @@ pub mod api_client { .insert(GrpcMethod::new("validator.Api", "SubmitProvenTransaction")); self.inner.unary(req, path, codec).await } + /// Validates a proposed block and returns the block header and body. + pub async fn validate_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/validator.Api/ValidateBlock", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("validator.Api", "ValidateBlock")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -172,6 +207,14 @@ pub mod api_server { &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; + /// Validates a proposed block and returns the block header and body. 
+ async fn validate_block( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// Validator API for the Validator component. #[derive(Debug)] @@ -337,6 +380,54 @@ pub mod api_server { }; Box::pin(fut) } + "/validator.Api/ValidateBlock" => { + #[allow(non_camel_case_types)] + struct ValidateBlockSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService< + super::super::blockchain::ProposedBlock, + > for ValidateBlockSvc { + type Response = super::ValidateBlockResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::ProposedBlock, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::validate_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ValidateBlockSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/crates/proto/src/lib.rs b/crates/proto/src/lib.rs index 5cd0afe4b4..0f5cbb8f51 100644 --- a/crates/proto/src/lib.rs +++ b/crates/proto/src/lib.rs @@ -10,4 +10,5 @@ pub mod generated; pub use domain::account::{AccountState, AccountWitnessRecord}; pub use domain::nullifier::NullifierWitnessRecord; +pub use 
domain::proof_request::BlockProofRequest; pub use domain::{convert, try_convert}; diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 32cc07be8b..a68fb10c53 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -15,7 +15,7 @@ crate-type = ["lib"] [features] batch-prover = ["dep:miden-objects", "dep:tokio"] -block-prover = ["dep:miden-objects", "dep:tokio"] +block-prover = ["dep:miden-node-proto", "dep:miden-objects", "dep:tokio"] default = ["std"] std = ["miden-objects/std", "miden-tx/std"] tx-prover = ["dep:miden-objects", "dep:miden-tx", "dep:tokio"] @@ -38,12 +38,13 @@ tonic-web = { optional = true, version = "0.14" } workspace = true [dependencies] -miden-objects = { optional = true, workspace = true } -miden-tx = { optional = true, workspace = true } -prost = { default-features = false, features = ["derive"], version = "0.14" } -thiserror = { workspace = true } -tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } -tonic-prost = { workspace = true } +miden-node-proto = { optional = true, workspace = true } +miden-objects = { optional = true, workspace = true } +miden-tx = { optional = true, workspace = true } +prost = { default-features = false, features = ["derive"], version = "0.14" } +thiserror = { workspace = true } +tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } +tonic-prost = { workspace = true } [build-dependencies] fs-err = { workspace = true } diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index c533bef4d6..d74e9f158d 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -3,10 +3,12 @@ use alloc::sync::Arc; use alloc::vec::Vec; use core::time::Duration; -use miden_objects::batch::ProvenBatch; 
-use miden_objects::block::{ProposedBlock, ProvenBlock}; +use miden_node_proto::BlockProofRequest; +use miden_objects::batch::{OrderedBatches, ProvenBatch}; +use miden_objects::block::{BlockHeader, BlockInputs, BlockProof, ProposedBlock, ProvenBlock}; use miden_objects::transaction::{OrderedTransactionHeaders, TransactionHeader}; use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; +use miden_tx::utils::{ByteReader, ByteWriter}; use tokio::sync::Mutex; use super::generated::api_client::ApiClient; @@ -104,8 +106,10 @@ impl RemoteBlockProver { impl RemoteBlockProver { pub async fn prove( &self, - proposed_block: ProposedBlock, - ) -> Result { + tx_batches: OrderedBatches, + block_header: BlockHeader, + block_inputs: BlockInputs, + ) -> Result { use miden_objects::utils::Serializable; self.connect().await?; @@ -119,75 +123,38 @@ impl RemoteBlockProver { })? .clone(); - // Get the set of expected transaction headers. - let proposed_txs = proposed_block.batches().to_transactions(); - - let request = tonic::Request::new(proposed_block.into()); + let request = BlockProofRequest { tx_batches, block_header, block_inputs }; + let request = tonic::Request::new(request.into()); let response = client.prove(request).await.map_err(|err| { RemoteProverClientError::other_with_source("failed to prove block", err) })?; - // Deserialize the response bytes back into a ProvenBlock. - let proven_block = ProvenBlock::try_from(response.into_inner()).map_err(|err| { + // Deserialize the response bytes back into a BlockProof. + let block_proof = BlockProof::try_from(response.into_inner()).map_err(|err| { RemoteProverClientError::other_with_source( "failed to deserialize received response from remote block prover", err, ) })?; - Self::validate_tx_headers(&proven_block, &proposed_txs)?; - - Ok(proven_block) - } - - /// Validates that the proven block's transaction headers are consistent with the transactions - /// passed in the proposed block. 
- /// - /// This expects that transactions from the proposed block and proven block are in the same - /// order, as define by [`OrderedTransactionHeaders`]. - fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, - ) -> Result<(), RemoteProverClientError> { - if proposed_txs.as_slice().len() != proven_block.transactions().as_slice().len() { - return Err(RemoteProverClientError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. - for (proposed_header, proven_header) in - proposed_txs.as_slice().iter().zip(proven_block.transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(RemoteProverClientError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) + Ok(block_proof) } } // CONVERSION // ================================================================================================ -impl TryFrom for ProvenBlock { +impl TryFrom for BlockProof { type Error = DeserializationError; fn try_from(value: proto::Proof) -> Result { - ProvenBlock::read_from_bytes(&value.payload) + BlockProof::read_from_bytes(&value.payload) } } -impl From for proto::ProofRequest { - fn from(proposed_block: ProposedBlock) -> Self { +impl From for proto::ProofRequest { + fn from(proposed_block: BlockProofRequest) -> Self { proto::ProofRequest { proof_type: proto::ProofType::Block.into(), payload: proposed_block.to_bytes(), diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs index 
65dd724603..1074dd5b8e 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,7 +21,7 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs index fdb3fcccf5..7be124daad 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,7 +21,7 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. 
#[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 192aac20a8..4b4f1dd0b4 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -453,6 +453,6 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { ( store_runtime, data_directory, - genesis_state.into_block().unwrap().inner().commitment(), + genesis_state.into_block().unwrap().inner().header().commitment(), ) } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 8eca0c71d1..bad38b9e29 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -28,19 +28,20 @@ miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { workspace = true } -miden-objects = { features = ["std"], workspace = true } -pretty_assertions = { workspace = true } -rand = { workspace = true } -rand_chacha = { workspace = true } -serde = { features = ["derive"], version = "1" } -thiserror = { workspace = true } -tokio = { features = ["fs", "rt-multi-thread"], workspace = true } -tokio-stream = { features = ["net"], workspace = true } -toml = { version = "0.9" } -tonic = { workspace = true } -tonic-reflection = { workspace = true } -tower-http = { features = ["util"], workspace = true } -tracing = { workspace = true } +# TODO remove `testing` from `miden-objects`, required for `BlockProof::new_dummy` +miden-objects = { features = ["std", "testing"], workspace = true } +pretty_assertions = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +serde = { features = ["derive"], version = "1" } +thiserror = { workspace = true } +tokio = { features = ["fs", "rt-multi-thread"], workspace = true } +tokio-stream = { features = ["net"], workspace = true } +toml = { version = "0.9" } +tonic = { workspace = true } +tonic-reflection = { workspace = true } +tower-http 
= { features = ["util"], workspace = true } +tracing = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 94c8ae3580..6339c0660f 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -250,8 +250,8 @@ impl Db { genesis.header(), &[], &[], - genesis.updated_accounts(), - genesis.transactions(), + genesis.body().updated_accounts(), + genesis.body().transactions(), ) }) .context("failed to insert genesis block")?; @@ -513,9 +513,9 @@ impl Db { conn, block.header(), ¬es, - block.created_nullifiers(), - block.updated_accounts(), - block.transactions(), + block.body().created_nullifiers(), + block.body().updated_accounts(), + block.body().transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs index cad4d51c9d..2dfc3ccb7d 100644 --- a/crates/store/src/genesis/mod.rs +++ b/crates/store/src/genesis/mod.rs @@ -5,9 +5,11 @@ use miden_objects::account::{Account, AccountDelta}; use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_objects::block::{ BlockAccountUpdate, + BlockBody, BlockHeader, BlockNoteTree, BlockNumber, + BlockProof, FeeParameters, ProvenBlock, }; @@ -118,16 +120,19 @@ impl GenesisState { self.timestamp, ); - // SAFETY: Header and accounts should be valid by construction. - // No notes or nullifiers are created at genesis, which is consistent with the above empty - // block note tree root and empty nullifier tree root. - Ok(GenesisBlock(ProvenBlock::new_unchecked( - header, + let body = BlockBody::new_unchecked( accounts, empty_output_notes, empty_nullifiers, empty_transactions, - ))) + ); + + let block_proof = BlockProof::new_dummy(); + + // SAFETY: Header and accounts should be valid by construction. 
+ // No notes or nullifiers are created at genesis, which is consistent with the above empty + // block note tree root and empty nullifier tree root. + Ok(GenesisBlock(ProvenBlock::new_unchecked(header, body, block_proof))) } } diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 57cad0a7af..b6d29f1362 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -71,10 +71,10 @@ impl block_producer_server::BlockProducer for StoreApi { info!( target: COMPONENT, block_num, - block_commitment = %block.commitment(), - account_count = block.updated_accounts().len(), - note_count = block.output_notes().count(), - nullifier_count = block.created_nullifiers().len(), + block_commitment = %block.header().commitment(), + account_count = block.body().updated_accounts().len(), + note_count = block.body().output_notes().count(), + nullifier_count = block.body().created_nullifiers().len(), ); self.state.apply_block(block).await?; diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 37af8afd11..3fd3fa24b5 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -25,13 +25,13 @@ use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; use miden_objects::account::{AccountHeader, AccountId, StorageSlot}; use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; +use miden_objects::block::nullifier_tree::NullifierTree; use miden_objects::block::{ AccountWitness, BlockHeader, BlockInputs, BlockNumber, Blockchain, - NullifierTree, NullifierWitness, ProvenBlock, }; @@ -201,7 +201,7 @@ impl State { let header = block.header(); - let tx_commitment = block.transactions().commitment(); + let tx_commitment = block.body().transactions().commitment(); if header.tx_commitment() != tx_commitment { return Err(InvalidBlockError::InvalidBlockTxCommitment { @@ -212,7 +212,7 @@ impl State { } let block_num = 
header.block_num(); - let block_commitment = block.commitment(); + let block_commitment = block.header().commitment(); // ensures the right block header is being processed let prev_block = self @@ -258,6 +258,7 @@ impl State { // nullifiers can be produced only once let duplicate_nullifiers: Vec<_> = block + .body() .created_nullifiers() .iter() .filter(|&n| inner.nullifier_tree.get_block_num(n).is_some()) @@ -279,7 +280,11 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + block + .body() + .created_nullifiers() + .iter() + .map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -292,6 +297,7 @@ impl State { .account_tree .compute_mutations( block + .body() .updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), @@ -318,12 +324,13 @@ impl State { }; // build note tree - let note_tree = block.build_output_note_tree(); + let note_tree = block.body().compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } let notes = block + .body() .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index ebb6145b77..23e32bcbf3 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -18,9 +18,11 @@ workspace = true [dependencies] anyhow = { workspace = true } +miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } +miden-objects = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } tonic = { features = ["transport"], 
workspace = true } diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 2ce3212906..5284468a50 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -2,11 +2,14 @@ use std::net::SocketAddr; use std::time::Duration; use anyhow::Context; +use miden_lib::block::build_block; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::grpc::grpc_trace_fn; +use miden_objects::block::ProposedBlock; +use miden_objects::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tower_http::catch_panic::CatchPanicLayer; @@ -95,6 +98,39 @@ impl api_server::Api for ValidatorServer { &self, _request: tonic::Request, ) -> Result, tonic::Status> { - todo!() + // TODO(sergerad): Implement transaction validation logic. + Ok(tonic::Response::new(())) + } + + /// Validates a proposed block and returns the block header and body. + async fn validate_block( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + let proposed_block_bytes = request.into_inner().proposed_block; + + // Deserialize the proposed block. 
+ let proposed_block = + ProposedBlock::read_from_bytes(&proposed_block_bytes).map_err(|err| { + tonic::Status::invalid_argument(format!( + "Failed to deserialize proposed block: {err}", + )) + })?; + + // Build header and body + let (header, body) = build_block(proposed_block) + .map_err(|err| tonic::Status::internal(format!("Failed to build block: {err}")))?; + + // Convert to protobuf format + let header_proto = proto::blockchain::BlockHeader::from(&header); + let body_proto = proto::blockchain::BlockBody { block_body: body.to_bytes() }; + + // Both header and body are required fields and must always be populated + let response = proto::validator::ValidateBlockResponse { + header: Some(header_proto), + body: Some(body_proto), + }; + + Ok(tonic::Response::new(response)) } } diff --git a/proto/proto/remote_prover.proto b/proto/proto/remote_prover.proto index e02a289e29..28a0ad485a 100644 --- a/proto/proto/remote_prover.proto +++ b/proto/proto/remote_prover.proto @@ -33,7 +33,7 @@ message ProofRequest { // type-specific: // - TRANSACTION: TransactionInputs encoded. // - BATCH: ProposedBatch encoded. - // - BLOCK: ProposedBlock encoded. + // - BLOCK: BlockProofRequest encoded. bytes payload = 2; } @@ -42,7 +42,7 @@ message Proof { // Serialized proof bytes. // - TRANSACTION: Returns an encoded ProvenTransaction. // - BATCH: Returns an encoded ProvenBatch. - // - BLOCK: Returns an encoded ProvenBlock. + // - BLOCK: Returns an encoded BlockProof. bytes payload = 1; } diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto index 28a35ae33e..b22dad84c6 100644 --- a/proto/proto/types/blockchain.proto +++ b/proto/proto/types/blockchain.proto @@ -14,6 +14,13 @@ message Block { bytes block = 1; } +// Represents a proposed block. +message ProposedBlock { + // Block data encoded using [winter_utils::Serializable] implementation for + // [miden_objects::block::ProposedBlock]. + bytes proposed_block = 1; +} + // Represents a block or nothing. 
message MaybeBlock { // The requested block data encoded using [winter_utils::Serializable] implementation for @@ -82,3 +89,13 @@ message FeeParameters { // The base fee (in base units) capturing the cost for the verification of a transaction. fixed32 verification_base_fee = 2; } + +// BLOCK BODY +// ================================================================================================ + +// Represents a block body. +message BlockBody { + // Block body data encoded using [winter_utils::Serializable] implementation for + // [miden_objects::block::BlockBody]. + bytes block_body = 1; +} diff --git a/proto/proto/validator.proto b/proto/proto/validator.proto index 6d4e801b45..8f12b2483f 100644 --- a/proto/proto/validator.proto +++ b/proto/proto/validator.proto @@ -16,6 +16,9 @@ service Api { // Submits a transaction to the validator. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (google.protobuf.Empty) {} + + // Validates a proposed block and returns the block header and body. + rpc ValidateBlock(blockchain.ProposedBlock) returns (ValidateBlockResponse) {} } // STATUS @@ -29,3 +32,15 @@ message ValidatorStatus { // The validator's status. string status = 2; } + +// VALIDATE BLOCK RESPONSE +// ================================================================================================ + +// Response message for ValidateBlock RPC. +message ValidateBlockResponse { + // The block header (required - always present). + blockchain.BlockHeader header = 1; + + // The block body (required - always present). 
+ blockchain.BlockBody body = 2; +} From 19cfcf23740a872d903a31b6b03e4909956cbaaf Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 1 Dec 2025 13:07:32 +0100 Subject: [PATCH 020/125] feat: integrate `NullifierTree` (#1364) --- CHANGELOG.md | 4 +- Cargo.lock | 284 +++++++++++++------------- bin/network-monitor/src/deploy/mod.rs | 20 +- crates/ntx-builder/src/transaction.rs | 20 +- crates/rpc/src/server/validator.rs | 30 +-- crates/store/src/state.rs | 12 +- 6 files changed, 192 insertions(+), 178 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ec5a0fdcf..1b971856f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,11 +12,13 @@ - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). +- Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). ### Fixes - RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). -## v0.12.6 (TBD) + +## v0.12.6 - Added Faucet metadata to the `miden-network-monitor` binary ([#1373](https://github.com/0xMiden/miden-node/pull/1373)). 
diff --git a/Cargo.lock b/Cargo.lock index f5fda2851c..3ec62e5f1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -196,7 +196,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -207,7 +207,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -504,9 +504,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.46" +version = "1.2.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" +checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" dependencies = [ "find-msvc-tools", "jobserver", @@ -642,9 +642,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.51" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive 4.5.49", @@ -652,9 +652,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.51" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -684,7 +684,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -782,7 +782,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.51", + "clap 4.5.53", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -896,7 +896,7 @@ checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -939,7 +939,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -953,7 +953,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -964,7 +964,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -975,7 +975,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1067,7 +1067,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1077,7 +1077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1097,14 +1097,14 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "diesel" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e7624a3bb9fffd82fff016be9a7f163d20e5a89eb8d28f9daaa6b30fff37500" +checksum = "0c415189028b232660655e4893e8bc25ca7aee8e96888db66d9edb400535456a" dependencies = [ "bigdecimal", "diesel_derives", @@ -1119,22 +1119,22 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.3.4" +version = "2.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9daac6489a36e42570da165a10c424f3edcefdff70c5fd55e1847c23f3dd7562" +checksum = 
"8587cbca3c929fb198e7950d761d31ca72b80aa6e07c1b7bec5879d187720436" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "diesel_migrations" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee060f709c3e3b1cadd83fcd0f61711f7a8cf493348f758d3a1c1147d70b3c97" +checksum = "745fd255645f0f1135f9ec55c7b00e0882192af9683ab4731e4bba3da82b8f9c" dependencies = [ "diesel", "migrations_internals", @@ -1147,7 +1147,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1176,7 +1176,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1202,7 +1202,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1296,7 +1296,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1518,7 +1518,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1644,7 +1644,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.0", + "indexmap 2.12.1", "slab", "tokio", "tokio-util", @@ -1681,15 +1681,16 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", "foldhash 0.2.0", "rayon", "serde", + 
"serde_core", ] [[package]] @@ -1745,23 +1746,22 @@ dependencies = [ [[package]] name = "hostname" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" +checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" dependencies = [ "cfg-if", "libc", - "windows-link 0.1.3", + "windows-link 0.2.1", ] [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -2050,12 +2050,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "equivalent", - "hashbrown 0.16.0", + "hashbrown 0.16.1", ] [[package]] @@ -2157,7 +2157,7 @@ checksum = "980af8b43c3ad5d8d349ace167ec8170839f753a42d233ba19e08afe1850fa69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2172,9 +2172,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -2344,7 +2344,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2472,7 +2472,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" 
-source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" dependencies = [ "miden-objects", "thiserror 2.0.17", @@ -2498,9 +2498,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.18.2" +version = "0.18.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb82051002f9c64878d3b105a7b924de1ee92019231923380cf4ecd7b824f9a" +checksum = "0048d2d987f215bc9633ced499a8c488d0e2474350c765f904b87cae3462acb7" dependencies = [ "blake3", "cc", @@ -2508,7 +2508,7 @@ dependencies = [ "ed25519-dalek", "flume", "glob", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "hkdf", "k256", "miden-crypto-derive", @@ -2530,12 +2530,12 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.18.2" +version = "0.18.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2222f37355ea975f40acd3c098a437574a31a4d8a2c193cf4e9fead2beede577" +checksum = "ca3b38aace84e157fb02aba8f8ae85bbf8c3afdcdbdf8190fbe7476f3be7ef44" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2568,7 +2568,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" dependencies = [ "fs-err", "miden-assembly", @@ -2617,7 +2617,7 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.110", + "syn 2.0.111", "terminal_size 0.3.0", "textwrap", "thiserror 2.0.17", @@ -2633,7 +2633,7 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2642,7 +2642,7 @@ version = "0.13.0" 
dependencies = [ "anyhow", "axum", - "clap 4.5.51", + "clap 4.5.53", "hex", "humantime", "miden-lib", @@ -2668,7 +2668,7 @@ name = "miden-node" version = "0.13.0" dependencies = [ "anyhow", - "clap 4.5.51", + "clap 4.5.53", "figment", "fs-err", "humantime", @@ -2724,7 +2724,7 @@ name = "miden-node-grpc-error-macro" version = "0.13.0" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2828,7 +2828,7 @@ dependencies = [ "diesel_migrations", "fs-err", "hex", - "indexmap 2.12.0", + "indexmap 2.12.1", "miden-lib", "miden-node-proto", "miden-node-proto-build", @@ -2855,7 +2855,7 @@ dependencies = [ name = "miden-node-stress-test" version = "0.13.0" dependencies = [ - "clap 4.5.51", + "clap 4.5.53", "fs-err", "futures", "miden-air", @@ -2878,7 +2878,7 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2932,7 +2932,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" dependencies = [ "bech32", "getrandom 0.3.4", @@ -2996,7 +2996,7 @@ dependencies = [ "async-trait", "axum", "bytes", - "clap 4.5.51", + "clap 4.5.53", "http", "humantime", "miden-block-prover", @@ -3074,7 +3074,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3092,7 +3092,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" +source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" dependencies = [ "miden-lib", "miden-objects", @@ -3105,7 +3105,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e8bbc9578e78eb790de28b476c8bf659d04994ed" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" dependencies = [ "miden-objects", "miden-tx", @@ -3195,7 +3195,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3327,7 +3327,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3377,7 +3377,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3496,7 +3496,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3638,7 +3638,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3654,7 +3654,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.12.0", + "indexmap 2.12.1", ] [[package]] @@ -3683,7 +3683,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3875,7 +3875,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4060,7 +4060,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4113,7 +4113,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "version_check", "yansi", ] @@ -4195,21 +4195,21 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.110", + "syn 2.0.111", "tempfile", ] [[package]] name = "prost-derive" -version = "0.14.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +checksum = "d2d93e596a829ebe00afa41c3a056e6308d6b8a4c7d869edf184e2c91b1ba564" dependencies = [ "anyhow", "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4588,7 +4588,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.110", + "syn 2.0.111", "unicode-ident", ] @@ -4681,9 +4681,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" dependencies = [ "zeroize", ] @@ -4870,7 +4870,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4972,7 +4972,7 @@ checksum = 
"5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4982,7 +4982,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" dependencies = [ "base64", - "indexmap 2.12.0", + "indexmap 2.12.1", "rust_decimal", ] @@ -5024,9 +5024,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.6" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" dependencies = [ "libc", ] @@ -5102,9 +5102,9 @@ dependencies = [ [[package]] name = "sqlite-wasm-rs" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c6d746902bca4ddf16592357eacf0473631ea26b36072f0dd0b31fa5ccd1f4" +checksum = "60bdd87fcb4c9764b024805fb2df5f1d659bea6e629fdbdcdcfc4042b9a640d0" dependencies = [ "js-sys", "once_cell", @@ -5173,7 +5173,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5216,9 +5216,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.110" +version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", @@ -5242,7 +5242,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5291,7 +5291,7 @@ version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2111ef44dae28680ae9752bb89409e7310ca33a8c621ebe7b106cf5c928b3ac0" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5366,7 +5366,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5377,7 +5377,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5477,7 +5477,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5556,7 +5556,7 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "serde_core", "serde_spanned 1.0.3", "toml_datetime 0.7.3", @@ -5589,7 +5589,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -5603,7 +5603,7 @@ version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "toml_datetime 0.7.3", "toml_parser", "winnow", @@ -5670,7 +5670,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5708,7 +5708,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.110", + "syn 2.0.111", "tempfile", "tonic-build", ] @@ -5778,7 +5778,7 @@ checksum = 
"d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.0", + "indexmap 2.12.1", "pin-project-lite", "slab", "sync_wrapper", @@ -5791,9 +5791,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" dependencies = [ "bitflags 2.10.0", "bytes", @@ -5823,9 +5823,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" dependencies = [ "log", "pin-project-lite", @@ -5835,20 +5835,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" dependencies = [ "once_cell", "valuable", @@ -5909,9 +5909,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", @@ -6189,9 +6189,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", @@ -6202,9 +6202,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.55" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", "js-sys", @@ -6215,9 +6215,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6225,22 +6225,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] @@ -6260,9 +6260,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", @@ -6376,7 +6376,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6387,7 +6387,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6701,9 +6701,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] @@ -6760,7 +6760,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6877,28 +6877,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6918,7 +6918,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "synstructure", ] @@ -6958,7 +6958,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index ff653d8f1f..81ae1f98d4 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -261,12 +261,12 @@ impl DataStore for MonitorDataStore { unimplemented!("Not needed") } - async fn get_vault_asset_witness( + async fn get_vault_asset_witnesses( &self, account_id: AccountId, vault_root: Word, - vault_key: AssetVaultKey, - ) -> Result { + vault_keys: BTreeSet, + ) -> Result, DataStoreError> { let account = self.get_account(account_id)?; if account.vault().root() != vault_root { @@ -276,12 +276,14 @@ impl DataStore for MonitorDataStore { }); } - AssetWitness::new(account.vault().open(vault_key).into()).map_err(|err| { - DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(Box::new(err)), - } - }) + Result::, _>::from_iter(vault_keys.into_iter().map(|vault_key| { + AssetWitness::new(account.vault().open(vault_key).into()).map_err(|err| { + 
DataStoreError::Other { + error_msg: "failed to open vault asset tree".into(), + source: Some(Box::new(err)), + } + }) + })) } async fn get_note_script( diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/transaction.rs index 6969e0e88c..971e58672a 100644 --- a/crates/ntx-builder/src/transaction.rs +++ b/crates/ntx-builder/src/transaction.rs @@ -323,12 +323,12 @@ impl DataStore for NtxDataStore { async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) } } - fn get_vault_asset_witness( + fn get_vault_asset_witnesses( &self, account_id: AccountId, vault_root: Word, - vault_key: AssetVaultKey, - ) -> impl FutureMaybeSend> { + vault_keys: BTreeSet, + ) -> impl FutureMaybeSend, DataStoreError>> { async move { if self.account.id() != account_id { return Err(DataStoreError::AccountNotFound(account_id)); @@ -341,12 +341,14 @@ impl DataStore for NtxDataStore { }); } - AssetWitness::new(self.account.vault().open(vault_key).into()).map_err(|err| { - DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(Box::new(err)), - } - }) + Result::, _>::from_iter(vault_keys.into_iter().map(|vault_key| { + AssetWitness::new(self.account.vault().open(vault_key).into()).map_err(|err| { + DataStoreError::Other { + error_msg: "failed to open vault asset tree".into(), + source: Some(Box::new(err)), + } + }) + })) } } diff --git a/crates/rpc/src/server/validator.rs b/crates/rpc/src/server/validator.rs index b0e8e082ac..5a0b077e8d 100644 --- a/crates/rpc/src/server/validator.rs +++ b/crates/rpc/src/server/validator.rs @@ -82,12 +82,12 @@ impl DataStore for TransactionInputsDataStore { async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) } } - fn get_vault_asset_witness( + fn get_vault_asset_witnesses( &self, account_id: AccountId, vault_root: Word, - vault_key: AssetVaultKey, - ) -> impl FutureMaybeSend> { + vault_keys: BTreeSet, + ) -> impl FutureMaybeSend, DataStoreError>> { async move { if 
self.tx_inputs.account().id() != account_id { return Err(DataStoreError::AccountNotFound(account_id)); @@ -100,18 +100,20 @@ impl DataStore for TransactionInputsDataStore { }); } - match self.tx_inputs.account().vault().open(vault_key) { - Ok(vault_proof) => { - AssetWitness::new(vault_proof.into()).map_err(|err| DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), + Result::, _>::from_iter(vault_keys.into_iter().map(|vault_key| { + match self.tx_inputs.account().vault().open(vault_key) { + Ok(vault_proof) => { + AssetWitness::new(vault_proof.into()).map_err(|err| DataStoreError::Other { + error_msg: "failed to open vault asset tree".into(), + source: Some(err.into()), + }) + }, + Err(err) => Err(DataStoreError::Other { + error_msg: "failed to open vault".into(), source: Some(err.into()), - }) - }, - Err(err) => Err(DataStoreError::Other { - error_msg: "failed to open vault".into(), - source: Some(err.into()), - }), - } + }), + } + })) } } diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 3fd3fa24b5..e91a114772 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -95,7 +95,7 @@ struct InnerState where S: SmtStorage, { - nullifier_tree: NullifierTree, + nullifier_tree: NullifierTree>, blockchain: Blockchain, account_tree: AccountTreeWithHistory, } @@ -1141,10 +1141,16 @@ impl State { // ================================================================================================ #[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_nullifier_tree(db: &mut Db) -> Result { +async fn load_nullifier_tree( + db: &mut Db, +) -> Result>, StateInitializationError> { let nullifiers = db.select_all_nullifiers().await?; - NullifierTree::with_entries(nullifiers.into_iter().map(|info| (info.nullifier, info.block_num))) + // Convert nullifier data to entries for NullifierTree + // The nullifier value format is: block_num + let entries = nullifiers.into_iter().map(|info| (info.nullifier, 
info.block_num)); + + NullifierTree::with_storage_from_entries(MemoryStorage::default(), entries) .map_err(StateInitializationError::FailedToCreateNullifierTree) } From 8eb27f9c45d6f3f0b1a1170b1d9fb414fd0cc997 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 1 Dec 2025 19:17:34 +0100 Subject: [PATCH 021/125] fix: breaking `protox` transitive dependency (#1404) --- CHANGELOG.md | 1 + Cargo.toml | 32 +++++++++++++++++--------------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b971856f0..d78911ccf6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ ### Fixes - RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). +- Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/miden-node/pull/1403)). ## v0.12.6 diff --git a/Cargo.toml b/Cargo.toml index 57a06e7bcc..88705f3424 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,21 +60,23 @@ miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/mid miden-air = { features = ["std", "testing"], version = "0.19" } # External dependencies -anyhow = { version = "1.0" } -assert_matches = { version = "1.5" } -async-trait = { version = "0.1" } -clap = { features = ["derive"], version = "4.5" } -fs-err = { version = "3" } -futures = { version = "0.3" } -hex = { version = "0.4" } -http = { version = "1.3" } -humantime = { version = "2.2" } -indexmap = { version = "2.12" } -itertools = { version = "0.14" } -lru = { default-features = false, version = "0.16" } -pretty_assertions = { version = "1.4" } -prost = { version = "0.14" } -protox = { version = "0.9" } +anyhow = { version = "1.0" } +assert_matches = { version = "1.5" } +async-trait = { version = "0.1" } +clap = { features = ["derive"], version = "4.5" } +fs-err = { version = "3" } +futures = { version = "0.3" } +hex = { 
version = "0.4" } +http = { version = "1.3" } +humantime = { version = "2.2" } +indexmap = { version = "2.12" } +itertools = { version = "0.14" } +lru = { default-features = false, version = "0.16" } +pretty_assertions = { version = "1.4" } +# breaking change `DecodeError::new` is not exposed anymore +# but is assumed public by some internal dependency +prost = { version = "=0.14.1" } +protox = { version = "=0.9.0" } rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } From f05b81dff37942dafd87b95c4513c29a8987b999 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Mon, 1 Dec 2025 17:43:47 -0300 Subject: [PATCH 022/125] feat: add more data to block-producer status endpoint (#1388) * feat: add more data to block-producer status endpoint * feat: display mempool data in network monitor (#1392) * review: rename spawn function, increase default duration, use default for Mempool Stats * review: add section separators to mempool --- CHANGELOG.md | 2 + bin/network-monitor/assets/index.html | 18 ++++- bin/network-monitor/src/counter.rs | 23 ++---- bin/network-monitor/src/deploy/mod.rs | 72 +++++++++++++++---- bin/network-monitor/src/status.rs | 23 ++++++ crates/block-producer/src/lib.rs | 4 ++ crates/block-producer/src/mempool/mod.rs | 39 +++++++++++ crates/block-producer/src/server/mod.rs | 73 +++++++++++++++++++- crates/proto/src/generated/block_producer.rs | 16 +++++ crates/rpc/src/server/api.rs | 2 + proto/proto/block_producer.proto | 15 ++++ 11 files changed, 254 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d78911ccf6..158f40461e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). 
- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). +- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)). +- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). ### Fixes diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index 9f642e5f98..060c294292 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -196,7 +196,23 @@ ` : ''} ${details.RpcStatus.block_producer_status ? `
- Block Producer: ${details.RpcStatus.block_producer_status.version} - ${details.RpcStatus.block_producer_status.status} + Block Producer: + ${details.RpcStatus.block_producer_status.version} - ${details.RpcStatus.block_producer_status.status} +
+ Mempool: +
+ Unbatched TXs: + ${details.RpcStatus.block_producer_status.mempool.unbatched_transactions} +
+
+ Proposed Batches: + ${details.RpcStatus.block_producer_status.mempool.proposed_batches} +
+
+ Proven Batches: + ${details.RpcStatus.block_producer_status.mempool.proven_batches} +
+
` : ''} ` : ''} diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 196310cd9b..a67952cccc 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -11,7 +11,7 @@ use anyhow::{Context, Result}; use miden_lib::AuthScheme; use miden_lib::account::interface::AccountInterface; use miden_lib::utils::ScriptBuilder; -use miden_node_proto::clients::{Builder, RpcClient}; +use miden_node_proto::clients::RpcClient; use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; use miden_objects::account::auth::AuthSecretKey; @@ -43,7 +43,7 @@ use tracing::{error, info, instrument, warn}; use crate::COMPONENT; use crate::config::MonitorConfig; -use crate::deploy::{MonitorDataStore, get_counter_library}; +use crate::deploy::{MonitorDataStore, create_genesis_aware_rpc_client, get_counter_library}; use crate::status::{ CounterTrackingDetails, IncrementDetails, @@ -52,19 +52,6 @@ use crate::status::{ Status, }; -async fn create_rpc_client(config: &MonitorConfig) -> Result { - Builder::new(config.rpc_url.clone()) - .with_tls() - .context("Failed to configure TLS for RPC client") - .expect("TLS is enabled") - .with_timeout(config.request_timeout) - .without_metadata_version() - .without_metadata_genesis() - .without_otel_context_injection() - .connect() - .await -} - /// Get the genesis block header. 
async fn get_genesis_block_header(rpc_client: &mut RpcClient) -> Result { let block_header_request = BlockHeaderByNumberRequest { @@ -215,7 +202,8 @@ pub async fn run_increment_task( expected_counter_value: Arc, ) -> Result<()> { // Create RPC client - let mut rpc_client = create_rpc_client(&config).await?; + let mut rpc_client = + create_genesis_aware_rpc_client(&config.rpc_url, config.request_timeout).await?; let ( mut details, @@ -347,7 +335,8 @@ pub async fn run_counter_tracking_task( expected_counter_value: Arc, ) -> Result<()> { // Create RPC client - let mut rpc_client = create_rpc_client(&config).await?; + let mut rpc_client = + create_genesis_aware_rpc_client(&config.rpc_url, config.request_timeout).await?; // Load counter account to get the account ID let counter_account = match load_counter_account(&config.counter_filepath) { diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index 81ae1f98d4..58278d92e3 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -40,6 +40,62 @@ use crate::deploy::wallet::{create_wallet_account, save_wallet_account}; pub mod counter; pub mod wallet; +/// Create an RPC client configured with the correct genesis metadata in the +/// `Accept` header so that write RPCs such as `SubmitProvenTransaction` are +/// accepted by the node. +pub async fn create_genesis_aware_rpc_client( + rpc_url: &Url, + timeout: Duration, +) -> Result { + // First, create a temporary client without genesis metadata to discover the + // genesis block header and its commitment. + let mut rpc: RpcClient = Builder::new(rpc_url.clone()) + .with_tls() + .context("Failed to configure TLS for RPC client")? 
+ .with_timeout(timeout) + .without_metadata_version() + .without_metadata_genesis() + .without_otel_context_injection() + .connect() + .await + .context("Failed to create RPC client for genesis discovery")?; + + let block_header_request = BlockHeaderByNumberRequest { + block_num: Some(BlockNumber::GENESIS.as_u32()), + include_mmr_proof: None, + }; + + let response = rpc + .get_block_header_by_number(block_header_request) + .await + .context("Failed to get genesis block header from RPC")? + .into_inner(); + + let genesis_block_header = response + .block_header + .ok_or_else(|| anyhow::anyhow!("No block header in response"))?; + + let genesis_header: BlockHeader = + genesis_block_header.try_into().context("Failed to convert block header")?; + let genesis_commitment = genesis_header.commitment(); + let genesis = genesis_commitment.to_hex(); + + // Rebuild the client, this time including the required genesis metadata so that + // write RPCs like SubmitProvenTransaction are accepted by the node. + let rpc_client = Builder::new(rpc_url.clone()) + .with_tls() + .context("Failed to configure TLS for RPC client")? + .with_timeout(timeout) + .without_metadata_version() + .with_metadata_genesis(genesis) + .without_otel_context_injection() + .connect() + .await + .context("Failed to connect to RPC server with genesis metadata")?; + + Ok(rpc_client) +} + /// Ensure accounts exist, creating them if they don't. /// /// This function checks if the wallet and counter account files exist. @@ -89,17 +145,8 @@ pub async fn ensure_accounts_exist( /// then saves it to the specified file. #[instrument(target = COMPONENT, name = "deploy-counter-account", skip_all, ret(level = "debug"))] pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> Result<()> { - // Deploy counter account to the network - let mut rpc_client: RpcClient = Builder::new(rpc_url.clone()) - .with_tls() - .context("Failed to configure TLS for RPC client")? 
- .with_timeout(Duration::from_secs(5)) - .without_metadata_version() - .without_metadata_genesis() - .without_otel_context_injection() - .connect() - .await - .context("Failed to connect to RPC server")?; + // Deploy counter account to the network using a genesis-aware RPC client. + let mut rpc_client = create_genesis_aware_rpc_client(rpc_url, Duration::from_secs(10)).await?; let block_header_request = BlockHeaderByNumberRequest { block_num: Some(BlockNumber::GENESIS.as_u32()), @@ -116,7 +163,8 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> .block_header .ok_or_else(|| anyhow::anyhow!("No block header in response"))?; - let genesis_header = root_block_header.try_into().context("Failed to convert block header")?; + let genesis_header: BlockHeader = + root_block_header.try_into().context("Failed to convert block header")?; let genesis_chain_mmr = PartialBlockchain::new(PartialMmr::from_peaks(MmrPeaks::default()), Vec::new()) diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index 8a10ea97d4..f00ada304c 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -138,6 +138,19 @@ pub struct StoreStatusDetails { pub struct BlockProducerStatusDetails { pub version: String, pub status: Status, + /// Mempool statistics for this block producer. + pub mempool: MempoolStatusDetails, +} + +/// Details about the block producer's mempool. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MempoolStatusDetails { + /// Number of transactions currently in the mempool waiting to be batched. + pub unbatched_transactions: u64, + /// Number of batches currently being proven. + pub proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + pub proven_batches: u64, } /// Details of a remote prover service. 
@@ -190,9 +203,19 @@ impl From for StoreStatusDetails { impl From for BlockProducerStatusDetails { fn from(value: BlockProducerStatus) -> Self { + // We assume all supported nodes expose mempool statistics. + let mempool_stats = value + .mempool_stats + .expect("block producer status must include mempool statistics"); + Self { version: value.version, status: value.status.into(), + mempool: MempoolStatusDetails { + unbatched_transactions: mempool_stats.unbatched_transactions, + proposed_batches: mempool_stats.proposed_batches, + proven_batches: mempool_stats.proven_batches, + }, } } } diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 3e30b25fab..ed505b4337 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -1,5 +1,6 @@ #![recursion_limit = "256"] use std::num::NonZeroUsize; +use std::time::Duration; #[cfg(test)] pub mod test_utils; @@ -46,6 +47,9 @@ const SERVER_MEMPOOL_STATE_RETENTION: NonZeroUsize = NonZeroUsize::new(5).unwrap /// This rejects transactions which would likely expire before making it into a block. const SERVER_MEMPOOL_EXPIRATION_SLACK: u32 = 2; +/// The interval at which to update the cached mempool statistics. 
+const CACHED_MEMPOOL_STATS_UPDATE_INTERVAL: Duration = Duration::from_secs(5); + const _: () = assert!( DEFAULT_MAX_BATCHES_PER_BLOCK <= miden_objects::MAX_BATCHES_PER_BLOCK, "Server constraint cannot exceed the protocol's constraint" diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 277bf56fb4..7332d9c08a 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -69,6 +69,9 @@ mod subscription; #[cfg(test)] mod tests; +// MEMPOOL CONFIGURATION +// ================================================================================================ + #[derive(Clone)] pub struct SharedMempool(Arc>); @@ -112,6 +115,9 @@ impl Default for MempoolConfig { } } +// SHARED MEMPOOL +// ================================================================================================ + impl SharedMempool { #[instrument(target = COMPONENT, name = "mempool.lock", skip_all)] pub async fn lock(&self) -> MutexGuard<'_, Mempool> { @@ -119,6 +125,9 @@ impl SharedMempool { } } +// MEMPOOL +// ================================================================================================ + #[derive(Clone, Debug)] pub struct Mempool { /// Contains the aggregated state of all transactions, batches and blocks currently inflight in @@ -142,6 +151,9 @@ impl PartialEq for Mempool { } impl Mempool { + // CONSTRUCTORS + // -------------------------------------------------------------------------------------------- + /// Creates a new [`SharedMempool`] with the provided configuration. pub fn shared(chain_tip: BlockNumber, config: MempoolConfig) -> SharedMempool { SharedMempool(Arc::new(Mutex::new(Self::new(chain_tip, config)))) @@ -157,6 +169,9 @@ impl Mempool { } } + // TRANSACTION & BATCH LIFECYCLE + // -------------------------------------------------------------------------------------------- + /// Adds a transaction to the mempool. 
/// /// Sends a [`MempoolEvent::TransactionAdded`] event to subscribers. @@ -537,6 +552,9 @@ impl Mempool { self.inject_telemetry(); } + // EVENTS & SUBSCRIPTIONS + // -------------------------------------------------------------------------------------------- + /// Creates a subscription to [`MempoolEvent`] which will be emitted in the order they occur. /// /// Only emits events which occurred after the current committed block. @@ -553,6 +571,27 @@ impl Mempool { self.subscription.subscribe(chain_tip) } + // STATS & INSPECTION + // -------------------------------------------------------------------------------------------- + + /// Returns the number of transactions currently waiting to be batched. + pub fn unbatched_transactions_count(&self) -> usize { + self.nodes.txs.len() + } + + /// Returns the number of batches currently being proven. + pub fn proposed_batches_count(&self) -> usize { + self.nodes.proposed_batches.len() + } + + /// Returns the number of proven batches waiting for block inclusion. + pub fn proven_batches_count(&self) -> usize { + self.nodes.proven_batches.len() + } + + // INTERNAL HELPERS + // -------------------------------------------------------------------------------------------- + /// Adds mempool stats to the current tracing span. 
/// /// Note that these are only visible in the OpenTelemetry context, as conventional tracing diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 4bedc31e2c..0d028f5a4e 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -17,7 +17,7 @@ use miden_objects::block::BlockNumber; use miden_objects::transaction::ProvenTransaction; use miden_objects::utils::serde::Deserializable; use tokio::net::TcpListener; -use tokio::sync::{Barrier, Mutex}; +use tokio::sync::{Barrier, Mutex, RwLock}; use tokio_stream::wrappers::{ReceiverStream, TcpListenerStream}; use tonic::Status; use tower_http::trace::TraceLayer; @@ -37,7 +37,7 @@ use crate::errors::{ use crate::mempool::{BatchBudget, BlockBudget, Mempool, MempoolConfig, SharedMempool}; use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; -use crate::{COMPONENT, SERVER_NUM_BATCH_BUILDERS}; +use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, SERVER_NUM_BATCH_BUILDERS}; /// The block producer server. /// @@ -213,6 +213,27 @@ impl BlockProducer { } } +/// Mempool statistics that are updated periodically to avoid locking the mempool. +#[derive(Clone, Copy, Default)] +struct MempoolStats { + /// Number of transactions currently in the mempool waiting to be batched. + unbatched_transactions: u64, + /// Number of batches currently being proven. + proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + proven_batches: u64, +} + +impl From for proto::block_producer::MempoolStats { + fn from(stats: MempoolStats) -> Self { + proto::block_producer::MempoolStats { + unbatched_transactions: stats.unbatched_transactions, + proposed_batches: stats.proposed_batches, + proven_batches: stats.proven_batches, + } + } +} + /// Serves the block producer's RPC [api](api_server::Api). 
struct BlockProducerRpcServer { /// The mutex effectively rate limits incoming transactions into the mempool by forcing them @@ -224,6 +245,10 @@ struct BlockProducerRpcServer { mempool: Mutex, store: StoreClient, + + /// Cached mempool statistics that are updated periodically to avoid locking the mempool + /// for each status request. + cached_mempool_stats: Arc>, } #[tonic::async_trait] @@ -261,9 +286,12 @@ impl api_server::Api for BlockProducerRpcServer { &self, _request: tonic::Request<()>, ) -> Result, Status> { + let mempool_stats = *self.cached_mempool_stats.read().await; + Ok(tonic::Response::new(proto::block_producer::BlockProducerStatus { version: env!("CARGO_PKG_VERSION").to_string(), status: "connected".to_string(), + mempool_stats: Some(mempool_stats.into()), })) } @@ -312,10 +340,49 @@ impl tokio_stream::Stream for MempoolEventSubscription { impl BlockProducerRpcServer { pub fn new(mempool: SharedMempool, store: StoreClient) -> Self { - Self { mempool: Mutex::new(mempool), store } + Self { + mempool: Mutex::new(mempool), + store, + cached_mempool_stats: Arc::new(RwLock::new(MempoolStats::default())), + } + } + + /// Starts a background task that periodically updates the cached mempool statistics. + /// + /// This prevents the need to lock the mempool for each status request. 
+ async fn spawn_mempool_stats_updater(&self) { + let cached_mempool_stats = Arc::clone(&self.cached_mempool_stats); + let mempool = self.mempool.lock().await.clone(); + + tokio::spawn(async move { + let mut interval = tokio::time::interval(CACHED_MEMPOOL_STATS_UPDATE_INTERVAL); + + loop { + interval.tick().await; + + let (unbatched_transactions, proposed_batches, proven_batches) = { + let mempool = mempool.lock().await; + ( + mempool.unbatched_transactions_count() as u64, + mempool.proposed_batches_count() as u64, + mempool.proven_batches_count() as u64, + ) + }; + + let mut cache = cached_mempool_stats.write().await; + *cache = MempoolStats { + unbatched_transactions, + proposed_batches, + proven_batches, + }; + } + }); } async fn serve(self, listener: TcpListener, timeout: Duration) -> anyhow::Result<()> { + // Start background task to periodically update cached mempool stats + self.spawn_mempool_stats_updater().await; + let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(block_producer_api_descriptor()) .build_v1() diff --git a/crates/proto/src/generated/block_producer.rs b/crates/proto/src/generated/block_producer.rs index 5771d5510a..10b82369b8 100644 --- a/crates/proto/src/generated/block_producer.rs +++ b/crates/proto/src/generated/block_producer.rs @@ -8,6 +8,22 @@ pub struct BlockProducerStatus { /// The block producer's status. #[prost(string, tag = "2")] pub status: ::prost::alloc::string::String, + /// Statistics about the mempool. + #[prost(message, optional, tag = "3")] + pub mempool_stats: ::core::option::Option, +} +/// Statistics about the mempool. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MempoolStats { + /// Number of transactions currently in the mempool waiting to be batched. + #[prost(uint64, tag = "1")] + pub unbatched_transactions: u64, + /// Number of batches currently being proven. 
+ #[prost(uint64, tag = "2")] + pub proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + #[prost(uint64, tag = "3")] + pub proven_batches: u64, } /// Represents the result of submitting proven transaction. #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index a788f40bf9..062e7793ed 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -4,6 +4,7 @@ use std::time::Duration; use anyhow::Context; use miden_node_proto::clients::{BlockProducerClient, Builder, StoreRpcClient}; use miden_node_proto::errors::ConversionError; +use miden_node_proto::generated::block_producer::MempoolStats; use miden_node_proto::generated::rpc::api_server::{self, Api}; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; @@ -567,6 +568,7 @@ impl api_server::Api for RpcService { proto::block_producer::BlockProducerStatus { status: "unreachable".to_string(), version: "-".to_string(), + mempool_stats: Some(MempoolStats::default()), }, )), genesis_commitment: self.genesis_commitment.map(Into::into), diff --git a/proto/proto/block_producer.proto b/proto/proto/block_producer.proto index dae8293feb..3f353946e5 100644 --- a/proto/proto/block_producer.proto +++ b/proto/proto/block_producer.proto @@ -53,6 +53,21 @@ message BlockProducerStatus { // The block producer's status. string status = 2; + + // Statistics about the mempool. + MempoolStats mempool_stats = 3; +} + +// Statistics about the mempool. +message MempoolStats { + // Number of transactions currently in the mempool waiting to be batched. + uint64 unbatched_transactions = 1; + + // Number of batches currently being proven. + uint64 proposed_batches = 2; + + // Number of proven batches waiting for block inclusion. 
+ uint64 proven_batches = 3; } // SUBMIT PROVEN TRANSACTION From 9460b4423f6274441975ced75453b121013cab1b Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:19:18 +1300 Subject: [PATCH 023/125] fix: make NtxDataStore note script cache global (#1398) --- Cargo.lock | 2 +- crates/ntx-builder/Cargo.toml | 1 - crates/ntx-builder/src/builder/mod.rs | 8 +--- crates/ntx-builder/src/transaction.rs | 67 ++++++++++++++++----------- crates/utils/Cargo.toml | 6 +-- crates/utils/src/lib.rs | 1 + crates/utils/src/lru_cache.rs | 32 +++++++++++++ 7 files changed, 80 insertions(+), 37 deletions(-) create mode 100644 crates/utils/src/lru_cache.rs diff --git a/Cargo.lock b/Cargo.lock index 3ec62e5f1b..b422d98633 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2733,7 +2733,6 @@ version = "0.13.0" dependencies = [ "anyhow", "futures", - "lru 0.16.2", "miden-lib", "miden-node-proto", "miden-node-test-macro", @@ -2892,6 +2891,7 @@ dependencies = [ "http", "http-body-util", "itertools 0.14.0", + "lru 0.16.2", "miden-objects", "opentelemetry", "opentelemetry-otlp", diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 1a2f0d9e1e..fc27e4ff4b 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -16,7 +16,6 @@ workspace = true [dependencies] anyhow = { workspace = true } futures = { workspace = true } -lru = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-objects = { default-features = true, workspace = true } diff --git a/crates/ntx-builder/src/builder/mod.rs b/crates/ntx-builder/src/builder/mod.rs index 2932240b74..706e804c9a 100644 --- a/crates/ntx-builder/src/builder/mod.rs +++ b/crates/ntx-builder/src/builder/mod.rs @@ -14,7 +14,7 @@ use url::Url; use crate::MAX_IN_PROGRESS_TXS; use crate::block_producer::BlockProducerClient; use crate::store::StoreClient; -use crate::transaction::NtxError; +use 
crate::transaction::{NtxContext, NtxError}; // NETWORK TRANSACTION BUILDER // ================================================================================================ @@ -91,11 +91,7 @@ impl NetworkTransactionBuilder { let mut inflight = JoinSet::new(); let mut inflight_idx = HashMap::new(); - let context = crate::transaction::NtxContext { - block_producer: block_producer.clone(), - prover, - store, - }; + let context = NtxContext::new(block_producer.clone(), prover, store); loop { tokio::select! { diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/transaction.rs index 971e58672a..a61d3244e5 100644 --- a/crates/ntx-builder/src/transaction.rs +++ b/crates/ntx-builder/src/transaction.rs @@ -1,7 +1,7 @@ use std::collections::BTreeSet; -use std::sync::Arc; +use std::num::NonZeroUsize; -use lru::LruCache; +use miden_node_utils::lru_cache::LruCache; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_objects::account::{Account, AccountId, PartialAccount, StorageMapWitness, StorageSlot}; use miden_objects::asset::{AssetVaultKey, AssetWitness}; @@ -35,7 +35,6 @@ use miden_tx::{ TransactionMastStore, TransactionProverError, }; -use tokio::sync::Mutex; use tokio::task::JoinError; use tracing::{Instrument, instrument}; @@ -70,19 +69,42 @@ type NtxResult = Result; /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - pub block_producer: BlockProducerClient, + block_producer: BlockProducerClient, /// The prover to delegate proofs to. /// /// Defaults to local proving if unset. This should be avoided in production as this is /// computationally intensive. - pub prover: Option, + prover: Option, /// The store client for retrieving note scripts. - pub store: StoreClient, + store: StoreClient, + + /// LRU cache for storing retrieved note scripts to avoid repeated store calls. 
+ script_cache: LruCache, } impl NtxContext { + /// Default cache size for note scripts. + /// + /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage + /// depends on the complexity of the scripts being cached. + const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap(); + + /// Creates a new [`NtxContext`] instance. + pub fn new( + block_producer: BlockProducerClient, + prover: Option, + store: StoreClient, + ) -> Self { + Self { + block_producer, + prover, + store, + script_cache: LruCache::new(Self::DEFAULT_SCRIPT_CACHE_SIZE), + } + } + /// Executes a transaction end-to-end: filtering, executing, proving, and submitted to the block /// producer. /// @@ -125,8 +147,13 @@ impl NtxContext { async move { async move { - let data_store = - NtxDataStore::new(account, chain_tip_header, chain_mmr, self.store.clone()); + let data_store = NtxDataStore::new( + account, + chain_tip_header, + chain_mmr, + self.store.clone(), + self.script_cache.clone(), + ); let notes = notes.into_iter().map(Note::from).collect::>(); let (successful, failed) = self.filter_notes(&data_store, notes).await?; @@ -256,22 +283,17 @@ struct NtxDataStore { /// Store client for retrieving note scripts. store: StoreClient, /// LRU cache for storing retrieved note scripts to avoid repeated store calls. - script_cache: Arc>>, + script_cache: LruCache, } impl NtxDataStore { - /// Default cache size for note scripts. - /// - /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage - /// depends on the complexity of the scripts being cached. - const DEFAULT_SCRIPT_CACHE_SIZE: usize = 1000; - /// Creates a new `NtxDataStore` with default cache size. 
fn new( account: Account, reference_header: BlockHeader, chain_mmr: PartialBlockchain, store: StoreClient, + script_cache: LruCache, ) -> Self { let mast_store = TransactionMastStore::new(); mast_store.load_account_code(account.code()); @@ -282,10 +304,7 @@ impl NtxDataStore { chain_mmr, mast_store, store, - script_cache: Arc::new(Mutex::new(LruCache::new( - std::num::NonZeroUsize::new(Self::DEFAULT_SCRIPT_CACHE_SIZE) - .expect("default script cache size is non-zero"), - ))), + script_cache, } } } @@ -392,14 +411,11 @@ impl DataStore for NtxDataStore { script_root: Word, ) -> impl FutureMaybeSend, DataStoreError>> { let store = self.store.clone(); - let cache = self.script_cache.clone(); + let mut cache = self.script_cache.clone(); async move { // Attempt to retrieve the script from the cache. - if let Some(cached_script) = { - let mut cache_guard = cache.lock().await; - cache_guard.get(&script_root).cloned() - } { + if let Some(cached_script) = cache.get(&script_root).await { return Ok(Some(cached_script)); } @@ -412,8 +428,7 @@ impl DataStore for NtxDataStore { })?; // Handle response. if let Some(script) = maybe_script { - let mut cache_guard = cache.lock().await; - cache_guard.put(script_root, script.clone()); + cache.put(script_root, script.clone()).await; Ok(Some(script)) } else { Ok(None) diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index db641928e7..322ef98c88 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -18,7 +18,7 @@ workspace = true # Enables depedencies intended for build script generation of version metadata. vergen = ["dep:vergen", "dep:vergen-gitcl"] # Enables utility functions for testing traces created by some other crate's stack. 
-testing = ["dep:tokio", "miden-objects/testing"] +testing = ["miden-objects/testing"] [dependencies] anyhow = { workspace = true } @@ -28,6 +28,7 @@ fs-err = { workspace = true } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } +lru = { workspace = true } miden-objects = { workspace = true } opentelemetry = { version = "0.31" } opentelemetry-otlp = { default-features = false, features = ["grpc-tonic", "tls-roots", "trace"], version = "0.31" } @@ -35,6 +36,7 @@ opentelemetry_sdk = { features = ["rt-tokio", "testing"], version = "0.31" } rand = { workspace = true } serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } +tokio = { workspace = true } tonic = { workspace = true } tower-http = { features = ["catch-panic"], workspace = true } tracing = { workspace = true } @@ -47,8 +49,6 @@ url = { workspace = true } # This must match the version expected by `vergen-gitcl`. vergen = { "version" = "9.0", optional = true } vergen-gitcl = { features = ["cargo", "rustc"], optional = true, version = "1.0" } -# Optional dependencies enabled by `testing` feature. -tokio = { optional = true, workspace = true } [dev-dependencies] thiserror = { workspace = true } diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 4ff02f9397..c894e31fb6 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -7,6 +7,7 @@ pub mod formatting; pub mod grpc; pub mod limiter; pub mod logging; +pub mod lru_cache; pub mod panic; pub mod tracing; pub mod version; diff --git a/crates/utils/src/lru_cache.rs b/crates/utils/src/lru_cache.rs new file mode 100644 index 0000000000..de325bf104 --- /dev/null +++ b/crates/utils/src/lru_cache.rs @@ -0,0 +1,32 @@ +use std::hash::Hash; +use std::num::NonZeroUsize; +use std::sync::Arc; + +use lru::LruCache as InnerCache; +use tokio::sync::Mutex; + +/// A newtype wrapper around an LRU cache. Ensures that the cache lock is not held across +/// await points. 
+#[derive(Clone)] +pub struct LruCache(Arc>>); + +impl LruCache +where + K: Hash + Eq, + V: Clone, +{ + /// Creates a new cache with the given capacity. + pub fn new(capacity: NonZeroUsize) -> Self { + Self(Arc::new(Mutex::new(InnerCache::new(capacity)))) + } + + /// Retrieves a value from the cache. + pub async fn get(&self, key: &K) -> Option { + self.0.lock().await.get(key).cloned() + } + + /// Puts a value into the cache. + pub async fn put(&mut self, key: K, value: V) { + self.0.lock().await.put(key, value); + } +} From 56273a2e9a4678d30a8fda83bfb7cf2d2447ba60 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 2 Dec 2025 10:23:43 -0300 Subject: [PATCH 024/125] fix: prover clients WASM lint & block prover (#1407) * fix: prover clients WASM lint & block prover * review: add changelog entry --- CHANGELOG.md | 1 + Cargo.lock | 1 - Makefile | 3 ++- crates/remote-prover-client/Cargo.toml | 15 ++++++------- .../src/remote_prover/batch_prover.rs | 4 ++-- .../src/remote_prover/block_prover.rs | 22 ++++++++++++------- .../src/remote_prover/tx_prover.rs | 4 ++-- 7 files changed, 28 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 158f40461e..fd6f20c208 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). - Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/miden-node/pull/1403)). +- Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/miden-node/pull/1407)). 
## v0.12.6 diff --git a/Cargo.lock b/Cargo.lock index b422d98633..f9de21f373 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3040,7 +3040,6 @@ version = "0.13.0" dependencies = [ "fs-err", "getrandom 0.3.4", - "miden-node-proto", "miden-node-proto-build", "miden-objects", "miden-tx", diff --git a/Makefile b/Makefile index 7a968862cf..5522c2d63b 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,7 @@ BUILD_PROTO=BUILD_PROTO=1 clippy: ## Runs Clippy with configs cargo clippy --locked --all-targets --all-features --workspace -- -D warnings cargo clippy --locked --all-targets --all-features -p miden-remote-prover -- -D warnings + cargo clippy --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover -- -D warnings .PHONY: fix @@ -90,7 +91,7 @@ check: ## Check all targets and features for errors without code generation .PHONY: build build: ## Builds all crates and re-builds protobuf bindings for proto crates ${BUILD_PROTO} cargo build --locked --workspace - ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features # no-std compatible build + ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover # no-std compatible build # --- installing ---------------------------------------------------------------------------------- diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index a68fb10c53..32cc07be8b 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -15,7 +15,7 @@ crate-type = ["lib"] [features] batch-prover = ["dep:miden-objects", "dep:tokio"] -block-prover = ["dep:miden-node-proto", "dep:miden-objects", "dep:tokio"] +block-prover = ["dep:miden-objects", "dep:tokio"] default = ["std"] std = ["miden-objects/std", "miden-tx/std"] 
tx-prover = ["dep:miden-objects", "dep:miden-tx", "dep:tokio"] @@ -38,13 +38,12 @@ tonic-web = { optional = true, version = "0.14" } workspace = true [dependencies] -miden-node-proto = { optional = true, workspace = true } -miden-objects = { optional = true, workspace = true } -miden-tx = { optional = true, workspace = true } -prost = { default-features = false, features = ["derive"], version = "0.14" } -thiserror = { workspace = true } -tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } -tonic-prost = { workspace = true } +miden-objects = { optional = true, workspace = true } +miden-tx = { optional = true, workspace = true } +prost = { default-features = false, features = ["derive"], version = "0.14" } +thiserror = { workspace = true } +tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } +tonic-prost = { workspace = true } [build-dependencies] fs-err = { workspace = true } diff --git a/crates/remote-prover-client/src/remote_prover/batch_prover.rs b/crates/remote-prover-client/src/remote_prover/batch_prover.rs index ed9c7a382e..e035833d9c 100644 --- a/crates/remote-prover-client/src/remote_prover/batch_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/batch_prover.rs @@ -71,8 +71,8 @@ impl RemoteBatchProver { #[cfg(target_arch = "wasm32")] let new_client = { - let mut fetch_options = - tonic_web_wasm_client::FetchOptions::new().timeout(self.timeout); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); let web_client = tonic_web_wasm_client::Client::new_with_options( self.endpoint.clone(), fetch_options, diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index d74e9f158d..569b32af51 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -3,12 +3,10 @@ 
use alloc::sync::Arc; use alloc::vec::Vec; use core::time::Duration; -use miden_node_proto::BlockProofRequest; use miden_objects::batch::{OrderedBatches, ProvenBatch}; use miden_objects::block::{BlockHeader, BlockInputs, BlockProof, ProposedBlock, ProvenBlock}; use miden_objects::transaction::{OrderedTransactionHeaders, TransactionHeader}; use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; -use miden_tx::utils::{ByteReader, ByteWriter}; use tokio::sync::Mutex; use super::generated::api_client::ApiClient; @@ -74,8 +72,8 @@ impl RemoteBlockProver { #[cfg(target_arch = "wasm32")] let new_client = { - let mut fetch_options = - tonic_web_wasm_client::FetchOptions::new().timeout(self.timeout); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); let web_client = tonic_web_wasm_client::Client::new_with_options( self.endpoint.clone(), fetch_options, @@ -123,8 +121,16 @@ impl RemoteBlockProver { })? .clone(); - let request = BlockProofRequest { tx_batches, block_header, block_inputs }; - let request = tonic::Request::new(request.into()); + let block_proof_request = + ProposedBlock::new_at(block_inputs, tx_batches.into_vec(), block_header.timestamp()) + .map_err(|err| { + RemoteProverClientError::other_with_source( + "failed to create proposed block", + err, + ) + })?; + + let request = tonic::Request::new(block_proof_request.into()); let response = client.prove(request).await.map_err(|err| { RemoteProverClientError::other_with_source("failed to prove block", err) @@ -153,8 +159,8 @@ impl TryFrom for BlockProof { } } -impl From for proto::ProofRequest { - fn from(proposed_block: BlockProofRequest) -> Self { +impl From for proto::ProofRequest { + fn from(proposed_block: ProposedBlock) -> Self { proto::ProofRequest { proof_type: proto::ProofType::Block.into(), payload: proposed_block.to_bytes(), diff --git a/crates/remote-prover-client/src/remote_prover/tx_prover.rs 
b/crates/remote-prover-client/src/remote_prover/tx_prover.rs
index b94c9a9ea8..575fc2ed2f 100644
--- a/crates/remote-prover-client/src/remote_prover/tx_prover.rs
+++ b/crates/remote-prover-client/src/remote_prover/tx_prover.rs
@@ -72,8 +72,8 @@ impl RemoteTransactionProver {
 
         #[cfg(target_arch = "wasm32")]
         let new_client = {
-            let mut fetch_options =
-                tonic_web_wasm_client::FetchOptions::new().timeout(self.timeout);
+            let fetch_options =
+                tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout);
             let web_client = tonic_web_wasm_client::Client::new_with_options(
                 self.endpoint.clone(),
                 fetch_options,

From b7b8506ac52fbb7a286935b38a8d0a8e793f7a72 Mon Sep 17 00:00:00 2001
From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com>
Date: Tue, 2 Dec 2025 17:39:20 +0200
Subject: [PATCH 025/125] ci: install cargo-msrv using binstall (#1411)

This will hopefully reduce CI cache usage.
---
 .github/workflows/msrv.yml | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml
index 354e2afa92..2d15e8cf16 100644
--- a/.github/workflows/msrv.yml
+++ b/.github/workflows/msrv.yml
@@ -26,8 +26,10 @@ jobs:
       - uses: Swatinem/rust-cache@v2
         with:
           save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }}
-      - name: Install cargo-msrv
-        run: cargo install cargo-msrv
+      - uses: taiki-e/install-action@v2
+        with:
+          tool: cargo-binstall
+      - run: cargo binstall --no-confirm cargo-msrv
       - name: Check MSRV for each workspace member
         run: |
           ./scripts/check-msrv.sh

From e92bef40c368a8b2985e170ef3ea32c432abac5f Mon Sep 17 00:00:00 2001
From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com>
Date: Wed, 3 Dec 2025 10:55:55 +0200
Subject: [PATCH 026/125] ci: only check workspace docs (#1412)

This is in an effort to reduce the cache usage of this action which is
2.6GB before this PR.
--- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 12e76f99cc..d7012500c9 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -89,7 +89,7 @@ jobs: with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - name: Build docs - run: make doc + run: cargo doc --no-deps --workspace --all-features --locked unused_deps: name: check for unused dependencies From 0d3776cd3e3e77c72148669a057e7483fb24c12c Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 3 Dec 2025 16:06:03 +0200 Subject: [PATCH 027/125] ci: disable debug info (#1421) This is another build cache size tweak. --- .github/workflows/lint.yml | 4 ++++ .github/workflows/msrv.yml | 4 ++++ .github/workflows/network-monitor.yml | 4 ++++ .github/workflows/stress-test-check.yml | 4 ++++ .github/workflows/test.yml | 5 +++++ 5 files changed, 21 insertions(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d7012500c9..8cebcbe973 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -16,6 +16,10 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: typos: runs-on: Linux-ARM64-Runner diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 2d15e8cf16..00b4530310 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -14,6 +14,10 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. 
+ CARGO_PROFILE_DEV_DEBUG: 0 + jobs: # Check MSRV (aka `rust-version`) in `Cargo.toml` is valid for workspace members msrv: diff --git a/.github/workflows/network-monitor.yml b/.github/workflows/network-monitor.yml index 507980803d..1a69216171 100644 --- a/.github/workflows/network-monitor.yml +++ b/.github/workflows/network-monitor.yml @@ -16,6 +16,10 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: check: name: check diff --git a/.github/workflows/stress-test-check.yml b/.github/workflows/stress-test-check.yml index 488a2c0681..47182f8f9f 100644 --- a/.github/workflows/stress-test-check.yml +++ b/.github/workflows/stress-test-check.yml @@ -16,6 +16,10 @@ concurrency: group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" cancel-in-progress: true +env: + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: stress-test-check: name: stress-test-check diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5ff13ee38b..cfee5fc3cd 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,6 +16,11 @@ concurrency: group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" cancel-in-progress: true +env: + # Reduce cache usage by removing debug information. + # This works for tests as well because TEST inherits from DEV. 
+ CARGO_PROFILE_DEV_DEBUG: 0 + jobs: test: name: test From 41860a0a4156879c1cfd985d39099027e7d65686 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Thu, 4 Dec 2025 09:04:25 +1300 Subject: [PATCH 028/125] chore: Misc followups for block validation (#1402) --- bin/remote-prover/src/api/prover.rs | 11 +-- .../block-producer/src/block_builder/mod.rs | 46 +++++++++-- crates/block-producer/src/errors.rs | 2 +- crates/block-producer/src/lib.rs | 2 +- crates/block-producer/src/validator/mod.rs | 77 +++++++++++++++---- crates/proto/src/generated/validator.rs | 41 ++++------ crates/validator/src/server/mod.rs | 6 +- proto/proto/validator.proto | 10 +-- 8 files changed, 132 insertions(+), 63 deletions(-) diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index b03c06aeae..30971ac561 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -172,16 +172,17 @@ impl ProverRpcApi { return Err(Status::unimplemented("Block prover is not enabled")); }; let BlockProofRequest { tx_batches, block_header, block_inputs } = proof_request; - let block_proof = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? - .prove(tx_batches, block_header.clone(), block_inputs) - .map_err(internal_error)?; // Record the commitment of the block in the current tracing span. let block_id = block_header.commitment(); tracing::Span::current().record("block_id", tracing::field::display(&block_id)); + let block_proof = prover + .try_lock() + .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
+ .prove(tx_batches, block_header, block_inputs) + .map_err(internal_error)?; + Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) } } diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index ca742904c0..50d3d29462 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use futures::FutureExt; use futures::never::Never; use miden_block_prover::LocalBlockProver; +use miden_lib::block::build_block; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_objects::MIN_PROOF_SECURITY_LEVEL; use miden_objects::batch::{OrderedBatches, ProvenBatch}; @@ -27,7 +28,7 @@ use url::Url; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; use crate::store::StoreClient; -use crate::validator::BlockProducerValidatorClient; +use crate::validator::{BlockProducerValidatorClient, BodyDiff, HeaderDiff, ValidatorError}; use crate::{COMPONENT, TelemetryInjectorExt}; // BLOCK BUILDER @@ -230,16 +231,43 @@ impl BlockBuilder { proposed_block: ProposedBlock, block_inputs: BlockInputs, ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, BlockBody), BuildBlockError> { - let response = self + // Concurrently build the block and validate it via the validator. + let build_result = tokio::task::spawn_blocking({ + let proposed_block = proposed_block.clone(); + move || build_block(proposed_block) + }); + let (header, body) = self .validator - .validate_block(proposed_block.clone()) + .sign_block(proposed_block.clone()) + .await + .map_err(|err| BuildBlockError::ValidateBlockFailed(err.into()))?; + let (expected_header, expected_body) = build_result .await - .map_err(BuildBlockError::ValidateBlockFailed)?; + .map_err(|err| BuildBlockError::other(format!("task join error: {err}")))? 
+ .map_err(BuildBlockError::ProposeBlockFailed)?; - // TODO: Check that the returned header and body match the proposed block. + // Check that the header and body returned from the validator is consistent with the + // proposed block. + // TODO(sergerad): Update Eq implementation once signatures are part of the header. + if header != expected_header { + let diff = HeaderDiff { + validator_header: header, + expected_header, + } + .into(); + return Err(BuildBlockError::ValidateBlockFailed( + ValidatorError::HeaderMismatch(diff).into(), + )); + } + if body != expected_body { + let diff = BodyDiff { validator_body: body, expected_body }.into(); + return Err(BuildBlockError::ValidateBlockFailed( + ValidatorError::BodyMismatch(diff).into(), + )); + } let (ordered_batches, ..) = proposed_block.into_parts(); - Ok((ordered_batches, block_inputs, response.header, response.body)) + Ok((ordered_batches, block_inputs, header, body)) } #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] @@ -268,6 +296,7 @@ impl BlockBuilder { body: BlockBody, block_proof: BlockProof, ) -> Result { + // SAFETY: The header and body are assumed valid and consistent with the proof. let proven_block = ProvenBlock::new_unchecked(header, body, block_proof); if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { return Err(BuildBlockError::SecurityLevelTooLow( @@ -275,6 +304,9 @@ impl BlockBuilder { MIN_PROOF_SECURITY_LEVEL, )); } + // TODO(sergerad): Consider removing this validation. Once block proving is implemented, + // this would be replaced with verifying the proof returned from the prover against + // the block header. validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; Ok(proven_block) @@ -464,7 +496,7 @@ impl BlockProver { /// passed in the proposed block. /// /// This expects that transactions from the proposed block and proven block are in the same -/// order, as define by [`OrderedTransactionHeaders`]. 
+/// order, as defined by [`OrderedTransactionHeaders`]. fn validate_tx_headers( proven_block: &ProvenBlock, proposed_txs: &OrderedTransactionHeaders, diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 0a0aa176cd..e60bdc9995 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -209,7 +209,7 @@ pub enum BuildBlockError { #[error("failed to propose block")] ProposeBlockFailed(#[source] ProposedBlockError), #[error("failed to validate block")] - ValidateBlockFailed(#[source] ValidatorError), + ValidateBlockFailed(#[source] Box), #[error("failed to prove block")] ProveBlockFailed(#[source] BlockProverError), /// We sometimes randomly inject errors into the batch building process to test our failure diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index ed505b4337..81fc5b83eb 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -10,7 +10,7 @@ mod block_builder; mod domain; mod mempool; pub mod store; -pub mod validator; +mod validator; #[cfg(feature = "testing")] pub mod errors; diff --git a/crates/block-producer/src/validator/mod.rs b/crates/block-producer/src/validator/mod.rs index 49b72185c8..37162870b5 100644 --- a/crates/block-producer/src/validator/mod.rs +++ b/crates/block-producer/src/validator/mod.rs @@ -1,4 +1,7 @@ +use std::fmt::{Display, Formatter}; + use miden_node_proto::clients::{Builder, ValidatorClient}; +use miden_node_proto::errors::{ConversionError, MissingFieldHelper}; use miden_node_proto::generated as proto; use miden_objects::block::{BlockBody, BlockHeader, ProposedBlock}; use miden_objects::utils::{Deserializable, Serializable}; @@ -15,25 +18,61 @@ use crate::COMPONENT; pub enum ValidatorError { #[error("gRPC transport error: {0}")] Transport(#[from] tonic::Status), - #[error("Failed to convert header: {0}")] + #[error("response content error: {0}")] + ResponseContent(#[from] ConversionError), + 
#[error("failed to convert header: {0}")] HeaderConversion(String), - #[error("Failed to deserialize body: {0}")] + #[error("failed to deserialize body: {0}")] BodyDeserialization(String), + #[error("validator header does not match the request: {0}")] + HeaderMismatch(Box), + #[error("validator body does not match the request: {0}")] + BodyMismatch(Box), } -// VALIDATE BLOCK RESPONSE +// VALIDATION DIFF TYPES // ================================================================================================ +/// Represents a difference between validator and expected block headers +#[derive(Debug, Clone)] +pub struct HeaderDiff { + pub validator_header: BlockHeader, + pub expected_header: BlockHeader, +} + +impl Display for HeaderDiff { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Expected Header:")?; + writeln!(f, "{:?}", self.expected_header)?; + writeln!(f, "============================")?; + writeln!(f, "Validator Header:")?; + writeln!(f, "{:?}", self.validator_header)?; + Ok(()) + } +} + +/// Represents a difference between validator and expected block bodies #[derive(Debug, Clone)] -pub struct ValidateBlockResponse { - pub header: BlockHeader, - pub body: BlockBody, +pub struct BodyDiff { + pub validator_body: BlockBody, + pub expected_body: BlockBody, +} + +impl Display for BodyDiff { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Expected Body:")?; + writeln!(f, "{:?}", self.expected_body)?; + writeln!(f, "============================")?; + writeln!(f, "Validator Body:")?; + writeln!(f, "{:?}", self.validator_body)?; + Ok(()) + } } // VALIDATOR CLIENT // ================================================================================================ -/// Interface to the validator's block-producer gRPC API. +/// Interface to the validator's gRPC API. /// /// Essentially just a thin wrapper around the generated gRPC client which improves type safety. 
#[derive(Clone, Debug)] @@ -58,28 +97,34 @@ impl BlockProducerValidatorClient { } #[instrument(target = COMPONENT, name = "validator.client.validate_block", skip_all, err)] - pub async fn validate_block( + pub async fn sign_block( &self, proposed_block: ProposedBlock, - ) -> Result { + ) -> Result<(BlockHeader, BlockBody), ValidatorError> { // Send request and receive response. let message = proto::blockchain::ProposedBlock { proposed_block: proposed_block.to_bytes(), }; let request = tonic::Request::new(message); - let response = self.client.clone().validate_block(request).await?; - let response = response.into_inner(); + let response = self.client.clone().sign_block(request).await?; + let signed_block = response.into_inner(); - // Extract header from response (should always be present). - let header_proto = response.header.expect("validator always returns a header"); + // Extract header from response. + let header_proto = signed_block + .header + .ok_or(miden_node_proto::generated::blockchain::BlockHeader::missing_field("header")) + .map_err(ValidatorError::ResponseContent)?; let header = BlockHeader::try_from(header_proto) .map_err(|err| ValidatorError::HeaderConversion(err.to_string()))?; - // Extract body from response (should always be present). - let body_proto = response.body.expect("validator always returns a body"); + // Extract body from response. 
+ let body_proto = signed_block + .body + .ok_or(miden_node_proto::generated::blockchain::BlockBody::missing_field("body")) + .map_err(ValidatorError::ResponseContent)?; let body = BlockBody::read_from_bytes(&body_proto.block_body) .map_err(|err| ValidatorError::BodyDeserialization(err.to_string()))?; - Ok(ValidateBlockResponse { header, body }) + Ok((header, body)) } } diff --git a/crates/proto/src/generated/validator.rs b/crates/proto/src/generated/validator.rs index 3c3ec8a571..53f7aeb590 100644 --- a/crates/proto/src/generated/validator.rs +++ b/crates/proto/src/generated/validator.rs @@ -9,13 +9,13 @@ pub struct ValidatorStatus { #[prost(string, tag = "2")] pub status: ::prost::alloc::string::String, } -/// Response message for ValidateBlock RPC. +/// Response message for SignBlock RPC. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct ValidateBlockResponse { - /// The block header (required - always present). +pub struct SignedBlock { + /// The block header. #[prost(message, optional, tag = "1")] pub header: ::core::option::Option, - /// The block body (required - always present). + /// The block body. #[prost(message, optional, tag = "2")] pub body: ::core::option::Option, } @@ -158,13 +158,10 @@ pub mod api_client { self.inner.unary(req, path, codec).await } /// Validates a proposed block and returns the block header and body. 
- pub async fn validate_block( + pub async fn sign_block( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await @@ -174,12 +171,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/validator.Api/ValidateBlock", - ); + let path = http::uri::PathAndQuery::from_static("/validator.Api/SignBlock"); let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("validator.Api", "ValidateBlock")); + req.extensions_mut().insert(GrpcMethod::new("validator.Api", "SignBlock")); self.inner.unary(req, path, codec).await } } @@ -208,13 +202,10 @@ pub mod api_server { request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Validates a proposed block and returns the block header and body. - async fn validate_block( + async fn sign_block( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; } /// Validator API for the Validator component. 
#[derive(Debug)] @@ -380,15 +371,15 @@ pub mod api_server { }; Box::pin(fut) } - "/validator.Api/ValidateBlock" => { + "/validator.Api/SignBlock" => { #[allow(non_camel_case_types)] - struct ValidateBlockSvc(pub Arc); + struct SignBlockSvc(pub Arc); impl< T: Api, > tonic::server::UnaryService< super::super::blockchain::ProposedBlock, - > for ValidateBlockSvc { - type Response = super::ValidateBlockResponse; + > for SignBlockSvc { + type Response = super::SignedBlock; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -401,7 +392,7 @@ pub mod api_server { ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::validate_block(&inner, request).await + ::sign_block(&inner, request).await }; Box::pin(fut) } @@ -412,7 +403,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = ValidateBlockSvc(inner); + let method = SignBlockSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 5284468a50..638b1b1828 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -103,10 +103,10 @@ impl api_server::Api for ValidatorServer { } /// Validates a proposed block and returns the block header and body. - async fn validate_block( + async fn sign_block( &self, request: tonic::Request, - ) -> Result, tonic::Status> { + ) -> Result, tonic::Status> { let proposed_block_bytes = request.into_inner().proposed_block; // Deserialize the proposed block. 
@@ -126,7 +126,7 @@ impl api_server::Api for ValidatorServer { let body_proto = proto::blockchain::BlockBody { block_body: body.to_bytes() }; // Both header and body are required fields and must always be populated - let response = proto::validator::ValidateBlockResponse { + let response = proto::validator::SignedBlock { header: Some(header_proto), body: Some(body_proto), }; diff --git a/proto/proto/validator.proto b/proto/proto/validator.proto index 8f12b2483f..b6645ef9f8 100644 --- a/proto/proto/validator.proto +++ b/proto/proto/validator.proto @@ -18,7 +18,7 @@ service Api { rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (google.protobuf.Empty) {} // Validates a proposed block and returns the block header and body. - rpc ValidateBlock(blockchain.ProposedBlock) returns (ValidateBlockResponse) {} + rpc SignBlock(blockchain.ProposedBlock) returns (SignedBlock) {} } // STATUS @@ -36,11 +36,11 @@ message ValidatorStatus { // VALIDATE BLOCK RESPONSE // ================================================================================================ -// Response message for ValidateBlock RPC. -message ValidateBlockResponse { - // The block header (required - always present). +// Response message for SignBlock RPC. +message SignedBlock { + // The block header. blockchain.BlockHeader header = 1; - // The block body (required - always present). + // The block body. 
blockchain.BlockBody body = 2; } From 28973e3724c5e062baf47ad57a0c252349136b06 Mon Sep 17 00:00:00 2001 From: juan518munoz <62400508+juan518munoz@users.noreply.github.com> Date: Wed, 3 Dec 2025 17:59:03 -0300 Subject: [PATCH 029/125] fix: make get account proof retrieve latest known state (#1422) --- CHANGELOG.md | 1 + crates/proto/src/generated/rpc_store.rs | 5 +++-- crates/store/src/db/models/queries/accounts.rs | 13 +++++++++---- proto/proto/store/rpc.proto | 5 +++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd6f20c208..e04239a904 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ - RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). - Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/miden-node/pull/1403)). - Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/miden-node/pull/1407)). +- Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/miden-node/issues/1422)). ## v0.12.6 diff --git a/crates/proto/src/generated/rpc_store.rs b/crates/proto/src/generated/rpc_store.rs index 9feea358c3..c962475044 100644 --- a/crates/proto/src/generated/rpc_store.rs +++ b/crates/proto/src/generated/rpc_store.rs @@ -18,8 +18,9 @@ pub struct AccountProofRequest { /// ID of the account for which we want to get data #[prost(message, optional, tag = "1")] pub account_id: ::core::option::Option, - /// Block at which we'd like to get this data. If present, must be close to the chain tip. - /// If not present, data from the latest block will be returned. + /// Optional block height at which to return the proof. + /// + /// Defaults to current chain tip if unspecified. 
#[prost(message, optional, tag = "2")] pub block_num: ::core::option::Option, /// Request for additional account details; valid only for public accounts. diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 52be3ee84f..8189403bc4 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -93,8 +93,7 @@ pub(crate) fn select_account( Ok(info) } -/// Select account details at a specific block number from the DB using the given -/// [`SqliteConnection`]. +/// Select account details as they are at the given block height. /// /// # Returns /// @@ -118,7 +117,11 @@ pub(crate) fn select_account( /// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// account_id = ?1 -/// AND block_num = ?2 +/// AND block_num <= ?2 +/// ORDER BY +/// block_num DESC +/// LIMIT +/// 1 /// ``` pub(crate) fn select_historical_account_at( conn: &mut SqliteConnection, @@ -134,8 +137,10 @@ pub(crate) fn select_historical_account_at( .filter( schema::accounts::account_id .eq(account_id.to_bytes()) - .and(schema::accounts::block_num.eq(block_num.to_raw_sql())), + .and(schema::accounts::block_num.le(block_num.to_raw_sql())), ) + .order_by(schema::accounts::block_num.desc()) + .limit(1) .get_result::<(AccountRaw, Option>)>(conn) .optional()? .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; diff --git a/proto/proto/store/rpc.proto b/proto/proto/store/rpc.proto index 6f78444c78..6ac9352255 100644 --- a/proto/proto/store/rpc.proto +++ b/proto/proto/store/rpc.proto @@ -147,8 +147,9 @@ message AccountProofRequest { // ID of the account for which we want to get data account.AccountId account_id = 1; - // Block at which we'd like to get this data. If present, must be close to the chain tip. - // If not present, data from the latest block will be returned. + // Optional block height at which to return the proof. 
+ // + // Defaults to current chain tip if unspecified. optional blockchain.BlockNumber block_num = 2; // Request for additional account details; valid only for public accounts. From 6a8c4ae6b2938bc71e9ff1e9f9ff6084668ee842 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare <43513081+bobbinth@users.noreply.github.com> Date: Wed, 3 Dec 2025 16:38:40 -0800 Subject: [PATCH 030/125] chore: bring up miden-base dependencies to the latest next (#1425) --- Cargo.lock | 60 +++++++++++-------- crates/block-producer/src/test_utils/mod.rs | 2 +- .../src/test_utils/proven_tx.rs | 2 +- crates/proto/src/domain/note.rs | 23 ++++--- crates/proto/src/domain/nullifier.rs | 2 +- crates/proto/src/domain/transaction.rs | 2 +- crates/store/src/db/tests.rs | 17 +++--- crates/store/src/server/rpc_api.rs | 4 +- crates/store/src/state.rs | 2 +- 9 files changed, 64 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f9de21f373..760172b424 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -338,9 +338,9 @@ checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bech32" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" +checksum = "32637268377fc7b10a8c6d51de3e7fba1ce5dd371a96e342b34e6078db558e7f" [[package]] name = "beef" @@ -1082,21 +1082,22 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" 
+checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" dependencies = [ "proc-macro2", "quote", + "rustc_version 0.4.1", "syn 2.0.111", ] @@ -1876,9 +1877,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64", "bytes", @@ -2241,9 +2242,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "libm" @@ -2318,9 +2319,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "logos" @@ -2472,7 +2473,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" dependencies = [ "miden-objects", "thiserror 2.0.17", @@ -2568,7 +2569,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" 
dependencies = [ "fs-err", "miden-assembly", @@ -2932,7 +2933,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" dependencies = [ "bech32", "getrandom 0.3.4", @@ -2942,6 +2943,7 @@ dependencies = [ "miden-crypto", "miden-mast-package", "miden-processor", + "miden-protocol-macros", "miden-stdlib", "miden-utils-sync", "miden-verifier", @@ -2974,6 +2976,16 @@ dependencies = [ "winter-prover", ] +[[package]] +name = "miden-protocol-macros" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "miden-prover" version = "0.19.1" @@ -3073,7 +3085,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3091,7 +3103,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" dependencies = [ "miden-lib", "miden-objects", @@ -3104,7 +3116,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#0cd80afdbd9b9d26e27341fad895063ac53eacdc" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" dependencies = [ 
"miden-objects", "miden-tx", @@ -4200,9 +4212,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.14.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d93e596a829ebe00afa41c3a056e6308d6b8a4c7d869edf184e2c91b1ba564" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -4213,9 +4225,9 @@ dependencies = [ [[package]] name = "prost-reflect" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89a3ac73ec9a9118131a4594c9d336631a07852220a1d0ae03ee36b04503a063" +checksum = "b89455ef41ed200cafc47c76c552ee7792370ac420497e551f16123a9135f76e" dependencies = [ "logos", "miette", @@ -6067,9 +6079,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", diff --git a/crates/block-producer/src/test_utils/mod.rs b/crates/block-producer/src/test_utils/mod.rs index 0695ceadf3..7ab15e62ed 100644 --- a/crates/block-producer/src/test_utils/mod.rs +++ b/crates/block-producer/src/test_utils/mod.rs @@ -34,7 +34,7 @@ impl Random { } pub fn draw_tx_id(&mut self) -> TransactionId { - self.0.draw_word().into() + TransactionId::new_unchecked(self.0.draw_word()) } pub fn draw_account_id(&mut self) -> AccountId { diff --git a/crates/block-producer/src/test_utils/proven_tx.rs b/crates/block-producer/src/test_utils/proven_tx.rs index b6a946894c..b4bcc0745c 100644 --- a/crates/block-producer/src/test_utils/proven_tx.rs +++ b/crates/block-producer/src/test_utils/proven_tx.rs @@ -108,7 +108,7 @@ impl MockProvenTxBuilder { .map(|index| { let 
nullifier = Word::from([ONE, ONE, ONE, Felt::new(index)]); - Nullifier::from(nullifier) + Nullifier::new_unchecked(nullifier) }) .collect(); diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index a61884f69b..7629ce3070 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -135,19 +135,18 @@ impl TryFrom<&proto::note::NoteInclusionInBlockProof> for (NoteId, NoteInclusion .clone(), )?; + let note_id = Word::try_from( + proof + .note_id + .as_ref() + .ok_or(proto::note::NoteInclusionInBlockProof::missing_field(stringify!(note_id)))? + .id + .as_ref() + .ok_or(proto::note::NoteId::missing_field(stringify!(id)))?, + )?; + Ok(( - Word::try_from( - proof - .note_id - .as_ref() - .ok_or(proto::note::NoteInclusionInBlockProof::missing_field(stringify!( - note_id - )))? - .id - .as_ref() - .ok_or(proto::note::NoteId::missing_field(stringify!(id)))?, - )? - .into(), + NoteId::new_unchecked(note_id), NoteInclusionProof::new( proof.block_num.into(), proof.note_index_in_block.try_into()?, diff --git a/crates/proto/src/domain/nullifier.rs b/crates/proto/src/domain/nullifier.rs index f511731f91..d0462c53c0 100644 --- a/crates/proto/src/domain/nullifier.rs +++ b/crates/proto/src/domain/nullifier.rs @@ -28,7 +28,7 @@ impl TryFrom for Nullifier { fn try_from(value: proto::primitives::Digest) -> Result { let digest: Word = value.try_into()?; - Ok(digest.into()) + Ok(Nullifier::new_unchecked(digest)) } } diff --git a/crates/proto/src/domain/transaction.rs b/crates/proto/src/domain/transaction.rs index 53ccf6b0c8..1393513f1f 100644 --- a/crates/proto/src/domain/transaction.rs +++ b/crates/proto/src/domain/transaction.rs @@ -39,7 +39,7 @@ impl TryFrom for TransactionId { fn try_from(value: proto::primitives::Digest) -> Result { let digest: Word = value.try_into()?; - Ok(digest.into()) + Ok(TransactionId::new_unchecked(digest)) } } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 
aa8a5617c5..7cf8a722f8 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -317,7 +317,8 @@ fn sql_select_notes_different_execution_hints() { let res = queries::insert_notes(conn, &[(note_none, None)]); assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note = &queries::select_notes_by_id(conn, &[num_to_word(0).into()]).unwrap()[0]; + let note_id = NoteId::new_unchecked(num_to_word(0)); + let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::none()); @@ -342,7 +343,8 @@ fn sql_select_notes_different_execution_hints() { let res = queries::insert_notes(conn, &[(note_always, None)]); assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note = &queries::select_notes_by_id(conn, &[num_to_word(1).into()]).unwrap()[0]; + let note_id = NoteId::new_unchecked(num_to_word(1)); + let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::always()); let note_after_block = NoteRecord { @@ -365,7 +367,8 @@ fn sql_select_notes_different_execution_hints() { let res = queries::insert_notes(conn, &[(note_after_block, None)]); assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note = &queries::select_notes_by_id(conn, &[num_to_word(2).into()]).unwrap()[0]; + let note_id = NoteId::new_unchecked(num_to_word(2)); + let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; assert_eq!( note.metadata.execution_hint(), NoteExecutionHint::after_block(12.into()).unwrap() @@ -1152,7 +1155,7 @@ fn notes() { let note = NoteRecord { block_num: block_num_1, note_index, - note_id: new_note.id().into(), + note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), metadata: NoteMetadata::new( sender, @@ -1199,7 +1202,7 @@ fn notes() { let note2 = NoteRecord { block_num: block_num_2, note_index: note.note_index, - note_id: 
new_note.id().into(), + note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), metadata: note.metadata, details: None, @@ -1229,7 +1232,7 @@ fn notes() { // test query notes by id let notes = vec![note.clone(), note2]; - let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::from(note.note_id))); + let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::new_unchecked(note.note_id))); let res = queries::select_notes_by_id(conn, ¬e_ids).unwrap(); assert_eq!(res, notes); @@ -1402,7 +1405,7 @@ fn num_to_word(n: u64) -> Word { } fn num_to_nullifier(n: u64) -> Nullifier { - Nullifier::from(num_to_word(n)) + Nullifier::new_unchecked(num_to_word(n)) } fn mock_block_account_update(account_id: AccountId, num: u64) -> BlockAccountUpdate { diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 54df4a70d1..5f877f5306 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -274,7 +274,7 @@ impl rpc_server::Rpc for StoreApi { let note_ids: Vec = convert_digests_to_words::(note_ids)?; - let note_ids: Vec = note_ids.into_iter().map(From::from).collect(); + let note_ids: Vec = note_ids.into_iter().map(NoteId::new_unchecked).collect(); let notes = self .state @@ -587,7 +587,7 @@ impl rpc_server::Rpc for StoreApi { let note_records: Vec<_> = tx_header .output_notes .iter() - .filter_map(|note_id| note_map.get(¬e_id.into()).cloned()) + .filter_map(|note_id| note_map.get(¬e_id.as_word()).cloned()) .collect(); // Convert to proto using the helper method diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index e91a114772..fd7d622706 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -350,7 +350,7 @@ impl State { let note_record = NoteRecord { block_num, note_index, - note_id: note.id().into(), + note_id: note.id().as_word(), note_commitment: note.commitment(), metadata: *note.metadata(), details, From c182c6fca490a42ce1efe5c3969b69ca9bf10f18 Mon 
Sep 17 00:00:00 2001 From: juan518munoz <62400508+juan518munoz@users.noreply.github.com> Date: Thu, 4 Dec 2025 03:54:20 -0300 Subject: [PATCH 031/125] ci: revert cargo-msrv binstall from #1411 (#1423) For some reason this causes failures when used with gh cache. --- .github/workflows/msrv.yml | 6 ++---- scripts/check-msrv.sh | 6 +++--- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 00b4530310..046ca7663a 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -30,10 +30,8 @@ jobs: - uses: Swatinem/rust-cache@v2 with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - uses: taiki-e/install-action@v2 - with: - tool: cargo-binstall - - run: cargo binstall --no-confirm cargo-msrv + - name: Install cargo-msrv + run: cargo install cargo-msrv - name: Check MSRV for each workspace member run: | ./scripts/check-msrv.sh diff --git a/scripts/check-msrv.sh b/scripts/check-msrv.sh index 0bde2955f0..6058a0ace2 100755 --- a/scripts/check-msrv.sh +++ b/scripts/check-msrv.sh @@ -90,9 +90,9 @@ while IFS=$'\t' read -r pkg_id package_name manifest_path rust_version; do echo "Searching for correct MSRV for $package_name..." - # Determine the currently-installed stable toolchain version (e.g., "1.81.0") + # Determine the currently-installed stable toolchain version (e.g., "1.91.1") latest_stable="$(rustup run stable rustc --version 2>/dev/null | awk '{print $2}')" - if [[ -z "$latest_stable" ]]; then latest_stable="1.81.0"; fi + if [[ -z "$latest_stable" ]]; then latest_stable="1.91.1"; fi # Search for the actual MSRV starting from the current one if actual_msrv=$(cargo msrv find \ @@ -150,4 +150,4 @@ if [[ -n "$failed_packages" ]]; then else echo "ALL WORKSPACE MEMBERS PASSED MSRV CHECKS!" 
exit 0 -fi \ No newline at end of file +fi From 676a850a10c0f1a5f2d2286edf20916385802bf9 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 4 Dec 2025 06:19:20 -0300 Subject: [PATCH 032/125] chore(monitor): display success rate as percentage (#1420) --- CHANGELOG.md | 1 + bin/network-monitor/assets/index.html | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e04239a904..c00c445b67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). +- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). ### Fixes diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index 060c294292..7a59c403c4 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -49,6 +49,15 @@ let statusData = null; let updateInterval = null; + function formatSuccessRate(successCount, failureCount) { + const total = successCount + failureCount; + if (!total) { + return 'N/A'; + } + + return `${((successCount / total) * 100).toFixed(1)}%`; + } + async function fetchStatus() { try { const response = await fetch('/status'); @@ -243,7 +252,7 @@
Success Rate: - ${details.FaucetTest.success_count}/${details.FaucetTest.success_count + details.FaucetTest.failure_count} + ${formatSuccessRate(details.FaucetTest.success_count, details.FaucetTest.failure_count)}
Last Response Time: @@ -326,12 +335,8 @@ Counter Increment:
- Successes: - ${details.NtxIncrement.success_count} -
-
- Failures: - ${details.NtxIncrement.failure_count} + Success Rate: + ${formatSuccessRate(details.NtxIncrement.success_count, details.NtxIncrement.failure_count)}
${details.NtxIncrement.last_tx_id ? `
@@ -384,7 +389,7 @@
Success Rate: - ${service.testDetails.success_count}/${service.testDetails.success_count + service.testDetails.failure_count} + ${formatSuccessRate(service.testDetails.success_count, service.testDetails.failure_count)}
Last Response Time: From 03c61dad126766d8904f3a6cc55623e78b128262 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 4 Dec 2025 06:25:25 -0300 Subject: [PATCH 033/125] chore(monitor): dont display internal errors (#1424) --- CHANGELOG.md | 1 + bin/network-monitor/assets/index.html | 22 ---------------------- bin/network-monitor/src/counter.rs | 12 ++++++++++-- 3 files changed, 11 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c00c445b67..12ef4e6be2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). - Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). +- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). ### Fixes diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index 7a59c403c4..72fb60b759 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -399,27 +399,6 @@ Last Proof Size: ${(service.testDetails.proof_size_bytes / 1024).toFixed(2)} KB
- ${service.testError ? ` -
- Test Error: - - ${(() => { - try { - const errorObj = JSON.parse(service.testError); - return errorObj.code || 'Unknown error code'; - } catch (e) { - return service.testError; - } - })()} - - -
- ` : ''}
` : ''} @@ -436,7 +415,6 @@
- ${service.error ? `
${service.error}
` : ''} ${detailsHtml}
diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index a67952cccc..0cc0aca476 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -287,7 +287,11 @@ fn handle_increment_failure(details: &mut IncrementDetails, error: &anyhow::Erro /// Build a `ServiceStatus` snapshot from the current increment details and last error. fn build_increment_status(details: &IncrementDetails, last_error: Option) -> ServiceStatus { - let status = if details.failure_count == 0 { + let status = if last_error.is_some() { + // If the most recent attempt failed, surface the service as unhealthy so the + // dashboard reflects that the increment pipeline is not currently working. + Status::Unhealthy + } else if details.failure_count == 0 { Status::Healthy } else if details.success_count == 0 { Status::Unhealthy @@ -423,7 +427,11 @@ fn build_tracking_status( details: &CounterTrackingDetails, last_error: Option, ) -> ServiceStatus { - let status = if details.current_value.is_some() { + let status = if last_error.is_some() { + // If the latest poll failed, surface the service as unhealthy even if we have + // a previously cached value, so the dashboard shows that tracking is degraded. 
+ Status::Unhealthy + } else if details.current_value.is_some() { Status::Healthy } else { Status::Unknown From 8c8fbb10546340fc8c0cfa7d8af1c5f925cae7fa Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 4 Dec 2025 08:41:41 -0300 Subject: [PATCH 034/125] feat: add chain tip to block producer status (#1419) --- CHANGELOG.md | 1 + bin/network-monitor/assets/index.html | 32 +++++++++++++++++--- bin/network-monitor/src/status.rs | 3 ++ crates/block-producer/src/mempool/mod.rs | 7 +++++ crates/block-producer/src/server/mod.rs | 7 ++++- crates/proto/src/generated/block_producer.rs | 6 ++++ crates/rpc/src/server/api.rs | 1 + proto/proto/block_producer.proto | 6 ++++ 8 files changed, 57 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12ef4e6be2..6c7c4c0ad6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). - Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). - Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). +- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). ### Fixes diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index 72fb60b759..2b05e52621 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -199,16 +199,38 @@ ` : ''} ${details.RpcStatus.store_status ? `
- Store: ${details.RpcStatus.store_status.version} - ${details.RpcStatus.store_status.status} - ${details.RpcStatus.store_status.chain_tip ? ` (Tip: ${details.RpcStatus.store_status.chain_tip})` : ''} +
Store
+
+ Version: + ${details.RpcStatus.store_status.version} +
+
+ Status: + ${details.RpcStatus.store_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.store_status.chain_tip} +
` : ''} ${details.RpcStatus.block_producer_status ? `
- Block Producer: - ${details.RpcStatus.block_producer_status.version} - ${details.RpcStatus.block_producer_status.status} +
Block Producer
+
+ Version: + ${details.RpcStatus.block_producer_status.version} +
+
+ Status: + ${details.RpcStatus.block_producer_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.block_producer_status.chain_tip} +
- Mempool: + Mempool stats:
Unbatched TXs: ${details.RpcStatus.block_producer_status.mempool.unbatched_transactions} diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index f00ada304c..ced33624ca 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -138,6 +138,8 @@ pub struct StoreStatusDetails { pub struct BlockProducerStatusDetails { pub version: String, pub status: Status, + /// The block producer's current view of the chain tip height. + pub chain_tip: u32, /// Mempool statistics for this block producer. pub mempool: MempoolStatusDetails, } @@ -211,6 +213,7 @@ impl From for BlockProducerStatusDetails { Self { version: value.version, status: value.status.into(), + chain_tip: value.chain_tip, mempool: MempoolStatusDetails { unbatched_transactions: mempool_stats.unbatched_transactions, proposed_batches: mempool_stats.proposed_batches, diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 7332d9c08a..15c63dddd9 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -169,6 +169,13 @@ impl Mempool { } } + /// Returns the current chain tip height as seen by the mempool. + /// + /// This reflects the latest committed block that the block producer is aware of. + pub fn chain_tip(&self) -> BlockNumber { + self.chain_tip + } + // TRANSACTION & BATCH LIFECYCLE // -------------------------------------------------------------------------------------------- diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 0d028f5a4e..63f1e0e548 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -216,6 +216,8 @@ impl BlockProducer { /// Mempool statistics that are updated periodically to avoid locking the mempool. #[derive(Clone, Copy, Default)] struct MempoolStats { + /// The mempool's current view of the chain tip height. 
+ chain_tip: BlockNumber, /// Number of transactions currently in the mempool waiting to be batched. unbatched_transactions: u64, /// Number of batches currently being proven. @@ -291,6 +293,7 @@ impl api_server::Api for BlockProducerRpcServer { Ok(tonic::Response::new(proto::block_producer::BlockProducerStatus { version: env!("CARGO_PKG_VERSION").to_string(), status: "connected".to_string(), + chain_tip: mempool_stats.chain_tip.as_u32(), mempool_stats: Some(mempool_stats.into()), })) } @@ -360,9 +363,10 @@ impl BlockProducerRpcServer { loop { interval.tick().await; - let (unbatched_transactions, proposed_batches, proven_batches) = { + let (chain_tip, unbatched_transactions, proposed_batches, proven_batches) = { let mempool = mempool.lock().await; ( + mempool.chain_tip(), mempool.unbatched_transactions_count() as u64, mempool.proposed_batches_count() as u64, mempool.proven_batches_count() as u64, @@ -371,6 +375,7 @@ impl BlockProducerRpcServer { let mut cache = cached_mempool_stats.write().await; *cache = MempoolStats { + chain_tip, unbatched_transactions, proposed_batches, proven_batches, diff --git a/crates/proto/src/generated/block_producer.rs b/crates/proto/src/generated/block_producer.rs index 10b82369b8..20d9a6e82e 100644 --- a/crates/proto/src/generated/block_producer.rs +++ b/crates/proto/src/generated/block_producer.rs @@ -8,6 +8,12 @@ pub struct BlockProducerStatus { /// The block producer's status. #[prost(string, tag = "2")] pub status: ::prost::alloc::string::String, + /// The block producer's current view of the chain tip height. + /// + /// This is the height of the latest block that the block producer considers + /// to be part of the canonical chain. + #[prost(fixed32, tag = "4")] + pub chain_tip: u32, /// Statistics about the mempool. 
#[prost(message, optional, tag = "3")]
    pub mempool_stats: ::core::option::Option,
diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs
index 062e7793ed..52f65270eb 100644
--- a/crates/rpc/src/server/api.rs
+++ b/crates/rpc/src/server/api.rs
@@ -568,6 +568,7 @@ impl api_server::Api for RpcService {
 proto::block_producer::BlockProducerStatus {
 status: "unreachable".to_string(),
 version: "-".to_string(),
+ chain_tip: 0,
 mempool_stats: Some(MempoolStats::default()),
 },
 )),
diff --git a/proto/proto/block_producer.proto b/proto/proto/block_producer.proto
index 3f353946e5..d1823f70b7 100644
--- a/proto/proto/block_producer.proto
+++ b/proto/proto/block_producer.proto
@@ -54,6 +54,12 @@ message BlockProducerStatus {
 // The block producer's status.
 string status = 2;
+ // The block producer's current view of the chain tip height.
+ //
+ // This is the height of the latest block that the block producer considers
+ // to be part of the canonical chain.
+ fixed32 chain_tip = 4;
+
 // Statistics about the mempool.
 MempoolStats mempool_stats = 3;
 }
From ce873ed4257388f1a05b3b1f27b7a706a73d8308 Mon Sep 17 00:00:00 2001
From: juan518munoz <62400508+juan518munoz@users.noreply.github.com>
Date: Thu, 4 Dec 2025 14:51:45 -0300
Subject: [PATCH 035/125] fix(store): account upsert failing on insert (new account) (#1415)

This was due to foreign key order of operations problems.
Co-authored-by: Ignacio Amigo --- .../store/src/db/models/queries/accounts.rs | 122 +++++++++--------- 1 file changed, 59 insertions(+), 63 deletions(-) diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 8189403bc4..5899c53589 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -779,16 +779,21 @@ pub(crate) fn upsert_accounts( let mut count = 0; for update in accounts { let account_id = update.account_id(); - // Extract the 30-bit prefix to provide easy look ups for NTB - // Do not store prefix for accounts that are not network + let network_account_id_prefix = if account_id.is_network() { Some(NetworkAccountPrefix::try_from(account_id)?) } else { None }; - let full_account: Option = match update.details() { - AccountUpdateDetails::Private => None, + // NOTE: we collect storage / asset inserts to apply them only after the account row is + // written. The storage and vault tables have FKs pointing to `accounts (account_id, + // block_num)`, so inserting them earlier would violate those constraints when inserting a + // brand-new account. 
+ let (full_account, pending_storage_inserts, pending_asset_inserts) = match update.details() + { + AccountUpdateDetails::Private => (None, vec![], vec![]), + AccountUpdateDetails::Delta(delta) if delta.is_full_state() => { let account = Account::try_from(delta)?; debug_assert_eq!(account_id, account.id()); @@ -800,69 +805,57 @@ pub(crate) fn upsert_accounts( }); } + // collect storage-map inserts to apply after account upsert + let mut storage = Vec::new(); for (slot_idx, slot) in account.storage().slots().iter().enumerate() { - match slot { - StorageSlot::Value(_) => {}, - StorageSlot::Map(storage_map) => { - for (key, value) in storage_map.entries() { - // SAFETY: We can safely unwrap the conversion to u8 because - // accounts have a limit of 255 storage elements - insert_account_storage_map_value( - conn, - account_id, - block_num, - u8::try_from(slot_idx).unwrap(), - *key, - *value, - )?; - } - }, + if let StorageSlot::Map(storage_map) = slot { + // SAFETY: We can safely unwrap the conversion to u8 because + // accounts have a limit of 255 storage elements + for (key, value) in storage_map.entries() { + storage.push(( + account_id, + u8::try_from(slot_idx).unwrap(), + *key, + *value, + )); + } } } - Some(account) + (Some(account), storage, Vec::new()) }, + AccountUpdateDetails::Delta(delta) => { let mut rows = select_details_stmt(conn, account_id)?.into_iter(); - let Some(account) = rows.next() else { + let Some(account_before) = rows.next() else { return Err(DatabaseError::AccountNotFoundInDb(account_id)); }; - // --- process storage map updates ---------------------------- + // --- collect storage map updates ---------------------------- + let mut storage = Vec::new(); for (&slot, map_delta) in delta.storage().maps() { for (key, value) in map_delta.entries() { - insert_account_storage_map_value( - conn, - account_id, - block_num, - slot, - (*key).into(), - *value, - )?; + storage.push((account_id, slot, (*key).into(), *value)); } } // apply delta to the 
account; we need to do this before we process asset updates // because we currently need to get the current value of fungible assets from the // account - let account = apply_delta(account, delta, &update.final_state_commitment())?; + let account_after = + apply_delta(account_before, delta, &update.final_state_commitment())?; // --- process asset updates ---------------------------------- + let mut assets = Vec::new(); + for (faucet_id, _) in delta.vault().fungible().iter() { - let current_amount = account.vault().get_balance(*faucet_id).unwrap(); + let current_amount = account_after.vault().get_balance(*faucet_id).unwrap(); let asset: Asset = FungibleAsset::new(*faucet_id, current_amount)?.into(); - let asset_update_or_removal = - if current_amount == 0 { None } else { Some(asset) }; - - insert_account_vault_asset( - conn, - account.id(), - block_num, - asset.vault_key(), - asset_update_or_removal, - )?; + let update_or_remove = if current_amount == 0 { None } else { Some(asset) }; + + assets.push((account_id, asset.vault_key(), update_or_remove)); } for (asset, delta_action) in delta.vault().non_fungible().iter() { @@ -870,16 +863,10 @@ pub(crate) fn upsert_accounts( NonFungibleDeltaAction::Add => Some(Asset::NonFungible(*asset)), NonFungibleDeltaAction::Remove => None, }; - insert_account_vault_asset( - conn, - account.id(), - block_num, - asset.vault_key(), - asset_update, - )?; + assets.push((account_id, asset.vault_key(), asset_update)); } - Some(account) + (Some(account_after), storage, assets) }, }; @@ -895,6 +882,16 @@ pub(crate) fn upsert_accounts( .execute(conn)?; } + // mark previous rows as non-latest and insert NEW account row + diesel::update(schema::accounts::table) + .filter( + schema::accounts::account_id + .eq(&account_id.to_bytes()) + .and(schema::accounts::is_latest.eq(true)), + ) + .set(schema::accounts::is_latest.eq(false)) + .execute(conn)?; + let account_value = AccountRowInsert { account_id: account_id.to_bytes(), 
network_account_id_prefix: network_account_id_prefix @@ -910,22 +907,21 @@ pub(crate) fn upsert_accounts( is_latest: true, }; - // Update any existing rows for this account_id to set is_latest = false - diesel::update(schema::accounts::table) - .filter( - schema::accounts::account_id - .eq(&account_id.to_bytes()) - .and(schema::accounts::is_latest.eq(true)), - ) - .set(schema::accounts::is_latest.eq(false)) + diesel::insert_into(schema::accounts::table) + .values(&account_value) .execute(conn)?; - let v = account_value.clone(); - let inserted = diesel::insert_into(schema::accounts::table).values(&v).execute(conn)?; + // insert pending storage map entries + for (acc_id, slot, key, value) in pending_storage_inserts { + insert_account_storage_map_value(conn, acc_id, block_num, slot, key, value)?; + } - debug_assert_eq!(inserted, 1); + // insert pending vault-asset entries + for (acc_id, vault_key, update) in pending_asset_inserts { + insert_account_vault_asset(conn, acc_id, block_num, vault_key, update)?; + } - count += inserted; + count += 1; } Ok(count) From 5b3438b07d68dba88e6c0319701b70e3270ffccb Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 4 Dec 2025 15:42:58 -0300 Subject: [PATCH 036/125] chore: use dependencies from workspace (#1417) --- Cargo.toml | 4 ++-- bin/remote-prover/Cargo.toml | 2 +- bin/stress-test/Cargo.toml | 2 +- crates/block-producer/Cargo.toml | 2 +- crates/ntx-builder/Cargo.toml | 2 +- crates/proto/Cargo.toml | 2 +- crates/remote-prover-client/Cargo.toml | 11 +++-------- crates/rpc/Cargo.toml | 2 +- crates/store/Cargo.toml | 2 +- crates/utils/Cargo.toml | 2 +- crates/validator/Cargo.toml | 2 +- 11 files changed, 14 insertions(+), 19 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 88705f3424..b91b68ab2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,7 +75,7 @@ lru = { default-features = false, version = "0.16" } pretty_assertions = { version = "1.4" } # breaking change 
`DecodeError::new` is not exposed anymore # but is assumed public by some internal dependency -prost = { version = "=0.14.1" } +prost = { default-features = false, version = "=0.14.1" } protox = { version = "=0.9.0" } rand = { version = "0.9" } rand_chacha = { version = "0.9" } @@ -85,7 +85,7 @@ thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } toml = { version = "0.9" } -tonic = { version = "0.14" } +tonic = { default-features = false, version = "0.14" } tonic-prost = { version = "0.14" } tonic-prost-build = { version = "0.14" } tonic-reflection = { version = "0.14" } diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index d3b28797d1..921c7a8770 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -42,7 +42,7 @@ pingora-core = { version = "0.6" } pingora-limits = { version = "0.6" } pingora-proxy = { version = "0.6" } prometheus = { version = "0.14" } -prost = { default-features = false, features = ["derive"], version = "0.14" } +prost = { default-features = false, features = ["derive"], workspace = true } reqwest = { version = "0.12" } semver = { version = "1.0" } serde = { features = ["derive"], version = "1.0" } diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index 9c3029a829..fa0bbec82d 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -31,5 +31,5 @@ miden-objects = { workspace = true } rand = { workspace = true } rayon = { version = "1.10" } tokio = { workspace = true } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } url = { workspace = true } diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index eba7c6a13b..3b0c373002 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -35,7 +35,7 @@ rand = { version = "0.9" } thiserror = { workspace = true } tokio = { 
features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["transport"], workspace = true } +tonic = { default-features = true, features = ["transport"], workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index fc27e4ff4b..4c2f9ab530 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -24,7 +24,7 @@ miden-tx = { default-features = true, workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } tracing = { workspace = true } url = { workspace = true } diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 0b09430305..ffb86a23ab 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -23,7 +23,7 @@ miden-node-utils = { workspace = true } miden-objects = { workspace = true } prost = { workspace = true } thiserror = { workspace = true } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } tonic-prost = { workspace = true } url = { workspace = true } diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 32cc07be8b..262cac78fb 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -22,16 +22,11 @@ tx-prover = ["dep:miden-objects", "dep:miden-tx", "dep:tokio"] [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] getrandom = { features = ["wasm_js"], version = "0.3" } -tonic = { default-features = false, features = ["codegen"], version = "0.14" } +tonic = { features = ["codegen"], workspace = true } tonic-web-wasm-client = { default-features = 
false, version = "0.8" } [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] -tonic = { default-features = false, features = [ - "codegen", - "tls-native-roots", - "tls-ring", - "transport", -], version = "0.14" } +tonic = { features = ["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } tonic-web = { optional = true, version = "0.14" } [lints] @@ -40,7 +35,7 @@ workspace = true [dependencies] miden-objects = { optional = true, workspace = true } miden-tx = { optional = true, workspace = true } -prost = { default-features = false, features = ["derive"], version = "0.14" } +prost = { default-features = false, features = ["derive"], workspace = true } thiserror = { workspace = true } tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } tonic-prost = { workspace = true } diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 540f85eb2b..d7a0e1a35c 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -28,7 +28,7 @@ semver = { version = "1.0" } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["tls-native-roots", "tls-ring"], workspace = true } +tonic = { default-features = true, features = ["tls-native-roots", "tls-ring"], workspace = true } tonic-reflection = { workspace = true } tonic-web = { version = "0.14" } tower = { workspace = true } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index bad38b9e29..97fb701ba2 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -38,7 +38,7 @@ thiserror = { workspace = true } tokio = { features = ["fs", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } toml = { version = "0.9" } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } tonic-reflection = { workspace = 
true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 322ef98c88..15bc8528a1 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -37,7 +37,7 @@ rand = { workspace = true } serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } tokio = { workspace = true } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } tower-http = { features = ["catch-panic"], workspace = true } tracing = { workspace = true } tracing-forest = { features = ["chrono"], optional = true, version = "0.2" } diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 23e32bcbf3..8f50ef4934 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -25,7 +25,7 @@ miden-node-utils = { features = ["testing"], workspace = true } miden-objects = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["transport"], workspace = true } +tonic = { default-features = true, features = ["transport"], workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } From 89918eaf72536c6b0d6fad53d2b4546e1b2f9107 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 4 Dec 2025 22:10:21 +0100 Subject: [PATCH 037/125] feat: add genesis regression (#1427) Co-authored-by: Ignacio Amigo --- crates/store/src/db/tests.rs | 188 +++++++++++++++++++++++++++++++++++ 1 file changed, 188 insertions(+) diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 7cf8a722f8..c89a03300e 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1513,3 +1513,191 @@ fn mock_account_code_and_storage( .build_existing() .unwrap() } + +// GENESIS REGRESSION TESTS +// 
================================================================================================ + +/// Verifies genesis block with account containing vault assets can be inserted. +#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_assets() { + use crate::genesis::GenesisState; + + let component = + AccountComponent::compile("export.foo push.1 end", TransactionKernel::assembler(), vec![]) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 1000).unwrap(); + + let account = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with account containing storage maps can be inserted. 
+#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_storage_map() { + use miden_objects::account::StorageMap; + + use crate::genesis::GenesisState; + + let storage_map = StorageMap::with_entries(vec![ + ( + Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), + ), + ( + Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), + ), + ]) + .unwrap(); + + let component_storage = vec![StorageSlot::Map(storage_map), StorageSlot::Value(Word::empty())]; + + let component = AccountComponent::compile( + "export.foo push.1 end", + TransactionKernel::assembler(), + component_storage, + ) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with account containing both vault assets and storage maps. 
+#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_assets_and_storage() { + use miden_objects::account::StorageMap; + + use crate::genesis::GenesisState; + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + + let storage_map = StorageMap::with_entries(vec![( + Word::from([Felt::new(100), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]), + )]) + .unwrap(); + + let component_storage = vec![StorageSlot::Value(Word::empty()), StorageSlot::Map(storage_map)]; + + let component = AccountComponent::compile( + "export.foo push.1 end", + TransactionKernel::assembler(), + component_storage, + ) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([3u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with multiple accounts of different types. +/// Tests realistic genesis scenario with basic accounts, assets, and storage. 
+#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_multiple_accounts() { + use miden_objects::account::StorageMap; + + use crate::genesis::GenesisState; + + let component1 = + AccountComponent::compile("export.foo push.1 end", TransactionKernel::assembler(), vec![]) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account1 = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component1) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 2000).unwrap(); + + let component2 = + AccountComponent::compile("export.bar push.2 end", TransactionKernel::assembler(), vec![]) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account2 = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component2) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let storage_map = StorageMap::with_entries(vec![( + Word::from([Felt::new(5), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(15), Felt::new(25), Felt::new(35), Felt::new(45)]), + )]) + .unwrap(); + + let component_storage = vec![StorageSlot::Map(storage_map)]; + + let component3 = AccountComponent::compile( + "export.baz push.3 end", + TransactionKernel::assembler(), + component_storage, + ) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account3 = AccountBuilder::new([3u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + 
.with_component(component3) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = + GenesisState::new(vec![account1, account2, account3], test_fee_params(), 1, 0); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} From 74b679f35e4ed19d8d6c3e5a2e5da06eb1825158 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Sun, 7 Dec 2025 13:40:35 -0800 Subject: [PATCH 038/125] chore: update miden-base dependencies to latest --- Cargo.lock | 98 +++++++++++++------ crates/block-producer/src/test_utils/mod.rs | 2 +- .../src/test_utils/proven_tx.rs | 2 +- crates/proto/src/domain/note.rs | 2 +- crates/proto/src/domain/nullifier.rs | 2 +- crates/proto/src/domain/transaction.rs | 2 +- crates/store/src/db/tests.rs | 13 +-- crates/store/src/server/rpc_api.rs | 2 +- 8 files changed, 81 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 760172b424..6a0360c0f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,9 +332,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" [[package]] name = "bech32" @@ -504,9 +504,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.48" +version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" +checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", "jobserver", @@ -1610,6 +1610,18 @@ dependencies = [ "wasm-bindgen", ] 
+[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "gimli" version = "0.32.3" @@ -2298,14 +2310,14 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +checksum = "786c72d9739fc316a7acf9b22d9c2794ac9cb91074e9668feb04304ab7219783" dependencies = [ "libc", "neli", "thiserror 2.0.17", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2473,7 +2485,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" dependencies = [ "miden-objects", "thiserror 2.0.17", @@ -2499,9 +2511,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.18.4" +version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0048d2d987f215bc9633ced499a8c488d0e2474350c765f904b87cae3462acb7" +checksum = "395e5cc76b64e24533ee55c8d1ff90305b8cad372bdbea4f4f324239e36a895f" dependencies = [ "blake3", "cc", @@ -2531,9 +2543,9 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.18.4" +version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3b38aace84e157fb02aba8f8ae85bbf8c3afdcdbdf8190fbe7476f3be7ef44" +checksum = "c89641b257eb395cf03105ac1c6cbdf3fd9a5450749696af9835c3c47fc6806e" dependencies = [ "quote", "syn 2.0.111", @@ -2569,7 +2581,7 @@ 
dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" dependencies = [ "fs-err", "miden-assembly", @@ -2933,7 +2945,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" dependencies = [ "bech32", "getrandom 0.3.4", @@ -2979,7 +2991,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" dependencies = [ "proc-macro2", "quote", @@ -3085,7 +3097,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3103,7 +3115,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" dependencies = [ "miden-lib", "miden-objects", @@ -3116,7 +3128,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#15e787d2c9ee4744be2fedbc5cdd10abf639a5cb" +source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" dependencies = [ "miden-objects", "miden-tx", @@ -3248,9 +3260,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", @@ -3291,27 +3303,31 @@ dependencies = [ [[package]] name = "neli" -version = "0.6.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +checksum = "87fe4204517c0dafc04a1d99ecb577d52c0ffc81e1bbe5cf322769aa8fbd1b05" dependencies = [ + "bitflags 2.10.0", "byteorder", + "derive_builder", + "getset", "libc", "log", "neli-proc-macros", + "parking_lot", ] [[package]] name = "neli-proc-macros" -version = "0.1.4" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +checksum = "90e502fe5db321c6e0ae649ccda600675680125a8e8dee327744fe1910b19332" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -4080,7 +4096,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit 0.23.9", ] [[package]] @@ -4107,6 +4123,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" 
+version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "proc-macro2" version = "1.0.103" @@ -5298,9 +5336,9 @@ dependencies = [ [[package]] name = "term" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2111ef44dae28680ae9752bb89409e7310ca33a8c621ebe7b106cf5c928b3ac0" +checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ "windows-sys 0.61.2", ] @@ -5610,9 +5648,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "5d7cbc3b4b49633d57a0509303158ca50de80ae32c265093b24c414705807832" dependencies = [ "indexmap 2.12.1", "toml_datetime 0.7.3", diff --git a/crates/block-producer/src/test_utils/mod.rs b/crates/block-producer/src/test_utils/mod.rs index 7ab15e62ed..4f9cfcb8fd 100644 --- a/crates/block-producer/src/test_utils/mod.rs +++ b/crates/block-producer/src/test_utils/mod.rs @@ -34,7 +34,7 @@ impl Random { } pub fn draw_tx_id(&mut self) -> TransactionId { - TransactionId::new_unchecked(self.0.draw_word()) + TransactionId::from_raw(self.0.draw_word()) } pub fn draw_account_id(&mut self) -> AccountId { diff --git a/crates/block-producer/src/test_utils/proven_tx.rs b/crates/block-producer/src/test_utils/proven_tx.rs index b4bcc0745c..f08a602f3d 100644 --- a/crates/block-producer/src/test_utils/proven_tx.rs +++ b/crates/block-producer/src/test_utils/proven_tx.rs @@ -108,7 +108,7 @@ impl MockProvenTxBuilder { .map(|index| { let nullifier = Word::from([ONE, ONE, ONE, Felt::new(index)]); - Nullifier::new_unchecked(nullifier) + Nullifier::from_raw(nullifier) 
}) .collect(); diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index 7629ce3070..9e25ac422a 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -146,7 +146,7 @@ impl TryFrom<&proto::note::NoteInclusionInBlockProof> for (NoteId, NoteInclusion )?; Ok(( - NoteId::new_unchecked(note_id), + NoteId::from_raw(note_id), NoteInclusionProof::new( proof.block_num.into(), proof.note_index_in_block.try_into()?, diff --git a/crates/proto/src/domain/nullifier.rs b/crates/proto/src/domain/nullifier.rs index d0462c53c0..0301a38e38 100644 --- a/crates/proto/src/domain/nullifier.rs +++ b/crates/proto/src/domain/nullifier.rs @@ -28,7 +28,7 @@ impl TryFrom for Nullifier { fn try_from(value: proto::primitives::Digest) -> Result { let digest: Word = value.try_into()?; - Ok(Nullifier::new_unchecked(digest)) + Ok(Nullifier::from_raw(digest)) } } diff --git a/crates/proto/src/domain/transaction.rs b/crates/proto/src/domain/transaction.rs index 1393513f1f..783263a267 100644 --- a/crates/proto/src/domain/transaction.rs +++ b/crates/proto/src/domain/transaction.rs @@ -39,7 +39,7 @@ impl TryFrom for TransactionId { fn try_from(value: proto::primitives::Digest) -> Result { let digest: Word = value.try_into()?; - Ok(TransactionId::new_unchecked(digest)) + Ok(TransactionId::from_raw(digest)) } } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index c89a03300e..96fd77666b 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -9,7 +9,7 @@ use miden_lib::account::auth::AuthRpoFalcon512; use miden_lib::note::create_p2id_note; use miden_lib::transaction::TransactionKernel; use miden_node_proto::domain::account::AccountSummary; -use miden_node_utils::fee::test_fee_params; +use miden_node_utils::fee::{test_fee, test_fee_params}; use miden_objects::account::auth::PublicKeyCommitment; use miden_objects::account::delta::AccountUpdateDetails; use miden_objects::account::{ @@ -317,7 +317,7 @@ fn 
sql_select_notes_different_execution_hints() { let res = queries::insert_notes(conn, &[(note_none, None)]); assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note_id = NoteId::new_unchecked(num_to_word(0)); + let note_id = NoteId::from_raw(num_to_word(0)); let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::none()); @@ -343,7 +343,7 @@ fn sql_select_notes_different_execution_hints() { let res = queries::insert_notes(conn, &[(note_always, None)]); assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note_id = NoteId::new_unchecked(num_to_word(1)); + let note_id = NoteId::from_raw(num_to_word(1)); let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::always()); @@ -367,7 +367,7 @@ fn sql_select_notes_different_execution_hints() { let res = queries::insert_notes(conn, &[(note_after_block, None)]); assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note_id = NoteId::new_unchecked(num_to_word(2)); + let note_id = NoteId::from_raw(num_to_word(2)); let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; assert_eq!( note.metadata.execution_hint(), @@ -1232,7 +1232,7 @@ fn notes() { // test query notes by id let notes = vec![note.clone(), note2]; - let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::new_unchecked(note.note_id))); + let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::from_raw(note.note_id))); let res = queries::select_notes_by_id(conn, ¬e_ids).unwrap(); assert_eq!(res, notes); @@ -1405,7 +1405,7 @@ fn num_to_word(n: u64) -> Word { } fn num_to_nullifier(n: u64) -> Nullifier { - Nullifier::new_unchecked(num_to_word(n)) + Nullifier::from_raw(num_to_word(n)) } fn mock_block_account_update(account_id: AccountId, num: u64) -> BlockAccountUpdate { @@ -1448,6 +1448,7 @@ fn mock_block_transaction(account_id: 
AccountId, num: u64) -> TransactionHeader final_account_commitment, input_notes, output_notes, + test_fee(), ) } diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 5f877f5306..0e57c1b0e1 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -274,7 +274,7 @@ impl rpc_server::Rpc for StoreApi { let note_ids: Vec = convert_digests_to_words::(note_ids)?; - let note_ids: Vec = note_ids.into_iter().map(NoteId::new_unchecked).collect(); + let note_ids: Vec = note_ids.into_iter().map(NoteId::from_raw).collect(); let notes = self .state From 51a8a384c8d6661a67dfa8ae81da250e59e99795 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Mon, 8 Dec 2025 15:47:51 -0300 Subject: [PATCH 039/125] fix: separate public gRPC API from internal schema (#1401) This reorganizes our gRPC schema such that the public API no longer relies on our internal services or types. This also fixes the accidental exposure of our internal services via gRPC server reflection. 
--- CHANGELOG.md | 1 + bin/network-monitor/src/counter.rs | 4 +- bin/network-monitor/src/deploy/mod.rs | 2 +- bin/network-monitor/src/status.rs | 4 +- bin/stress-test/src/seeding/mod.rs | 2 +- bin/stress-test/src/store/mod.rs | 32 +- crates/block-producer/src/server/mod.rs | 29 +- crates/block-producer/src/server/tests.rs | 2 +- crates/block-producer/src/store/mod.rs | 20 +- crates/ntx-builder/src/store.rs | 8 +- crates/proto/build.rs | 2 - crates/proto/src/clients/mod.rs | 6 +- crates/proto/src/domain/account.rs | 118 +- crates/proto/src/domain/batch.rs | 10 +- crates/proto/src/domain/block.rs | 12 +- crates/proto/src/domain/nullifier.rs | 20 +- crates/proto/src/generated/block_producer.rs | 71 +- .../src/generated/block_producer_store.rs | 789 ----- crates/proto/src/generated/mod.rs | 5 +- .../proto/src/generated/ntx_builder_store.rs | 843 ----- crates/proto/src/generated/rpc.rs | 753 ++++- crates/proto/src/generated/rpc_store.rs | 1810 ---------- crates/proto/src/generated/shared.rs | 34 - crates/proto/src/generated/store.rs | 2958 +++++++++++++++++ crates/rpc/src/server/api.rs | 66 +- crates/rpc/src/tests.rs | 7 +- crates/store/src/db/mod.rs | 4 +- crates/store/src/server/api.rs | 10 +- crates/store/src/server/block_producer.rs | 32 +- crates/store/src/server/mod.rs | 16 +- crates/store/src/server/ntx_builder.rs | 34 +- crates/store/src/server/rpc_api.rs | 76 +- proto/build.rs | 20 +- proto/proto/README.md | 19 + .../proto/{ => internal}/block_producer.proto | 61 +- proto/proto/internal/store.proto | 342 ++ proto/proto/{ => internal}/validator.proto | 0 proto/proto/rpc.proto | 528 ++- proto/proto/store/block_producer.proto | 164 - proto/proto/store/ntx_builder.proto | 113 - proto/proto/store/rpc.proto | 510 --- proto/proto/store/shared.proto | 45 - proto/src/lib.rs | 8 - 43 files changed, 4718 insertions(+), 4872 deletions(-) delete mode 100644 crates/proto/src/generated/block_producer_store.rs delete mode 100644 crates/proto/src/generated/ntx_builder_store.rs 
delete mode 100644 crates/proto/src/generated/rpc_store.rs delete mode 100644 crates/proto/src/generated/shared.rs create mode 100644 crates/proto/src/generated/store.rs create mode 100644 proto/proto/README.md rename proto/proto/{ => internal}/block_producer.proto (67%) create mode 100644 proto/proto/internal/store.proto rename proto/proto/{ => internal}/validator.proto (100%) delete mode 100644 proto/proto/store/block_producer.proto delete mode 100644 proto/proto/store/ntx_builder.proto delete mode 100644 proto/proto/store/rpc.proto delete mode 100644 proto/proto/store/shared.proto diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c7c4c0ad6..3c5d82b346 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). - Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). - Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). +- [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). 
### Fixes diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 0cc0aca476..bc258cb846 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -12,7 +12,7 @@ use miden_lib::AuthScheme; use miden_lib::account::interface::AccountInterface; use miden_lib::utils::ScriptBuilder; use miden_node_proto::clients::RpcClient; -use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; +use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; use miden_objects::account::auth::AuthSecretKey; use miden_objects::account::{Account, AccountFile, AccountHeader, AccountId}; @@ -513,7 +513,7 @@ async fn create_and_submit_network_note( .await .context("Failed to submit proven transaction to RPC")? .into_inner() - .block_height + .block_num .into(); info!("Submitted proven transaction to RPC"); diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index 58278d92e3..4e094a3ce5 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -10,7 +10,7 @@ use std::time::Duration; use anyhow::{Context, Result}; use miden_lib::transaction::TransactionKernel; use miden_node_proto::clients::{Builder, RpcClient}; -use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; +use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; use miden_objects::account::{Account, AccountId, PartialAccount, PartialStorage}; use miden_objects::assembly::{DefaultSourceManager, Library, LibraryPath, Module, ModuleKind}; diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index ced33624ca..9107b3d134 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -11,9 +11,7 @@ use miden_node_proto::clients::{ RpcClient, }; use 
miden_node_proto::generated as proto; -use miden_node_proto::generated::block_producer::BlockProducerStatus; -use miden_node_proto::generated::rpc::RpcStatus; -use miden_node_proto::generated::rpc_store::StoreStatus; +use miden_node_proto::generated::rpc::{BlockProducerStatus, RpcStatus, StoreStatus}; use serde::{Deserialize, Serialize}; use tokio::sync::watch; use tokio::time::MissedTickBehavior; diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index 225001a3bb..ea845572b4 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -14,7 +14,7 @@ use miden_lib::note::create_p2id_note; use miden_lib::utils::Serializable; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; -use miden_node_proto::generated::rpc_store::rpc_client::RpcClient; +use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_store::{DataDirectory, GenesisState, Store}; use miden_node_utils::tracing::grpc::OtelInterceptor; use miden_objects::account::delta::AccountUpdateDetails; diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index a0adb87ab5..6bc08ae5c3 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -2,7 +2,7 @@ use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; use futures::{StreamExt, stream}; -use miden_node_proto::generated::rpc_store::rpc_client::RpcClient; +use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_proto::generated::{self as proto}; use miden_node_store::state::State; use miden_node_utils::tracing::grpc::OtelInterceptor; @@ -89,7 +89,7 @@ pub async fn sync_state( api_client: &mut RpcClient>, account_ids: Vec, block_num: u32, -) -> (Duration, proto::rpc_store::SyncStateResponse) { +) -> (Duration, proto::rpc::SyncStateResponse) { let note_tags = account_ids .iter() .map(|id| u32::from(NoteTag::from_account_id(*id))) @@ -100,7 
+100,7 @@ pub async fn sync_state( .map(|id| proto::account::AccountId { id: id.to_bytes() }) .collect::>(); - let sync_request = proto::rpc_store::SyncStateRequest { block_num, note_tags, account_ids }; + let sync_request = proto::rpc::SyncStateRequest { block_num, note_tags, account_ids }; let start = Instant::now(); let response = api_client.sync_state(sync_request).await.unwrap(); @@ -160,8 +160,8 @@ pub async fn sync_notes( .iter() .map(|id| u32::from(NoteTag::from_account_id(*id))) .collect::>(); - let sync_request = proto::rpc_store::SyncNotesRequest { - block_range: Some(proto::rpc_store::BlockRange { block_from: 0, block_to: None }), + let sync_request = proto::rpc::SyncNotesRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), note_tags, }; @@ -282,9 +282,9 @@ pub async fn bench_sync_nullifiers( async fn sync_nullifiers( api_client: &mut RpcClient>, nullifiers_prefixes: Vec, -) -> (Duration, proto::rpc_store::SyncNullifiersResponse) { - let sync_request = proto::rpc_store::SyncNullifiersRequest { - block_range: Some(proto::rpc_store::BlockRange { block_from: 0, block_to: None }), +) -> (Duration, proto::rpc::SyncNullifiersResponse) { + let sync_request = proto::rpc::SyncNullifiersRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), nullifiers: nullifiers_prefixes, prefix_len: 16, }; @@ -359,7 +359,7 @@ pub async fn bench_sync_transactions( .await; let timers_accumulator: Vec = results.iter().map(|r| r.duration).collect(); - let responses: Vec = + let responses: Vec = results.iter().map(|r| r.response.clone()).collect(); print_summary(&timers_accumulator); @@ -404,14 +404,14 @@ pub async fn sync_transactions( account_ids: Vec, block_from: u32, block_to: u32, -) -> (Duration, proto::rpc_store::SyncTransactionsResponse) { +) -> (Duration, proto::rpc::SyncTransactionsResponse) { let account_ids = account_ids .iter() .map(|id| proto::account::AccountId { id: id.to_bytes() }) .collect::>(); 
- let sync_request = proto::rpc_store::SyncTransactionsRequest { - block_range: Some(proto::rpc_store::BlockRange { block_from, block_to: Some(block_to) }), + let sync_request = proto::rpc::SyncTransactionsRequest { + block_range: Some(proto::rpc::BlockRange { block_from, block_to: Some(block_to) }), account_ids, }; @@ -423,7 +423,7 @@ pub async fn sync_transactions( #[derive(Clone)] struct SyncTransactionsRun { duration: Duration, - response: proto::rpc_store::SyncTransactionsResponse, + response: proto::rpc::SyncTransactionsResponse, pages: usize, } @@ -451,7 +451,7 @@ async fn sync_transactions_paginated( total_duration += elapsed; pages += 1; - let info = response.pagination_info.unwrap_or(proto::rpc_store::PaginationInfo { + let info = response.pagination_info.unwrap_or(proto::rpc::PaginationInfo { chain_tip: target_block_to, block_num: target_block_to, }); @@ -460,7 +460,7 @@ async fn sync_transactions_paginated( let reached_block = info.block_num; let chain_tip = info.chain_tip; final_pagination_info = - Some(proto::rpc_store::PaginationInfo { chain_tip, block_num: reached_block }); + Some(proto::rpc::PaginationInfo { chain_tip, block_num: reached_block }); if reached_block >= chain_tip { break; @@ -473,7 +473,7 @@ async fn sync_transactions_paginated( SyncTransactionsRun { duration: total_duration, - response: proto::rpc_store::SyncTransactionsResponse { + response: proto::rpc::SyncTransactionsResponse { pagination_info: final_pagination_info, transactions: aggregated_records, }, diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 63f1e0e548..9b74a32573 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -226,9 +226,9 @@ struct MempoolStats { proven_batches: u64, } -impl From for proto::block_producer::MempoolStats { +impl From for proto::rpc::MempoolStats { fn from(stats: MempoolStats) -> Self { - proto::block_producer::MempoolStats { + 
proto::rpc::MempoolStats { unbatched_transactions: stats.unbatched_transactions, proposed_batches: stats.proposed_batches, proven_batches: stats.proven_batches, @@ -258,8 +258,7 @@ impl api_server::Api for BlockProducerRpcServer { async fn submit_proven_transaction( &self, request: tonic::Request, - ) -> Result, Status> - { + ) -> Result, Status> { self.submit_proven_transaction(request.into_inner()) .await .map(tonic::Response::new) @@ -270,7 +269,7 @@ impl api_server::Api for BlockProducerRpcServer { async fn submit_proven_batch( &self, request: tonic::Request, - ) -> Result, Status> { + ) -> Result, Status> { self.submit_proven_batch(request.into_inner()) .await .map(tonic::Response::new) @@ -287,10 +286,10 @@ impl api_server::Api for BlockProducerRpcServer { async fn status( &self, _request: tonic::Request<()>, - ) -> Result, Status> { + ) -> Result, Status> { let mempool_stats = *self.cached_mempool_stats.read().await; - Ok(tonic::Response::new(proto::block_producer::BlockProducerStatus { + Ok(tonic::Response::new(proto::rpc::BlockProducerStatus { version: env!("CARGO_PKG_VERSION").to_string(), status: "connected".to_string(), chain_tip: mempool_stats.chain_tip.as_u32(), @@ -424,7 +423,7 @@ impl BlockProducerRpcServer { async fn submit_proven_transaction( &self, request: proto::transaction::ProvenTransaction, - ) -> Result { + ) -> Result { debug!(target: COMPONENT, ?request); let tx = ProvenTransaction::read_from_bytes(&request.transaction) @@ -450,11 +449,13 @@ impl BlockProducerRpcServer { // SAFETY: we assume that the rpc component has verified the transaction proof already. 
let tx = AuthenticatedTransaction::new_unchecked(tx, inputs).map(Arc::new)?; - self.mempool.lock().await.lock().await.add_transaction(tx).map(|block_height| { - proto::block_producer::SubmitProvenTransactionResponse { - block_height: block_height.as_u32(), - } - }) + self.mempool + .lock() + .await + .lock() + .await + .add_transaction(tx) + .map(|block_height| proto::blockchain::BlockNumber { block_num: block_height.as_u32() }) } #[instrument( @@ -466,7 +467,7 @@ impl BlockProducerRpcServer { async fn submit_proven_batch( &self, request: proto::transaction::ProvenTransactionBatch, - ) -> Result { + ) -> Result { let _batch = ProvenBatch::read_from_bytes(&request.encoded) .map_err(SubmitProvenBatchError::Deserialization)?; diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index ad23766137..86b73bd9aa 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -148,7 +148,7 @@ async fn block_producer_startup_is_robust_to_network_failures() { async fn send_request( mut client: block_producer_client::ApiClient, i: u8, -) -> Result, tonic::Status> +) -> Result, tonic::Status> { let tx = ProvenTransactionBuilder::new( AccountId::dummy( diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index fad738641c..5ea3089cb5 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -65,17 +65,13 @@ impl Display for TransactionInputs { } } -impl TryFrom for TransactionInputs { +impl TryFrom for TransactionInputs { type Error = ConversionError; - fn try_from( - response: proto::block_producer_store::TransactionInputs, - ) -> Result { + fn try_from(response: proto::store::TransactionInputs) -> Result { let AccountState { account_id, account_commitment } = response .account_state - .ok_or(proto::block_producer_store::TransactionInputs::missing_field(stringify!( - account_state - )))? 
+ .ok_or(proto::store::TransactionInputs::missing_field(stringify!(account_state)))? .try_into()?; let mut nullifiers = HashMap::new(); @@ -83,7 +79,7 @@ impl TryFrom for TransactionInpu let nullifier = nullifier_record .nullifier .ok_or( - proto::block_producer_store::transaction_inputs::NullifierTransactionInputRecord::missing_field( + proto::store::transaction_inputs::NullifierTransactionInputRecord::missing_field( stringify!(nullifier), ), )? @@ -146,7 +142,7 @@ impl StoreClient { .client .clone() .get_block_header_by_number(tonic::Request::new( - proto::shared::BlockHeaderByNumberRequest::default(), + proto::rpc::BlockHeaderByNumberRequest::default(), )) .await? .into_inner() @@ -163,7 +159,7 @@ impl StoreClient { &self, proven_tx: &ProvenTransaction, ) -> Result { - let message = proto::block_producer_store::TransactionInputsRequest { + let message = proto::store::TransactionInputsRequest { account_id: Some(proven_tx.account_id().into()), nullifiers: proven_tx.nullifiers().map(Into::into).collect(), unauthenticated_notes: proven_tx @@ -211,7 +207,7 @@ impl StoreClient { unauthenticated_notes: impl Iterator + Send, reference_blocks: impl Iterator + Send, ) -> Result { - let request = tonic::Request::new(proto::block_producer_store::BlockInputsRequest { + let request = tonic::Request::new(proto::store::BlockInputsRequest { account_ids: updated_accounts.map(Into::into).collect(), nullifiers: created_nullifiers.map(proto::primitives::Digest::from).collect(), unauthenticated_notes: unauthenticated_notes @@ -231,7 +227,7 @@ impl StoreClient { block_references: impl Iterator + Send, note_commitments: impl Iterator + Send, ) -> Result { - let request = tonic::Request::new(proto::block_producer_store::BatchInputsRequest { + let request = tonic::Request::new(proto::store::BatchInputsRequest { reference_blocks: block_references.map(|(block_num, _)| block_num.as_u32()).collect(), note_commitments: note_commitments.map(proto::primitives::Digest::from).collect(), }); diff 
--git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 4329107882..2d53757407 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -112,10 +112,7 @@ impl StoreClient { let mut page_token: Option = None; loop { - let req = proto::ntx_builder_store::UnconsumedNetworkNotesRequest { - page_token, - page_size: 128, - }; + let req = proto::store::UnconsumedNetworkNotesRequest { page_token, page_size: 128 }; let resp = self.inner.clone().get_unconsumed_network_notes(req).await?.into_inner(); let page: Vec = resp @@ -140,8 +137,7 @@ impl StoreClient { &self, prefix: NetworkAccountPrefix, ) -> Result, StoreError> { - let request = - proto::ntx_builder_store::AccountIdPrefix { account_id_prefix: prefix.inner() }; + let request = proto::store::AccountIdPrefix { account_id_prefix: prefix.inner() }; let store_response = self .inner diff --git a/crates/proto/build.rs b/crates/proto/build.rs index 6d71e84004..b0ac773a72 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -9,7 +9,6 @@ use miden_node_proto_build::{ store_block_producer_api_descriptor, store_ntx_builder_api_descriptor, store_rpc_api_descriptor, - store_shared_api_descriptor, validator_api_descriptor, }; use miette::{Context, IntoDiagnostic}; @@ -44,7 +43,6 @@ fn main() -> miette::Result<()> { generate_bindings(store_rpc_api_descriptor(), &dst_dir)?; generate_bindings(store_ntx_builder_api_descriptor(), &dst_dir)?; generate_bindings(store_block_producer_api_descriptor(), &dst_dir)?; - generate_bindings(store_shared_api_descriptor(), &dst_dir)?; generate_bindings(block_producer_api_descriptor(), &dst_dir)?; generate_bindings(remote_prover_api_descriptor(), &dst_dir)?; generate_bindings(validator_api_descriptor(), &dst_dir)?; diff --git a/crates/proto/src/clients/mod.rs b/crates/proto/src/clients/mod.rs index 3388d7875e..3599b472c4 100644 --- a/crates/proto/src/clients/mod.rs +++ b/crates/proto/src/clients/mod.rs @@ -112,10 +112,10 @@ type 
GeneratedRpcClient = generated::rpc::api_client::ApiClient; type GeneratedStoreClientForNtxBuilder = - generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient; + generated::store::ntx_builder_client::NtxBuilderClient; type GeneratedStoreClientForBlockProducer = - generated::block_producer_store::block_producer_client::BlockProducerClient; -type GeneratedStoreClientForRpc = generated::rpc_store::rpc_client::RpcClient; + generated::store::block_producer_client::BlockProducerClient; +type GeneratedStoreClientForRpc = generated::store::rpc_client::RpcClient; type GeneratedProxyStatusClient = generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient; type GeneratedProverClient = generated::remote_prover::api_client::ApiClient; diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 8d690803c0..011957209f 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -112,14 +112,14 @@ pub struct AccountProofRequest { pub details: Option, } -impl TryFrom for AccountProofRequest { +impl TryFrom for AccountProofRequest { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountProofRequest) -> Result { - let proto::rpc_store::AccountProofRequest { account_id, block_num, details } = value; + fn try_from(value: proto::rpc::AccountProofRequest) -> Result { + let proto::rpc::AccountProofRequest { account_id, block_num, details } = value; let account_id = account_id - .ok_or(proto::rpc_store::AccountProofRequest::missing_field(stringify!(account_id)))? + .ok_or(proto::rpc::AccountProofRequest::missing_field(stringify!(account_id)))? 
.try_into()?; let block_num = block_num.map(Into::into); @@ -136,15 +136,13 @@ pub struct AccountDetailRequest { pub storage_requests: Vec, } -impl TryFrom - for AccountDetailRequest -{ +impl TryFrom for AccountDetailRequest { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_request::AccountDetailRequest, + value: proto::rpc::account_proof_request::AccountDetailRequest, ) -> Result { - let proto::rpc_store::account_proof_request::AccountDetailRequest { + let proto::rpc::account_proof_request::AccountDetailRequest { code_commitment, asset_vault_commitment, storage_maps, @@ -182,15 +180,15 @@ impl TryFrom for AccountStorageHeader { } } -impl TryFrom +impl TryFrom for AccountStorageMapDetails { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_storage_details::AccountStorageMapDetails, + value: proto::rpc::account_storage_details::AccountStorageMapDetails, ) -> Result { - let proto::rpc_store::account_storage_details::AccountStorageMapDetails { + let proto::rpc::account_storage_details::AccountStorageMapDetails { slot_index, too_many_entries, entries, @@ -206,13 +204,13 @@ impl TryFrom for StorageMapRequest +impl TryFrom + for StorageMapRequest { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest, + value: proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest, ) -> Result { - let proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest { + let proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest { slot_index, slot_data, } = value; let slot_index = slot_index.try_into()?; - let slot_data = slot_data.ok_or(proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; + let slot_data = 
slot_data.ok_or(proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; Ok(StorageMapRequest { slot_index, slot_data }) } @@ -265,13 +261,13 @@ pub enum SlotData { MapKeys(Vec), } -impl TryFrom +impl TryFrom for SlotData { type Error = ConversionError; - fn try_from(value: proto::rpc_store::account_proof_request::account_detail_request::storage_map_detail_request::SlotData) -> Result { - use proto::rpc_store::account_proof_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; + fn try_from(value: proto::rpc::account_proof_request::account_detail_request::storage_map_detail_request::SlotData) -> Result { + use proto::rpc::account_proof_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; Ok(match value { ProtoSlotData::AllEntries(true) => SlotData::All, @@ -383,11 +379,11 @@ impl AccountVaultDetails { } } -impl TryFrom for AccountVaultDetails { +impl TryFrom for AccountVaultDetails { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountVaultDetails) -> Result { - let proto::rpc_store::AccountVaultDetails { too_many_assets, assets } = value; + fn try_from(value: proto::rpc::AccountVaultDetails) -> Result { + let proto::rpc::AccountVaultDetails { too_many_assets, assets } = value; let assets = Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { @@ -401,7 +397,7 @@ impl TryFrom for AccountVaultDetails { } } -impl From for proto::rpc_store::AccountVaultDetails { +impl From for proto::rpc::AccountVaultDetails { fn from(value: AccountVaultDetails) -> Self { let AccountVaultDetails { too_many_assets, assets } = value; @@ -468,14 +464,14 @@ pub struct AccountStorageDetails { pub map_details: Vec, } -impl TryFrom for AccountStorageDetails { +impl TryFrom for AccountStorageDetails { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountStorageDetails) -> Result { - 
let proto::rpc_store::AccountStorageDetails { header, map_details } = value; + fn try_from(value: proto::rpc::AccountStorageDetails) -> Result { + let proto::rpc::AccountStorageDetails { header, map_details } = value; let header = header - .ok_or(proto::rpc_store::AccountStorageDetails::missing_field(stringify!(header)))? + .ok_or(proto::rpc::AccountStorageDetails::missing_field(stringify!(header)))? .try_into()?; let map_details = try_convert(map_details).collect::, _>>()?; @@ -484,7 +480,7 @@ impl TryFrom for AccountStorageDetails } } -impl From for proto::rpc_store::AccountStorageDetails { +impl From for proto::rpc::AccountStorageDetails { fn from(value: AccountStorageDetails) -> Self { let AccountStorageDetails { header, map_details } = value; @@ -525,18 +521,18 @@ pub struct AccountProofResponse { pub details: Option, } -impl TryFrom for AccountProofResponse { +impl TryFrom for AccountProofResponse { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountProofResponse) -> Result { - let proto::rpc_store::AccountProofResponse { block_num, witness, details } = value; + fn try_from(value: proto::rpc::AccountProofResponse) -> Result { + let proto::rpc::AccountProofResponse { block_num, witness, details } = value; let block_num = block_num - .ok_or(proto::rpc_store::AccountProofResponse::missing_field(stringify!(block_num)))? + .ok_or(proto::rpc::AccountProofResponse::missing_field(stringify!(block_num)))? .into(); let witness = witness - .ok_or(proto::rpc_store::AccountProofResponse::missing_field(stringify!(witness)))? + .ok_or(proto::rpc::AccountProofResponse::missing_field(stringify!(witness)))? 
.try_into()?; let details = details.map(TryFrom::try_from).transpose()?; @@ -545,7 +541,7 @@ impl TryFrom for AccountProofResponse { } } -impl From for proto::rpc_store::AccountProofResponse { +impl From for proto::rpc::AccountProofResponse { fn from(value: AccountProofResponse) -> Self { let AccountProofResponse { block_num, witness, details } = value; @@ -557,13 +553,13 @@ impl From for proto::rpc_store::AccountProofResponse { } } -impl TryFrom for AccountDetails { +impl TryFrom for AccountDetails { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_response::AccountDetails, + value: proto::rpc::account_proof_response::AccountDetails, ) -> Result { - let proto::rpc_store::account_proof_response::AccountDetails { + let proto::rpc::account_proof_response::AccountDetails { header, code, vault_details, @@ -571,21 +567,21 @@ impl TryFrom for Accou } = value; let account_header = header - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(header), - ))? + .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + header + )))? .try_into()?; let storage_details = storage_details - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(storage_details), - ))? + .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + storage_details + )))? .try_into()?; let vault_details = vault_details - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(vault_details), - ))? + .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + vault_details + )))? 
.try_into()?; let account_code = code; @@ -598,7 +594,7 @@ impl TryFrom for Accou } } -impl From for proto::rpc_store::account_proof_response::AccountDetails { +impl From for proto::rpc::account_proof_response::AccountDetails { fn from(value: AccountDetails) -> Self { let AccountDetails { account_header, @@ -622,10 +618,10 @@ impl From for proto::rpc_store::account_proof_response::AccountD } impl From - for proto::rpc_store::account_storage_details::AccountStorageMapDetails + for proto::rpc::account_storage_details::AccountStorageMapDetails { fn from(value: AccountStorageMapDetails) -> Self { - use proto::rpc_store::account_storage_details::account_storage_map_details; + use proto::rpc::account_storage_details::account_storage_map_details; let AccountStorageMapDetails { slot_index, @@ -770,24 +766,22 @@ impl Display for AccountState { } } -impl TryFrom - for AccountState -{ +impl TryFrom for AccountState { type Error = ConversionError; fn try_from( - from: proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord, + from: proto::store::transaction_inputs::AccountTransactionInputRecord, ) -> Result { let account_id = from .account_id - .ok_or(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord::missing_field( + .ok_or(proto::store::transaction_inputs::AccountTransactionInputRecord::missing_field( stringify!(account_id), ))? .try_into()?; let account_commitment = from .account_commitment - .ok_or(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord::missing_field( + .ok_or(proto::store::transaction_inputs::AccountTransactionInputRecord::missing_field( stringify!(account_commitment), ))? 
.try_into()?; @@ -804,9 +798,7 @@ impl TryFrom - for proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord -{ +impl From for proto::store::transaction_inputs::AccountTransactionInputRecord { fn from(from: AccountState) -> Self { Self { account_id: Some(from.account_id.into()), diff --git a/crates/proto/src/domain/batch.rs b/crates/proto/src/domain/batch.rs index 718e74463a..fd4c51c42e 100644 --- a/crates/proto/src/domain/batch.rs +++ b/crates/proto/src/domain/batch.rs @@ -16,7 +16,7 @@ pub struct BatchInputs { pub partial_block_chain: PartialBlockchain, } -impl From for proto::block_producer_store::BatchInputs { +impl From for proto::store::BatchInputs { fn from(inputs: BatchInputs) -> Self { Self { batch_reference_block_header: Some(inputs.batch_reference_block_header.into()), @@ -26,16 +26,14 @@ impl From for proto::block_producer_store::BatchInputs { } } -impl TryFrom for BatchInputs { +impl TryFrom for BatchInputs { type Error = ConversionError; - fn try_from( - response: proto::block_producer_store::BatchInputs, - ) -> Result { + fn try_from(response: proto::store::BatchInputs) -> Result { let result = Self { batch_reference_block_header: response .batch_reference_block_header - .ok_or(proto::block_producer_store::BatchInputs::missing_field("block_header"))? + .ok_or(proto::store::BatchInputs::missing_field("block_header"))? 
.try_into()?, note_proofs: response .note_proofs diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs index a64427d1ae..0cb96fa082 100644 --- a/crates/proto/src/domain/block.rs +++ b/crates/proto/src/domain/block.rs @@ -122,7 +122,7 @@ impl TryFrom for BlockHeader { // BLOCK INPUTS // ================================================================================================ -impl From for proto::block_producer_store::BlockInputs { +impl From for proto::store::BlockInputs { fn from(inputs: BlockInputs) -> Self { let ( prev_block_header, @@ -132,7 +132,7 @@ impl From for proto::block_producer_store::BlockInputs { unauthenticated_note_proofs, ) = inputs.into_parts(); - proto::block_producer_store::BlockInputs { + proto::store::BlockInputs { latest_block_header: Some(prev_block_header.into()), account_witnesses: account_witnesses .into_iter() @@ -154,10 +154,10 @@ impl From for proto::block_producer_store::BlockInputs { } } -impl TryFrom for BlockInputs { +impl TryFrom for BlockInputs { type Error = ConversionError; - fn try_from(response: proto::block_producer_store::BlockInputs) -> Result { + fn try_from(response: proto::store::BlockInputs) -> Result { let latest_block_header: BlockHeader = response .latest_block_header .ok_or(proto::blockchain::BlockHeader::missing_field("block_header"))? @@ -242,7 +242,7 @@ pub enum InvalidBlockRange { EmptyRange { start: BlockNumber, end: BlockNumber }, } -impl proto::rpc_store::BlockRange { +impl proto::rpc::BlockRange { /// Converts the block range into an inclusive range, using the fallback block number if the /// block to is not specified. 
pub fn into_inclusive_range>( @@ -274,7 +274,7 @@ impl proto::rpc_store::BlockRange { } } -impl From> for proto::rpc_store::BlockRange { +impl From> for proto::rpc::BlockRange { fn from(range: RangeInclusive) -> Self { Self { block_from: range.start().as_u32(), diff --git a/crates/proto/src/domain/nullifier.rs b/crates/proto/src/domain/nullifier.rs index 0301a38e38..d4e38a3221 100644 --- a/crates/proto/src/domain/nullifier.rs +++ b/crates/proto/src/domain/nullifier.rs @@ -41,32 +41,30 @@ pub struct NullifierWitnessRecord { pub proof: SmtProof, } -impl TryFrom - for NullifierWitnessRecord -{ +impl TryFrom for NullifierWitnessRecord { type Error = ConversionError; fn try_from( - nullifier_witness_record: proto::block_producer_store::block_inputs::NullifierWitness, + nullifier_witness_record: proto::store::block_inputs::NullifierWitness, ) -> Result { Ok(Self { nullifier: nullifier_witness_record .nullifier - .ok_or(proto::block_producer_store::block_inputs::NullifierWitness::missing_field( - stringify!(nullifier), - ))? + .ok_or(proto::store::block_inputs::NullifierWitness::missing_field(stringify!( + nullifier + )))? .try_into()?, proof: nullifier_witness_record .opening - .ok_or(proto::block_producer_store::block_inputs::NullifierWitness::missing_field( - stringify!(opening), - ))? + .ok_or(proto::store::block_inputs::NullifierWitness::missing_field(stringify!( + opening + )))? .try_into()?, }) } } -impl From for proto::block_producer_store::block_inputs::NullifierWitness { +impl From for proto::store::block_inputs::NullifierWitness { fn from(value: NullifierWitnessRecord) -> Self { Self { nullifier: Some(value.nullifier.into()), diff --git a/crates/proto/src/generated/block_producer.rs b/crates/proto/src/generated/block_producer.rs index 20d9a6e82e..1df7f0f8e0 100644 --- a/crates/proto/src/generated/block_producer.rs +++ b/crates/proto/src/generated/block_producer.rs @@ -1,49 +1,4 @@ // This file is @generated by prost-build. 
-/// Represents the status of the block producer. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockProducerStatus { - /// The block producer's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The block producer's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// The block producer's current view of the chain tip height. - /// - /// This is the height of the latest block that the block producer considers - /// to be part of the canonical chain. - #[prost(fixed32, tag = "4")] - pub chain_tip: u32, - /// Statistics about the mempool. - #[prost(message, optional, tag = "3")] - pub mempool_stats: ::core::option::Option, -} -/// Statistics about the mempool. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MempoolStats { - /// Number of transactions currently in the mempool waiting to be batched. - #[prost(uint64, tag = "1")] - pub unbatched_transactions: u64, - /// Number of batches currently being proven. - #[prost(uint64, tag = "2")] - pub proposed_batches: u64, - /// Number of proven batches waiting for block inclusion. - #[prost(uint64, tag = "3")] - pub proven_batches: u64, -} -/// Represents the result of submitting proven transaction. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SubmitProvenTransactionResponse { - /// The node's current block height. - #[prost(fixed32, tag = "1")] - pub block_height: u32, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SubmitProvenBatchResponse { - /// The node's current block height. - #[prost(fixed32, tag = "1")] - pub block_height: u32, -} /// Request to subscribe to mempool events. 
#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct MempoolSubscriptionRequest { @@ -208,7 +163,7 @@ pub mod api_client { &mut self, request: impl tonic::IntoRequest<()>, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -227,14 +182,14 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("block_producer.Api", "Status")); self.inner.unary(req, path, codec).await } - /// Submits proven transaction to the Miden network + /// Submits proven transaction to the Miden network. Returns the node's current block height. pub async fn submit_proven_transaction( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransaction, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -266,13 +221,15 @@ pub mod api_client { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. pub async fn submit_proven_batch( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransactionBatch, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -346,15 +303,15 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; - /// Submits proven transaction to the Miden network + /// Submits proven transaction to the Miden network. Returns the node's current block height. async fn submit_proven_transaction( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Submits a proven batch to the Miden network. 
@@ -367,11 +324,13 @@ pub mod api_server { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. async fn submit_proven_batch( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Server streaming response type for the MempoolSubscription method. @@ -478,7 +437,7 @@ pub mod api_server { #[allow(non_camel_case_types)] struct StatusSvc(pub Arc); impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::BlockProducerStatus; + type Response = super::super::rpc::BlockProducerStatus; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -521,7 +480,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransaction, > for SubmitProvenTransactionSvc { - type Response = super::SubmitProvenTransactionResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -569,7 +528,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransactionBatch, > for SubmitProvenBatchSvc { - type Response = super::SubmitProvenBatchResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, diff --git a/crates/proto/src/generated/block_producer_store.rs b/crates/proto/src/generated/block_producer_store.rs deleted file mode 100644 index 3603ca50cb..0000000000 --- a/crates/proto/src/generated/block_producer_store.rs +++ /dev/null @@ -1,789 +0,0 @@ -// This file is @generated by prost-build. -/// Returns data required to prove the next block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputsRequest { - /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses. 
- #[prost(message, repeated, tag = "1")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Nullifiers of all notes consumed by the block for which to retrieve witnesses. - /// - /// Due to note erasure it will generally not be possible to know the exact set of nullifiers - /// a block will create, unless we pre-execute note erasure. So in practice, this set of - /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a - /// superset of the nullifiers the block may create. - /// - /// However, if it is known that a certain note will be erased, it would not be necessary to - /// provide a nullifier witness for it. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, - /// Array of block numbers referenced by all batches in the block. - #[prost(fixed32, repeated, tag = "4")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting block inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputs { - /// The latest block header. - #[prost(message, optional, tag = "1")] - pub latest_block_header: ::core::option::Option, - /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - /// the store**. - #[prost(message, repeated, tag = "2")] - pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec< - super::note::NoteInclusionInBlockProof, - >, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the requested blocks - /// referenced by the batches in the block. 
- #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, - /// The state commitments of the requested accounts and their authentication paths. - #[prost(message, repeated, tag = "4")] - pub account_witnesses: ::prost::alloc::vec::Vec, - /// The requested nullifiers and their authentication paths. - #[prost(message, repeated, tag = "5")] - pub nullifier_witnesses: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `BlockInputs`. -pub mod block_inputs { - /// A nullifier returned as a response to the `GetBlockInputs`. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct NullifierWitness { - /// The nullifier. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The SMT proof to verify the nullifier's inclusion in the nullifier tree. - #[prost(message, optional, tag = "2")] - pub opening: ::core::option::Option, - } -} -/// Returns the inputs for a transaction batch. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputsRequest { - /// List of unauthenticated note commitments to be queried from the database. - #[prost(message, repeated, tag = "1")] - pub note_commitments: ::prost::alloc::vec::Vec, - /// Set of block numbers referenced by transactions. - #[prost(fixed32, repeated, tag = "2")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting batch inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputs { - /// The block header that the transaction batch should reference. - #[prost(message, optional, tag = "1")] - pub batch_reference_block_header: ::core::option::Option< - super::blockchain::BlockHeader, - >, - /// Proof of each *found* unauthenticated note's inclusion in a block. 
- #[prost(message, repeated, tag = "2")] - pub note_proofs: ::prost::alloc::vec::Vec, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced - /// by the transactions in the batch. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, -} -/// Returns data required to validate a new transaction. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputsRequest { - /// ID of the account against which a transaction is executed. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Set of nullifiers consumed by this transaction. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Set of unauthenticated note commitments to check for existence on-chain. - /// - /// These are notes which were not on-chain at the state the transaction was proven, - /// but could by now be present. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting transaction inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputs { - /// Account state proof. - #[prost(message, optional, tag = "1")] - pub account_state: ::core::option::Option< - transaction_inputs::AccountTransactionInputRecord, - >, - /// List of nullifiers that have been consumed. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec< - transaction_inputs::NullifierTransactionInputRecord, - >, - /// List of unauthenticated notes that were not found in the database. - #[prost(message, repeated, tag = "3")] - pub found_unauthenticated_notes: ::prost::alloc::vec::Vec, - /// The node's current block height. - #[prost(fixed32, tag = "4")] - pub block_height: u32, - /// Whether the account ID prefix is unique. Only relevant for account creation requests. 
- /// - /// TODO: Replace this with an error. When a general error message exists. - #[prost(bool, optional, tag = "5")] - pub new_account_id_prefix_is_unique: ::core::option::Option, -} -/// Nested message and enum types in `TransactionInputs`. -pub mod transaction_inputs { - /// An account returned as a response to the `GetTransactionInputs`. - #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] - pub struct AccountTransactionInputRecord { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The latest account commitment, zero commitment if the account doesn't exist. - #[prost(message, optional, tag = "2")] - pub account_commitment: ::core::option::Option, - } - /// A nullifier returned as a response to the `GetTransactionInputs`. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierTransactionInputRecord { - /// The nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The block at which the nullifier has been consumed, zero if not consumed. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// Generated client implementations. -pub mod block_producer_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the BlockProducer component - #[derive(Debug, Clone)] - pub struct BlockProducerClient { - inner: tonic::client::Grpc, - } - impl BlockProducerClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl BlockProducerClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> BlockProducerClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - BlockProducerClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Applies changes of a new block to the DB and in-memory data structures. - pub async fn apply_block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/ApplyBlock", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("block_producer_store.BlockProducer", "ApplyBlock"), - ); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBlockHeaderByNumber", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to prove the next block. 
- pub async fn get_block_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBlockInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBlockInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the inputs for a transaction batch. - pub async fn get_batch_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBatchInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBatchInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to validate a new transaction. 
- pub async fn get_transaction_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetTransactionInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetTransactionInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod block_producer_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. - #[async_trait] - pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { - /// Applies changes of a new block to the DB and in-memory data structures. - async fn apply_block( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns data required to prove the next block. - async fn get_block_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns the inputs for a transaction batch. - async fn get_batch_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns data required to validate a new transaction. 
- async fn get_transaction_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the BlockProducer component - #[derive(Debug)] - pub struct BlockProducerServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl BlockProducerServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for BlockProducerServer - where - T: BlockProducer, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/block_producer_store.BlockProducer/ApplyBlock" => { - #[allow(non_camel_case_types)] - struct ApplyBlockSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for ApplyBlockSvc { - type Response = (); - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::apply_block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ApplyBlockSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct 
GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBlockInputs" => { - #[allow(non_camel_case_types)] - struct GetBlockInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBlockInputsSvc { - type Response = super::BlockInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = 
self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBatchInputs" => { - #[allow(non_camel_case_types)] - struct GetBatchInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBatchInputsSvc { - type Response = super::BatchInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_batch_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBatchInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetTransactionInputs" => { - 
#[allow(non_camel_case_types)] - struct GetTransactionInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetTransactionInputsSvc { - type Response = super::TransactionInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_transaction_inputs( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetTransactionInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for BlockProducerServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: 
self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "block_producer_store.BlockProducer"; - impl tonic::server::NamedService for BlockProducerServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs index ab0567476f..61e3a53790 100644 --- a/crates/proto/src/generated/mod.rs +++ b/crates/proto/src/generated/mod.rs @@ -3,14 +3,11 @@ pub mod account; pub mod block_producer; -pub mod block_producer_store; pub mod blockchain; pub mod note; -pub mod ntx_builder_store; pub mod primitives; pub mod remote_prover; pub mod rpc; -pub mod rpc_store; -pub mod shared; +pub mod store; pub mod transaction; pub mod validator; diff --git a/crates/proto/src/generated/ntx_builder_store.rs b/crates/proto/src/generated/ntx_builder_store.rs deleted file mode 100644 index 3beb83076d..0000000000 --- a/crates/proto/src/generated/ntx_builder_store.rs +++ /dev/null @@ -1,843 +0,0 @@ -// This file is @generated by prost-build. -/// Account ID prefix. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountIdPrefix { - /// Account ID prefix. - #[prost(fixed32, tag = "1")] - pub account_id_prefix: u32, -} -/// Represents the result of getting network account details by prefix. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeAccountDetails { - /// Account details. - #[prost(message, optional, tag = "1")] - pub details: ::core::option::Option, -} -/// Returns a list of unconsumed network notes using pagination. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesRequest { - /// An opaque token used to paginate through the notes. - /// - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. 
- #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, -} -/// Returns a paginated list of unconsumed network notes for an account. -/// -/// Notes created or consumed after the specified block are excluded from the result. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesForAccountRequest { - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - /// - /// Note that this token is only valid if used with the same parameters. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, - /// The network account ID prefix to filter notes by. - #[prost(uint32, tag = "3")] - pub network_account_id_prefix: u32, - /// The block number to filter the returned notes by. - /// - /// Notes that are created or consumed after this block are excluded from the result. - #[prost(fixed32, tag = "4")] - pub block_num: u32, -} -/// Represents the result of getting the unconsumed network notes. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UnconsumedNetworkNotes { - /// An opaque pagination token. - /// - /// Use this in your next request to get the next - /// set of data. - /// - /// Will be null once there is no more data remaining. - #[prost(uint64, optional, tag = "1")] - pub next_token: ::core::option::Option, - /// The list of unconsumed network notes. - #[prost(message, repeated, tag = "2")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Current blockchain data based on the requested block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CurrentBlockchainData { - /// Commitments that represent the current state according to the MMR. 
- #[prost(message, repeated, tag = "1")] - pub current_peaks: ::prost::alloc::vec::Vec, - /// Current block header. - #[prost(message, optional, tag = "2")] - pub current_block_header: ::core::option::Option, -} -/// Generated client implementations. -pub mod ntx_builder_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the network transaction builder component - #[derive(Debug, Clone)] - pub struct NtxBuilderClient { - inner: tonic::client::Grpc, - } - impl NtxBuilderClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl NtxBuilderClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> NtxBuilderClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. 
- #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetBlockHeaderByNumber", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of unconsumed network notes. 
- pub async fn get_unconsumed_network_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetUnconsumedNetworkNotes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. - pub async fn get_unconsumed_network_notes_for_account( - &mut self, - request: impl tonic::IntoRequest< - super::UnconsumedNetworkNotesForAccountRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotesForAccount", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetUnconsumedNetworkNotesForAccount", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. 
- pub async fn get_current_blockchain_data( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetCurrentBlockchainData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetCurrentBlockchainData", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of a network account with the specified account prefix. - pub async fn get_network_account_details_by_prefix( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetNetworkAccountDetailsByPrefix", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetNetworkAccountDetailsByPrefix", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. 
- pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetNoteScriptByRoot", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod ntx_builder_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. - #[async_trait] - pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of unconsumed network notes. - async fn get_unconsumed_network_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. 
- async fn get_unconsumed_network_notes_for_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - async fn get_current_blockchain_data( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of a network account with the specified account prefix. - async fn get_network_account_details_by_prefix( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the network transaction builder component - #[derive(Debug)] - pub struct NtxBuilderServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl NtxBuilderServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for NtxBuilderServer - where - T: NtxBuilder, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/ntx_builder_store.NtxBuilder/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } 
- } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotes" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetUnconsumedNetworkNotesSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - 
max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotesForAccount" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesForAccountSvc( - pub Arc, - ); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::UnconsumedNetworkNotesForAccountRequest, - > for GetUnconsumedNetworkNotesForAccountSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::UnconsumedNetworkNotesForAccountRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes_for_account( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesForAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetCurrentBlockchainData" => { - #[allow(non_camel_case_types)] - struct GetCurrentBlockchainDataSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::blockchain::MaybeBlockNumber, - > for GetCurrentBlockchainDataSvc { - type Response = super::CurrentBlockchainData; - type Future = BoxFuture< - tonic::Response, - 
tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::MaybeBlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_current_blockchain_data( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetCurrentBlockchainDataSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { - #[allow(non_camel_case_types)] - struct GetNetworkAccountDetailsByPrefixSvc( - pub Arc, - ); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountDetailsByPrefixSvc { - type Response = super::MaybeAccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_network_account_details_by_prefix( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async 
move { - let method = GetNetworkAccountDetailsByPrefixSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - 
headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for NtxBuilderServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "ntx_builder_store.NtxBuilder"; - impl tonic::server::NamedService for NtxBuilderServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 5c6a4ce4f1..0e443d09a1 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -10,13 +10,539 @@ pub struct RpcStatus { pub genesis_commitment: ::core::option::Option, /// The store status. #[prost(message, optional, tag = "3")] - pub store: ::core::option::Option, + pub store: ::core::option::Option, /// The block producer status. #[prost(message, optional, tag = "4")] - pub block_producer: ::core::option::Option< - super::block_producer::BlockProducerStatus, + pub block_producer: ::core::option::Option, +} +/// Represents the status of the block producer. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockProducerStatus { + /// The block producer's running version. + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + /// The block producer's status. + #[prost(string, tag = "2")] + pub status: ::prost::alloc::string::String, + /// The block producer's current view of the chain tip height. + /// + /// This is the height of the latest block that the block producer considers + /// to be part of the canonical chain. 
+ #[prost(fixed32, tag = "4")] + pub chain_tip: u32, + /// Statistics about the mempool. + #[prost(message, optional, tag = "3")] + pub mempool_stats: ::core::option::Option, +} +/// Statistics about the mempool. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MempoolStats { + /// Number of transactions currently in the mempool waiting to be batched. + #[prost(uint64, tag = "1")] + pub unbatched_transactions: u64, + /// Number of batches currently being proven. + #[prost(uint64, tag = "2")] + pub proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + #[prost(uint64, tag = "3")] + pub proven_batches: u64, +} +/// Represents the status of the store. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct StoreStatus { + /// The store's running version. + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + /// The store's status. + #[prost(string, tag = "2")] + pub status: ::prost::alloc::string::String, + /// Number of the latest block in the chain. + #[prost(fixed32, tag = "3")] + pub chain_tip: u32, +} +/// Returns the block header corresponding to the requested block number, as well as the merkle +/// path and current forest which validate the block's inclusion in the chain. +/// +/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockHeaderByNumberRequest { + /// The target block height, defaults to latest if not provided. + #[prost(uint32, optional, tag = "1")] + pub block_num: ::core::option::Option, + /// Whether or not to return authentication data for the block header. + #[prost(bool, optional, tag = "2")] + pub include_mmr_proof: ::core::option::Option, +} +/// Represents the result of getting a block header by block number. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockHeaderByNumberResponse { + /// The requested block header. 
+ #[prost(message, optional, tag = "1")] + pub block_header: ::core::option::Option, + /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. + #[prost(message, optional, tag = "2")] + pub mmr_path: ::core::option::Option, + /// Current chain length. + #[prost(fixed32, optional, tag = "3")] + pub chain_length: ::core::option::Option, +} +/// Represents a note script or nothing. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MaybeNoteScript { + /// The script for a note by its root. + #[prost(message, optional, tag = "1")] + pub script: ::core::option::Option, +} +/// Returns the latest state proof of the specified account. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountProofRequest { + /// ID of the account for which we want to get data + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// Optional block height at which to return the proof. + /// + /// Defaults to current chain tip if unspecified. + #[prost(message, optional, tag = "2")] + pub block_num: ::core::option::Option, + /// Request for additional account details; valid only for public accounts. + #[prost(message, optional, tag = "3")] + pub details: ::core::option::Option, +} +/// Nested message and enum types in `AccountProofRequest`. +pub mod account_proof_request { + /// Request the details for a public account. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountDetailRequest { + /// Last known code commitment to the requester. The response will include account code + /// only if its commitment is different from this value. + /// + /// If the field is ommiteed, the response will not include the account code. + #[prost(message, optional, tag = "1")] + pub code_commitment: ::core::option::Option, + /// Last known asset vault commitment to the requester. The response will include asset vault data + /// only if its commitment is different from this value. 
If the value is not present in the + /// request, the response will not contain one either. + /// If the number of to-be-returned asset entries exceed a threshold, they have to be requested + /// separately, which is signaled in the response message with dedicated flag. + #[prost(message, optional, tag = "2")] + pub asset_vault_commitment: ::core::option::Option< + super::super::primitives::Digest, + >, + /// Additional request per storage map. + #[prost(message, repeated, tag = "3")] + pub storage_maps: ::prost::alloc::vec::Vec< + account_detail_request::StorageMapDetailRequest, + >, + } + /// Nested message and enum types in `AccountDetailRequest`. + pub mod account_detail_request { + /// Represents a storage slot index and the associated map keys. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct StorageMapDetailRequest { + /// Storage slot index (`\[0..255\]`). + #[prost(uint32, tag = "1")] + pub slot_index: u32, + #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] + pub slot_data: ::core::option::Option, + } + /// Nested message and enum types in `StorageMapDetailRequest`. + pub mod storage_map_detail_request { + /// Indirection required for use in `oneof {..}` block. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MapKeys { + /// A list of map keys associated with this storage slot. + #[prost(message, repeated, tag = "1")] + pub map_keys: ::prost::alloc::vec::Vec< + super::super::super::super::primitives::Digest, + >, + } + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum SlotData { + /// Request to return all storage map data. If the number exceeds a threshold of 1000 entries, + /// the response will not contain them but must be requested separately. + #[prost(bool, tag = "2")] + AllEntries(bool), + /// A list of map keys associated with the given storage slot identified by `slot_index`. + #[prost(message, tag = "3")] + MapKeys(MapKeys), + } + } + } +} +/// Represents the result of getting account proof. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountProofResponse { + /// The block number at which the account witness was created and the account details were observed. + #[prost(message, optional, tag = "1")] + pub block_num: ::core::option::Option, + /// Account ID, current state commitment, and SMT path. + #[prost(message, optional, tag = "2")] + pub witness: ::core::option::Option, + /// Additional details for public accounts. + #[prost(message, optional, tag = "3")] + pub details: ::core::option::Option, +} +/// Nested message and enum types in `AccountProofResponse`. +pub mod account_proof_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountDetails { + /// Account header. + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + /// Account storage data + #[prost(message, optional, tag = "2")] + pub storage_details: ::core::option::Option, + /// Account code; empty if code commitments matched or none was requested. + #[prost(bytes = "vec", optional, tag = "3")] + pub code: ::core::option::Option<::prost::alloc::vec::Vec>, + /// Account asset vault data; empty if vault commitments matched or the requester + /// omitted it in the request. + #[prost(message, optional, tag = "4")] + pub vault_details: ::core::option::Option, + } +} +/// Account vault details for AccountProofResponse +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountVaultDetails { + /// A flag that is set to true if the account contains too many assets. 
This indicates + /// to the user that `SyncAccountVault` endpoint should be used to retrieve the + /// account's assets + #[prost(bool, tag = "1")] + pub too_many_assets: bool, + /// When too_many_assets == false, this will contain the list of assets in the + /// account's vault + #[prost(message, repeated, tag = "2")] + pub assets: ::prost::alloc::vec::Vec, +} +/// Account storage details for AccountProofResponse +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountStorageDetails { + /// Account storage header (storage slot info for up to 256 slots) + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + /// Additional data for the requested storage maps + #[prost(message, repeated, tag = "2")] + pub map_details: ::prost::alloc::vec::Vec< + account_storage_details::AccountStorageMapDetails, >, } +/// Nested message and enum types in `AccountStorageDetails`. +pub mod account_storage_details { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountStorageMapDetails { + /// slot index of the storage map + #[prost(uint32, tag = "1")] + pub slot_index: u32, + /// A flag that is set to `true` if the number of to-be-returned entries in the + /// storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` + /// endpoint should be used to get all storage map data. + #[prost(bool, tag = "2")] + pub too_many_entries: bool, + /// By default we provide all storage entries. + #[prost(message, optional, tag = "3")] + pub entries: ::core::option::Option, + } + /// Nested message and enum types in `AccountStorageMapDetails`. + pub mod account_storage_map_details { + /// Wrapper for repeated storage map entries + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MapEntries { + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec, + } + /// Nested message and enum types in `MapEntries`. + pub mod map_entries { + /// Definition of individual storage entries. 
+ #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + pub struct StorageMapEntry { + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + } + } + } +} +/// List of nullifiers to return proofs for. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NullifierList { + /// List of nullifiers to return proofs for. + #[prost(message, repeated, tag = "1")] + pub nullifiers: ::prost::alloc::vec::Vec, +} +/// Represents the result of checking nullifiers. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckNullifiersResponse { + /// Each requested nullifier has its corresponding nullifier proof at the same position. + #[prost(message, repeated, tag = "1")] + pub proofs: ::prost::alloc::vec::Vec, +} +/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncNullifiersRequest { + /// Block number from which the nullifiers are requested (inclusive). + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Number of bits used for nullifier prefix. Currently the only supported value is 16. + #[prost(uint32, tag = "2")] + pub prefix_len: u32, + /// List of nullifiers to check. Each nullifier is specified by its prefix with length equal + /// to `prefix_len`. + #[prost(uint32, repeated, tag = "3")] + pub nullifiers: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing nullifiers. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncNullifiersResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// List of nullifiers matching the prefixes specified in the request. 
+ #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `SyncNullifiersResponse`. +pub mod sync_nullifiers_response { + /// Represents a single nullifier update. + #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + pub struct NullifierUpdate { + /// Nullifier ID. + #[prost(message, optional, tag = "1")] + pub nullifier: ::core::option::Option, + /// Block number. + #[prost(fixed32, tag = "2")] + pub block_num: u32, + } +} +/// Account vault synchronization request. +/// +/// Allows requesters to sync asset values for specific public accounts within a block range. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncAccountVaultRequest { + /// Block range from which to start synchronizing. + /// + /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + /// otherwise an error will be returned. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Account for which we want to sync asset vault. + #[prost(message, optional, tag = "2")] + pub account_id: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncAccountVaultResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// List of asset updates for the account. + /// + /// Multiple updates can be returned for a single asset, and the one with a higher `block_num` + /// is expected to be retained by the caller. + #[prost(message, repeated, tag = "2")] + pub updates: ::prost::alloc::vec::Vec, +} +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct AccountVaultUpdate { + /// Vault key associated with the asset. + #[prost(message, optional, tag = "1")] + pub vault_key: ::core::option::Option, + /// Asset value related to the vault key. + /// If not present, the asset was removed from the vault. 
+ #[prost(message, optional, tag = "2")] + pub asset: ::core::option::Option, + /// Block number at which the above asset was updated in the account vault. + #[prost(fixed32, tag = "3")] + pub block_num: u32, +} +/// Note synchronization request. +/// +/// Specifies note tags that requester is interested in. The server will return the first block which +/// contains a note matching `note_tags` or the chain tip. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncNotesRequest { + /// Block range from which to start synchronizing. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Specifies the tags which the requester is interested in. + #[prost(fixed32, repeated, tag = "2")] + pub note_tags: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing notes request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncNotesResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// Block header of the block with the first note matching the specified criteria. + #[prost(message, optional, tag = "2")] + pub block_header: ::core::option::Option, + /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. + /// + /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of + /// an MMR of forest `chain_tip` with this path. + #[prost(message, optional, tag = "3")] + pub mmr_path: ::core::option::Option, + /// List of all notes together with the Merkle paths from `response.block_header.note_root`. + #[prost(message, repeated, tag = "4")] + pub notes: ::prost::alloc::vec::Vec, +} +/// State synchronization request. +/// +/// Specifies state updates the requester is interested in. The server will return the first block which +/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to +/// `account_ids` for that block range. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStateRequest { + /// Last block known by the requester. The response will contain data starting from the next block, + /// until the first block which contains a note of matching the requested tag, or the chain tip + /// if there are no notes. + #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// Accounts' commitment to include in the response. + /// + /// An account commitment will be included if-and-only-if it is the latest update. Meaning it is + /// possible there was an update to the account for the given range, but if it is not the latest, + /// it won't be included in the response. + #[prost(message, repeated, tag = "2")] + pub account_ids: ::prost::alloc::vec::Vec, + /// Specifies the tags which the requester is interested in. + #[prost(fixed32, repeated, tag = "3")] + pub note_tags: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing state request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStateResponse { + /// Number of the latest block in the chain. + #[prost(fixed32, tag = "1")] + pub chain_tip: u32, + /// Block header of the block with the first note matching the specified criteria. + #[prost(message, optional, tag = "2")] + pub block_header: ::core::option::Option, + /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. + #[prost(message, optional, tag = "3")] + pub mmr_delta: ::core::option::Option, + /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. + #[prost(message, repeated, tag = "5")] + pub accounts: ::prost::alloc::vec::Vec, + /// List of transactions executed against requested accounts between `request.block_num + 1` and + /// `response.block_header.block_num`. 
+ #[prost(message, repeated, tag = "6")] + pub transactions: ::prost::alloc::vec::Vec, + /// List of all notes together with the Merkle paths from `response.block_header.note_root`. + #[prost(message, repeated, tag = "7")] + pub notes: ::prost::alloc::vec::Vec, +} +/// Storage map synchronization request. +/// +/// Allows requesters to sync storage map values for specific public accounts within a block range, +/// with support for cursor-based pagination to handle large storage maps. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncStorageMapsRequest { + /// Block range from which to start synchronizing. + /// + /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + /// otherwise an error will be returned. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Account for which we want to sync storage maps. + #[prost(message, optional, tag = "3")] + pub account_id: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStorageMapsResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// The list of storage map updates. + /// + /// Multiple updates can be returned for a single slot index and key combination, and the one + /// with a higher `block_num` is expected to be retained by the caller. + #[prost(message, repeated, tag = "2")] + pub updates: ::prost::alloc::vec::Vec, +} +/// Represents a single storage map update. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct StorageMapUpdate { + /// Block number in which the slot was updated. + #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// Slot index (\[0..255\]). + #[prost(uint32, tag = "2")] + pub slot_index: u32, + /// The storage map key. + #[prost(message, optional, tag = "3")] + pub key: ::core::option::Option, + /// The storage map value. 
+ #[prost(message, optional, tag = "4")] + pub value: ::core::option::Option, +} +/// Represents a block range. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockRange { + /// Block number from which to start (inclusive). + #[prost(fixed32, tag = "1")] + pub block_from: u32, + /// Block number up to which to check (inclusive). If not specified, checks up to the latest block. + #[prost(fixed32, optional, tag = "2")] + pub block_to: ::core::option::Option, +} +/// Represents pagination information for chunked responses. +/// +/// Pagination is done using block numbers as the axis, allowing requesters to request +/// data in chunks by specifying block ranges and continuing from where the previous +/// response left off. +/// +/// To request the next chunk, the requester should use `block_num + 1` from the previous response +/// as the `block_from` for the next request. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct PaginationInfo { + /// Current chain tip + #[prost(fixed32, tag = "1")] + pub chain_tip: u32, + /// The block number of the last check included in this response. + /// + /// For chunked responses, this may be less than `request.block_range.block_to`. + /// If it is less than request.block_range.block_to, the user is expected to make a subsequent request + /// starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). + #[prost(fixed32, tag = "2")] + pub block_num: u32, +} +/// Transactions synchronization request. +/// +/// Allows requesters to sync transactions for specific accounts within a block range. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncTransactionsRequest { + /// Block range from which to start synchronizing. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Accounts to sync transactions for. 
+ #[prost(message, repeated, tag = "2")] + pub account_ids: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing transactions request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncTransactionsResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// List of transaction records. + #[prost(message, repeated, tag = "2")] + pub transactions: ::prost::alloc::vec::Vec, +} +/// Represents a transaction record. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionRecord { + /// Block number in which the transaction was included. + #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// A transaction header. + #[prost(message, optional, tag = "2")] + pub header: ::core::option::Option, +} /// Generated client implementations. pub mod api_client { #![allow( @@ -131,9 +657,9 @@ pub mod api_client { /// Returns a nullifier proof for each of the requested nullifiers. pub async fn check_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -177,11 +703,9 @@ pub mod api_client { /// Returns the latest state proof of the specified account. pub async fn get_account_proof( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::AccountProofRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -224,11 +748,9 @@ pub mod api_client { /// and current chain length to authenticate the block's inclusion. 
pub async fn get_block_header_by_number( &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -275,7 +797,7 @@ pub mod api_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -295,16 +817,14 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "GetNoteScriptByRoot")); self.inner.unary(req, path, codec).await } - /// Submits proven transaction to the Miden network. + /// Submits proven transaction to the Miden network. Returns the node's current block height. pub async fn submit_proven_transaction( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransaction, >, ) -> std::result::Result< - tonic::Response< - super::super::block_producer::SubmitProvenTransactionResponse, - >, + tonic::Response, tonic::Status, > { self.inner @@ -334,13 +854,15 @@ pub mod api_client { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. pub async fn submit_proven_batch( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransactionBatch, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -364,11 +886,9 @@ pub mod api_client { /// Note that only 16-bit prefixes are supported at this time. 
pub async fn sync_nullifiers( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncNullifiersRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -388,11 +908,9 @@ pub mod api_client { /// Returns account vault updates for specified account within a block range. pub async fn sync_account_vault( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncAccountVaultRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -420,9 +938,9 @@ pub mod api_client { /// tip of the chain. pub async fn sync_notes( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -456,9 +974,9 @@ pub mod api_client { /// additional filtering of that data on its side. pub async fn sync_state( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -478,11 +996,9 @@ pub mod api_client { /// Returns storage map updates for specified account and storage slots within a block range. pub async fn sync_storage_maps( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncStorageMapsRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -502,11 +1018,9 @@ pub mod api_client { /// Returns transactions records for specific accounts within a block range. 
pub async fn sync_transactions( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncTransactionsRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -546,9 +1060,9 @@ pub mod api_server { /// Returns a nullifier proof for each of the requested nullifiers. async fn check_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns the latest state of an account with the specified ID. @@ -562,9 +1076,9 @@ pub mod api_server { /// Returns the latest state proof of the specified account. async fn get_account_proof( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns raw block data for the specified block number. @@ -579,9 +1093,9 @@ pub mod api_server { /// and current chain length to authenticate the block's inclusion. async fn get_block_header_by_number( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns a list of notes matching the provided note IDs. @@ -596,18 +1110,13 @@ pub mod api_server { async fn get_note_script_by_root( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits proven transaction to the Miden network. + ) -> std::result::Result, tonic::Status>; + /// Submits proven transaction to the Miden network. Returns the node's current block height. async fn submit_proven_transaction( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response< - super::super::block_producer::SubmitProvenTransactionResponse, - >, + tonic::Response, tonic::Status, >; /// Submits a proven batch of transactions to the Miden network. 
@@ -620,11 +1129,13 @@ pub mod api_server { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. async fn submit_proven_batch( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. @@ -632,17 +1143,17 @@ pub mod api_server { /// Note that only 16-bit prefixes are supported at this time. async fn sync_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns account vault updates for specified account within a block range. async fn sync_account_vault( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. @@ -656,9 +1167,9 @@ pub mod api_server { /// tip of the chain. async fn sync_notes( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the latest state of the chain @@ -678,25 +1189,25 @@ pub mod api_server { /// additional filtering of that data on its side. async fn sync_state( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns storage map updates for specified account and storage slots within a block range. 
async fn sync_storage_maps( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns transactions records for specific accounts within a block range. async fn sync_transactions( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; } @@ -819,20 +1330,16 @@ pub mod api_server { "/rpc.Api/CheckNullifiers" => { #[allow(non_camel_case_types)] struct CheckNullifiersSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService + impl tonic::server::UnaryService for CheckNullifiersSvc { - type Response = super::super::rpc_store::CheckNullifiersResponse; + type Response = super::CheckNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::NullifierList, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -911,21 +1418,16 @@ pub mod api_server { "/rpc.Api/GetAccountProof" => { #[allow(non_camel_case_types)] struct GetAccountProofSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::AccountProofRequest, - > for GetAccountProofSvc { - type Response = super::super::rpc_store::AccountProofResponse; + impl tonic::server::UnaryService + for GetAccountProofSvc { + type Response = super::AccountProofResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::AccountProofRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1008,19 +1510,16 @@ pub mod api_server { struct GetBlockHeaderByNumberSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = 
super::super::shared::BlockHeaderByNumberResponse; + > tonic::server::UnaryService + for GetBlockHeaderByNumberSvc { + type Response = super::BlockHeaderByNumberResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1104,7 +1603,7 @@ pub mod api_server { T: Api, > tonic::server::UnaryService for GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; + type Response = super::MaybeNoteScript; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1150,7 +1649,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransaction, > for SubmitProvenTransactionSvc { - type Response = super::super::block_producer::SubmitProvenTransactionResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1198,7 +1697,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransactionBatch, > for SubmitProvenBatchSvc { - type Response = super::super::block_producer::SubmitProvenBatchResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1243,19 +1742,16 @@ pub mod api_server { struct SyncNullifiersSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncNullifiersRequest, - > for SyncNullifiersSvc { - type Response = super::super::rpc_store::SyncNullifiersResponse; + > tonic::server::UnaryService + for SyncNullifiersSvc { + type Response = super::SyncNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncNullifiersRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = 
Arc::clone(&self.0); let fut = async move { @@ -1291,19 +1787,16 @@ pub mod api_server { struct SyncAccountVaultSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncAccountVaultRequest, - > for SyncAccountVaultSvc { - type Response = super::super::rpc_store::SyncAccountVaultResponse; + > tonic::server::UnaryService + for SyncAccountVaultSvc { + type Response = super::SyncAccountVaultResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncAccountVaultRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1337,21 +1830,16 @@ pub mod api_server { "/rpc.Api/SyncNotes" => { #[allow(non_camel_case_types)] struct SyncNotesSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncNotesRequest, - > for SyncNotesSvc { - type Response = super::super::rpc_store::SyncNotesResponse; + impl tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::SyncNotesResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncNotesRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1385,21 +1873,16 @@ pub mod api_server { "/rpc.Api/SyncState" => { #[allow(non_camel_case_types)] struct SyncStateSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncStateRequest, - > for SyncStateSvc { - type Response = super::super::rpc_store::SyncStateResponse; + impl tonic::server::UnaryService + for SyncStateSvc { + type Response = super::SyncStateResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncStateRequest, - >, + request: tonic::Request, ) -> Self::Future { let 
inner = Arc::clone(&self.0); let fut = async move { @@ -1435,19 +1918,16 @@ pub mod api_server { struct SyncStorageMapsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncStorageMapsRequest, - > for SyncStorageMapsSvc { - type Response = super::super::rpc_store::SyncStorageMapsResponse; + > tonic::server::UnaryService + for SyncStorageMapsSvc { + type Response = super::SyncStorageMapsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncStorageMapsRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1483,19 +1963,16 @@ pub mod api_server { struct SyncTransactionsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncTransactionsRequest, - > for SyncTransactionsSvc { - type Response = super::super::rpc_store::SyncTransactionsResponse; + > tonic::server::UnaryService + for SyncTransactionsSvc { + type Response = super::SyncTransactionsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncTransactionsRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { diff --git a/crates/proto/src/generated/rpc_store.rs b/crates/proto/src/generated/rpc_store.rs deleted file mode 100644 index c962475044..0000000000 --- a/crates/proto/src/generated/rpc_store.rs +++ /dev/null @@ -1,1810 +0,0 @@ -// This file is @generated by prost-build. -/// Represents the status of the store. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StoreStatus { - /// The store's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The store's status. 
- #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "3")] - pub chain_tip: u32, -} -/// Returns the latest state proof of the specified account. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofRequest { - /// ID of the account for which we want to get data - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Optional block height at which to return the proof. - /// - /// Defaults to current chain tip if unspecified. - #[prost(message, optional, tag = "2")] - pub block_num: ::core::option::Option, - /// Request for additional account details; valid only for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountProofRequest`. -pub mod account_proof_request { - /// Request the details for a public account. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetailRequest { - /// Last known code commitment to the requester. The response will include account code - /// only if its commitment is different from this value. - /// - /// If the field is ommiteed, the response will not include the account code. - #[prost(message, optional, tag = "1")] - pub code_commitment: ::core::option::Option, - /// Last known asset vault commitment to the requester. The response will include asset vault data - /// only if its commitment is different from this value. If the value is not present in the - /// request, the response will not contain one either. - /// If the number of to-be-returned asset entries exceed a threshold, they have to be requested - /// separately, which is signaled in the response message with dedicated flag. - #[prost(message, optional, tag = "2")] - pub asset_vault_commitment: ::core::option::Option< - super::super::primitives::Digest, - >, - /// Additional request per storage map. 
- #[prost(message, repeated, tag = "3")] - pub storage_maps: ::prost::alloc::vec::Vec< - account_detail_request::StorageMapDetailRequest, - >, - } - /// Nested message and enum types in `AccountDetailRequest`. - pub mod account_detail_request { - /// Represents a storage slot index and the associated map keys. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageMapDetailRequest { - /// Storage slot index (`\[0..255\]`). - #[prost(uint32, tag = "1")] - pub slot_index: u32, - #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] - pub slot_data: ::core::option::Option, - } - /// Nested message and enum types in `StorageMapDetailRequest`. - pub mod storage_map_detail_request { - /// Indirection required for use in `oneof {..}` block. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapKeys { - /// A list of map keys associated with this storage slot. - #[prost(message, repeated, tag = "1")] - pub map_keys: ::prost::alloc::vec::Vec< - super::super::super::super::primitives::Digest, - >, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum SlotData { - /// Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - /// the response will not contain them but must be requested separately. - #[prost(bool, tag = "2")] - AllEntries(bool), - /// A list of map keys associated with the given storage slot identified by `slot_index`. - #[prost(message, tag = "3")] - MapKeys(MapKeys), - } - } - } -} -/// Represents the result of getting account proof. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofResponse { - /// The block number at which the account witness was created and the account details were observed. - #[prost(message, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Account ID, current state commitment, and SMT path. - #[prost(message, optional, tag = "2")] - pub witness: ::core::option::Option, - /// Additional details for public accounts. 
- #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountProofResponse`. -pub mod account_proof_response { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetails { - /// Account header. - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Account storage data - #[prost(message, optional, tag = "2")] - pub storage_details: ::core::option::Option, - /// Account code; empty if code commitments matched or none was requested. - #[prost(bytes = "vec", optional, tag = "3")] - pub code: ::core::option::Option<::prost::alloc::vec::Vec>, - /// Account asset vault data; empty if vault commitments matched or the requester - /// omitted it in the request. - #[prost(message, optional, tag = "4")] - pub vault_details: ::core::option::Option, - } -} -/// Account vault details for AccountProofResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountVaultDetails { - /// A flag that is set to true if the account contains too many assets. This indicates - /// to the user that `SyncAccountVault` endpoint should be used to retrieve the - /// account's assets - #[prost(bool, tag = "1")] - pub too_many_assets: bool, - /// When too_many_assets == false, this will contain the list of assets in the - /// account's vault - #[prost(message, repeated, tag = "2")] - pub assets: ::prost::alloc::vec::Vec, -} -/// Account storage details for AccountProofResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountStorageDetails { - /// Account storage header (storage slot info for up to 256 slots) - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Additional data for the requested storage maps - #[prost(message, repeated, tag = "2")] - pub map_details: ::prost::alloc::vec::Vec< - account_storage_details::AccountStorageMapDetails, - >, -} -/// Nested message and enum types in `AccountStorageDetails`. 
-pub mod account_storage_details { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountStorageMapDetails { - /// slot index of the storage map - #[prost(uint32, tag = "1")] - pub slot_index: u32, - /// A flag that is set to `true` if the number of to-be-returned entries in the - /// storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - /// endpoint should be used to get all storage map data. - #[prost(bool, tag = "2")] - pub too_many_entries: bool, - /// By default we provide all storage entries. - #[prost(message, optional, tag = "3")] - pub entries: ::core::option::Option, - } - /// Nested message and enum types in `AccountStorageMapDetails`. - pub mod account_storage_map_details { - /// Wrapper for repeated storage map entries - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapEntries { - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, - } - /// Nested message and enum types in `MapEntries`. - pub mod map_entries { - /// Definition of individual storage entries. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct StorageMapEntry { - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - } - } - } -} -/// List of nullifiers to return proofs for. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NullifierList { - /// List of nullifiers to return proofs for. - #[prost(message, repeated, tag = "1")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of checking nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersResponse { - /// Each requested nullifier has its corresponding nullifier proof at the same position. 
- #[prost(message, repeated, tag = "1")] - pub proofs: ::prost::alloc::vec::Vec, -} -/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNullifiersRequest { - /// Block number from which the nullifiers are requested (inclusive). - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Number of bits used for nullifier prefix. Currently the only supported value is 16. - #[prost(uint32, tag = "2")] - pub prefix_len: u32, - /// List of nullifiers to check. Each nullifier is specified by its prefix with length equal - /// to `prefix_len`. - #[prost(uint32, repeated, tag = "3")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNullifiersResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of nullifiers matching the prefixes specified in the request. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `SyncNullifiersResponse`. -pub mod sync_nullifiers_response { - /// Represents a single nullifier update. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierUpdate { - /// Nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// Block number. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// State synchronization request. -/// -/// Specifies state updates the requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -/// `account_ids` for that block range. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateRequest { - /// Last block known by the requester. The response will contain data starting from the next block, - /// until the first block which contains a note of matching the requested tag, or the chain tip - /// if there are no notes. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Accounts' commitment to include in the response. - /// - /// An account commitment will be included if-and-only-if it is the latest update. Meaning it is - /// possible there was an update to the account for the given range, but if it is not the latest, - /// it won't be included in the response. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "3")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing state request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateResponse { - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - #[prost(message, optional, tag = "3")] - pub mmr_delta: ::core::option::Option, - /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - #[prost(message, repeated, tag = "5")] - pub accounts: ::prost::alloc::vec::Vec, - /// List of transactions executed against requested accounts between `request.block_num + 1` and - /// `response.block_header.block_num`. 
- #[prost(message, repeated, tag = "6")] - pub transactions: ::prost::alloc::vec::Vec, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "7")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Account vault synchronization request. -/// -/// Allows requesters to sync asset values for specific public accounts within a block range. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncAccountVaultRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync asset vault. - #[prost(message, optional, tag = "2")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncAccountVaultResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of asset updates for the account. - /// - /// Multiple updates can be returned for a single asset, and the one with a higher `block_num` - /// is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountVaultUpdate { - /// Vault key associated with the asset. - #[prost(message, optional, tag = "1")] - pub vault_key: ::core::option::Option, - /// Asset value related to the vault key. - /// If not present, the asset was removed from the vault. - #[prost(message, optional, tag = "2")] - pub asset: ::core::option::Option, - /// Block number at which the above asset was updated in the account vault. - #[prost(fixed32, tag = "3")] - pub block_num: u32, -} -/// Note synchronization request. 
-/// -/// Specifies note tags that requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNotesRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "2")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing notes request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNotesResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - /// - /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of - /// an MMR of forest `chain_tip` with this path. - #[prost(message, optional, tag = "3")] - pub mmr_path: ::core::option::Option, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "4")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Storage map synchronization request. -/// -/// Allows requesters to sync storage map values for specific public accounts within a block range, -/// with support for cursor-based pagination to handle large storage maps. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncStorageMapsRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. 
- #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync storage maps. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStorageMapsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// The list of storage map updates. - /// - /// Multiple updates can be returned for a single slot index and key combination, and the one - /// with a higher `block_num` is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -/// Represents a single storage map update. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StorageMapUpdate { - /// Block number in which the slot was updated. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Slot index (\[0..255\]). - #[prost(uint32, tag = "2")] - pub slot_index: u32, - /// The storage map key. - #[prost(message, optional, tag = "3")] - pub key: ::core::option::Option, - /// The storage map value. - #[prost(message, optional, tag = "4")] - pub value: ::core::option::Option, -} -/// Represents a block range. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockRange { - /// Block number from which to start (inclusive). - #[prost(fixed32, tag = "1")] - pub block_from: u32, - /// Block number up to which to check (inclusive). If not specified, checks up to the latest block. - #[prost(fixed32, optional, tag = "2")] - pub block_to: ::core::option::Option, -} -/// Represents pagination information for chunked responses. -/// -/// Pagination is done using block numbers as the axis, allowing requesters to request -/// data in chunks by specifying block ranges and continuing from where the previous -/// response left off. 
-/// -/// To request the next chunk, the requester should use `block_num + 1` from the previous response -/// as the `block_from` for the next request. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct PaginationInfo { - /// Current chain tip - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// The block number of the last check included in this response. - /// - /// For chunked responses, this may be less than `request.block_range.block_to`. - /// If it is less than request.block_range.block_to, the user is expected to make a subsequent request - /// starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - #[prost(fixed32, tag = "2")] - pub block_num: u32, -} -/// Transactions synchronization request. -/// -/// Allows requesters to sync transactions for specific accounts within a block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Accounts to sync transactions for. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing transactions request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of transaction records. - #[prost(message, repeated, tag = "2")] - pub transactions: ::prost::alloc::vec::Vec, -} -/// Represents a transaction record. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionRecord { - /// Block number in which the transaction was included. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// A transaction header. - #[prost(message, optional, tag = "2")] - pub header: ::core::option::Option, -} -/// Generated client implementations. 
-pub mod rpc_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the RPC component - #[derive(Debug, Clone)] - pub struct RpcClient { - inner: tonic::client::Grpc, - } - impl RpcClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl RpcClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> RpcClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - RpcClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. 
- /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "Status")); - self.inner.unary(req, path, codec).await - } - /// Returns a nullifier proof for each of the requested nullifiers. - pub async fn check_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/CheckNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "CheckNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of an account with the specified ID. 
- pub async fn get_account_details( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetAccountDetails", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetAccountDetails")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state proof of the specified account. - pub async fn get_account_proof( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetAccountProof", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetAccountProof")); - self.inner.unary(req, path, codec).await - } - /// Returns raw block data for the specified block number. 
- pub async fn get_block_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetBlockByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetBlockByNumber")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of committed notes matching the provided note IDs. 
- pub async fn get_notes_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetNotesById", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetNotesById")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. 
- pub async fn sync_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - pub async fn sync_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/SyncNotes"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "SyncNotes")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. 
- /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - pub async fn sync_state( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/SyncState"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "SyncState")); - self.inner.unary(req, path, codec).await - } - /// Returns account vault updates for specified account within a block range. 
- pub async fn sync_account_vault( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncAccountVault", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncAccountVault")); - self.inner.unary(req, path, codec).await - } - /// Returns storage map updates for specified account and storage slots within a block range. - pub async fn sync_storage_maps( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncStorageMaps", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncStorageMaps")); - self.inner.unary(req, path, codec).await - } - /// Returns transactions records for specific accounts within a block range. 
- pub async fn sync_transactions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncTransactions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod rpc_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. - #[async_trait] - pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Returns a nullifier proof for each of the requested nullifiers. - async fn check_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of an account with the specified ID. - async fn get_account_details( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state proof of the specified account. - async fn get_account_proof( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns raw block data for the specified block number. - async fn get_block_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Retrieves block header by given block number. 
Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of committed notes matching the provided note IDs. - async fn get_notes_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - async fn sync_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. 
It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - async fn sync_state( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. 
- async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the RPC component - #[derive(Debug)] - pub struct RpcServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl RpcServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for RpcServer - where - T: Rpc, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/rpc_store.Rpc/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::StoreStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/CheckNullifiers" => { - #[allow(non_camel_case_types)] - struct CheckNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = 
super::CheckNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::check_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = CheckNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetAccountDetails" => { - #[allow(non_camel_case_types)] - struct GetAccountDetailsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetAccountDetailsSvc { - type Response = super::super::account::AccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_details(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountDetailsSvc(inner); - let codec = 
tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetAccountProof" => { - #[allow(non_camel_case_types)] - struct GetAccountProofSvc(pub Arc); - impl tonic::server::UnaryService - for GetAccountProofSvc { - type Response = super::AccountProofResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_proof(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountProofSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetBlockByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetBlockByNumberSvc { - type Response = super::super::blockchain::MaybeBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::BlockNumber, 
- >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_by_number(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = 
tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetNotesById" => { - #[allow(non_camel_case_types)] - struct GetNotesByIdSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNotesByIdSvc { - type Response = super::super::note::CommittedNoteList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_notes_by_id(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNotesByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future 
{ - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncNullifiers" => { - #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - 
max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncNotes" => { - #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_notes(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncState" => { - #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_state(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = 
self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStateSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncAccountVault" => { - #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_vault(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountVaultSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncStorageMaps" => { - #[allow(non_camel_case_types)] - struct SyncStorageMapsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for 
SyncStorageMapsSvc { - type Response = super::SyncStorageMapsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_storage_maps(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStorageMapsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncTransactions" => { - #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = 
tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for RpcServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "rpc_store.Rpc"; - impl tonic::server::NamedService for RpcServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/shared.rs b/crates/proto/src/generated/shared.rs deleted file mode 100644 index f79b9117e5..0000000000 --- a/crates/proto/src/generated/shared.rs +++ /dev/null @@ -1,34 +0,0 @@ -// This file is @generated by prost-build. -/// Returns the block header corresponding to the requested block number, as well as the merkle -/// path and current forest which validate the block's inclusion in the chain. -/// -/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockHeaderByNumberRequest { - /// The target block height, defaults to latest if not provided. - #[prost(uint32, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Whether or not to return authentication data for the block header. - #[prost(bool, optional, tag = "2")] - pub include_mmr_proof: ::core::option::Option, -} -/// Represents the result of getting a block header by block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockHeaderByNumberResponse { - /// The requested block header. - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - #[prost(message, optional, tag = "2")] - pub mmr_path: ::core::option::Option, - /// Current chain length. - #[prost(fixed32, optional, tag = "3")] - pub chain_length: ::core::option::Option, -} -/// Represents a note script or nothing. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeNoteScript { - /// The script for a note by its root. - #[prost(message, optional, tag = "1")] - pub script: ::core::option::Option, -} diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs new file mode 100644 index 0000000000..351384033b --- /dev/null +++ b/crates/proto/src/generated/store.rs @@ -0,0 +1,2958 @@ +// This file is @generated by prost-build. +/// Returns data required to prove the next block. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockInputsRequest { + /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses. + #[prost(message, repeated, tag = "1")] + pub account_ids: ::prost::alloc::vec::Vec, + /// Nullifiers of all notes consumed by the block for which to retrieve witnesses. 
+ /// + /// Due to note erasure it will generally not be possible to know the exact set of nullifiers + /// a block will create, unless we pre-execute note erasure. So in practice, this set of + /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a + /// superset of the nullifiers the block may create. + /// + /// However, if it is known that a certain note will be erased, it would not be necessary to + /// provide a nullifier witness for it. + #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec, + /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. + #[prost(message, repeated, tag = "3")] + pub unauthenticated_notes: ::prost::alloc::vec::Vec, + /// Array of block numbers referenced by all batches in the block. + #[prost(fixed32, repeated, tag = "4")] + pub reference_blocks: ::prost::alloc::vec::Vec, +} +/// Represents the result of getting block inputs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockInputs { + /// The latest block header. + #[prost(message, optional, tag = "1")] + pub latest_block_header: ::core::option::Option, + /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in + /// the store**. + #[prost(message, repeated, tag = "2")] + pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec< + super::note::NoteInclusionInBlockProof, + >, + /// The serialized chain MMR which includes proofs for all blocks referenced by the + /// above note inclusion proofs as well as proofs for inclusion of the requested blocks + /// referenced by the batches in the block. + #[prost(bytes = "vec", tag = "3")] + pub partial_block_chain: ::prost::alloc::vec::Vec, + /// The state commitments of the requested accounts and their authentication paths. + #[prost(message, repeated, tag = "4")] + pub account_witnesses: ::prost::alloc::vec::Vec, + /// The requested nullifiers and their authentication paths. 
+ #[prost(message, repeated, tag = "5")] + pub nullifier_witnesses: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `BlockInputs`. +pub mod block_inputs { + /// A nullifier returned as a response to the `GetBlockInputs`. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct NullifierWitness { + /// The nullifier. + #[prost(message, optional, tag = "1")] + pub nullifier: ::core::option::Option, + /// The SMT proof to verify the nullifier's inclusion in the nullifier tree. + #[prost(message, optional, tag = "2")] + pub opening: ::core::option::Option, + } +} +/// Returns the inputs for a transaction batch. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchInputsRequest { + /// List of unauthenticated note commitments to be queried from the database. + #[prost(message, repeated, tag = "1")] + pub note_commitments: ::prost::alloc::vec::Vec, + /// Set of block numbers referenced by transactions. + #[prost(fixed32, repeated, tag = "2")] + pub reference_blocks: ::prost::alloc::vec::Vec, +} +/// Represents the result of getting batch inputs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchInputs { + /// The block header that the transaction batch should reference. + #[prost(message, optional, tag = "1")] + pub batch_reference_block_header: ::core::option::Option< + super::blockchain::BlockHeader, + >, + /// Proof of each *found* unauthenticated note's inclusion in a block. + #[prost(message, repeated, tag = "2")] + pub note_proofs: ::prost::alloc::vec::Vec, + /// The serialized chain MMR which includes proofs for all blocks referenced by the + /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced + /// by the transactions in the batch. + #[prost(bytes = "vec", tag = "3")] + pub partial_block_chain: ::prost::alloc::vec::Vec, +} +/// Returns data required to validate a new transaction. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionInputsRequest { + /// ID of the account against which a transaction is executed. + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// Set of nullifiers consumed by this transaction. + #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec, + /// Set of unauthenticated note commitments to check for existence on-chain. + /// + /// These are notes which were not on-chain at the state the transaction was proven, + /// but could by now be present. + #[prost(message, repeated, tag = "3")] + pub unauthenticated_notes: ::prost::alloc::vec::Vec, +} +/// Represents the result of getting transaction inputs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionInputs { + /// Account state proof. + #[prost(message, optional, tag = "1")] + pub account_state: ::core::option::Option< + transaction_inputs::AccountTransactionInputRecord, + >, + /// List of nullifiers that have been consumed. + #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec< + transaction_inputs::NullifierTransactionInputRecord, + >, + /// List of unauthenticated notes that were not found in the database. + #[prost(message, repeated, tag = "3")] + pub found_unauthenticated_notes: ::prost::alloc::vec::Vec, + /// The node's current block height. + #[prost(fixed32, tag = "4")] + pub block_height: u32, + /// Whether the account ID prefix is unique. Only relevant for account creation requests. + /// + /// TODO: Replace this with an error. When a general error message exists. + #[prost(bool, optional, tag = "5")] + pub new_account_id_prefix_is_unique: ::core::option::Option, +} +/// Nested message and enum types in `TransactionInputs`. +pub mod transaction_inputs { + /// An account returned as a response to the `GetTransactionInputs`. 
+ #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] + pub struct AccountTransactionInputRecord { + /// The account ID. + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// The latest account commitment, zero commitment if the account doesn't exist. + #[prost(message, optional, tag = "2")] + pub account_commitment: ::core::option::Option, + } + /// A nullifier returned as a response to the `GetTransactionInputs`. + #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + pub struct NullifierTransactionInputRecord { + /// The nullifier ID. + #[prost(message, optional, tag = "1")] + pub nullifier: ::core::option::Option, + /// The block at which the nullifier has been consumed, zero if not consumed. + #[prost(fixed32, tag = "2")] + pub block_num: u32, + } +} +/// Account ID prefix. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct AccountIdPrefix { + /// Account ID prefix. + #[prost(fixed32, tag = "1")] + pub account_id_prefix: u32, +} +/// Represents the result of getting network account details by prefix. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MaybeAccountDetails { + /// Account details. + #[prost(message, optional, tag = "1")] + pub details: ::core::option::Option, +} +/// Returns a list of unconsumed network notes using pagination. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct UnconsumedNetworkNotesRequest { + /// An opaque token used to paginate through the notes. + /// + /// This should be null on the first call, and set to the response token until the response token + /// is null, at which point all data has been fetched. + #[prost(uint64, optional, tag = "1")] + pub page_token: ::core::option::Option, + /// Number of notes to retrieve per page. + #[prost(uint64, tag = "2")] + pub page_size: u64, +} +/// Returns a paginated list of unconsumed network notes for an account. 
+/// +/// Notes created or consumed after the specified block are excluded from the result. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct UnconsumedNetworkNotesForAccountRequest { + /// This should be null on the first call, and set to the response token until the response token + /// is null, at which point all data has been fetched. + /// + /// Note that this token is only valid if used with the same parameters. + #[prost(uint64, optional, tag = "1")] + pub page_token: ::core::option::Option, + /// Number of notes to retrieve per page. + #[prost(uint64, tag = "2")] + pub page_size: u64, + /// The network account ID prefix to filter notes by. + #[prost(uint32, tag = "3")] + pub network_account_id_prefix: u32, + /// The block number to filter the returned notes by. + /// + /// Notes that are created or consumed after this block are excluded from the result. + #[prost(fixed32, tag = "4")] + pub block_num: u32, +} +/// Represents the result of getting the unconsumed network notes. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UnconsumedNetworkNotes { + /// An opaque pagination token. + /// + /// Use this in your next request to get the next + /// set of data. + /// + /// Will be null once there is no more data remaining. + #[prost(uint64, optional, tag = "1")] + pub next_token: ::core::option::Option, + /// The list of unconsumed network notes. + #[prost(message, repeated, tag = "2")] + pub notes: ::prost::alloc::vec::Vec, +} +/// Current blockchain data based on the requested block number. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CurrentBlockchainData { + /// Commitments that represent the current state according to the MMR. + #[prost(message, repeated, tag = "1")] + pub current_peaks: ::prost::alloc::vec::Vec, + /// Current block header. + #[prost(message, optional, tag = "2")] + pub current_block_header: ::core::option::Option, +} +/// Generated client implementations. 
+pub mod rpc_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the RPC component + #[derive(Debug, Clone)] + pub struct RpcClient { + inner: tonic::client::Grpc, + } + impl RpcClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl RpcClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> RpcClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + RpcClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Returns the status info. + pub async fn status( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/Status"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "Status")); + self.inner.unary(req, path, codec).await + } + /// Returns a nullifier proof for each of the requested nullifiers. + pub async fn check_nullifiers( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/CheckNullifiers", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "CheckNullifiers")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest state of an account with the specified ID. 
+ pub async fn get_account_details( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetAccountDetails", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetAccountDetails")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest state proof of the specified account. + pub async fn get_account_proof( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetAccountProof", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetAccountProof")); + self.inner.unary(req, path, codec).await + } + /// Returns raw block data for the specified block number. + pub async fn get_block_by_number( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetBlockByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetBlockByNumber")); + self.inner.unary(req, path, codec).await + } + /// Retrieves block header by given block number. 
Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetBlockHeaderByNumber")); + self.inner.unary(req, path, codec).await + } + /// Returns a list of committed notes matching the provided note IDs. + pub async fn get_notes_by_id( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetNotesById"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetNotesById")); + self.inner.unary(req, path, codec).await + } + /// Returns the script for a note by its root. 
+ pub async fn get_note_script_by_root( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetNoteScriptByRoot", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetNoteScriptByRoot")); + self.inner.unary(req, path, codec).await + } + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. + /// + /// Note that only 16-bit prefixes are supported at this time. + pub async fn sync_nullifiers( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNullifiers"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNullifiers")); + self.inner.unary(req, path, codec).await + } + /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. + /// + /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for + /// matching notes for. The request will then return the next block containing any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the + /// tip of the chain. 
+ pub async fn sync_notes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNotes"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNotes")); + self.inner.unary(req, path, codec).await + } + /// Returns info which can be used by the requester to sync up to the latest state of the chain + /// for the objects (accounts, notes, nullifiers) the requester is interested in. + /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. requester is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the requester is fully synchronized with the chain. + /// + /// Each request also returns info about new notes, nullifiers etc. created. It also returns + /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain + /// MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags and nullifiers filters contain only high + /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make + /// additional filtering of that data on its side. 
+ pub async fn sync_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncState"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncState")); + self.inner.unary(req, path, codec).await + } + /// Returns account vault updates for specified account within a block range. + pub async fn sync_account_vault( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncAccountVault", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "SyncAccountVault")); + self.inner.unary(req, path, codec).await + } + /// Returns storage map updates for specified account and storage slots within a block range. 
+ pub async fn sync_storage_maps( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncStorageMaps", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncStorageMaps")); + self.inner.unary(req, path, codec).await + } + /// Returns transactions records for specific accounts within a block range. + pub async fn sync_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "SyncTransactions")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod rpc_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. + #[async_trait] + pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { + /// Returns the status info. + async fn status( + &self, + request: tonic::Request<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a nullifier proof for each of the requested nullifiers. 
+ async fn check_nullifiers( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest state of an account with the specified ID. + async fn get_account_details( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest state proof of the specified account. + async fn get_account_proof( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns raw block data for the specified block number. + async fn get_block_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of committed notes matching the provided note IDs. + async fn get_notes_by_id( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the script for a note by its root. + async fn get_note_script_by_root( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. + /// + /// Note that only 16-bit prefixes are supported at this time. + async fn sync_nullifiers( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. 
+ /// + /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for + /// matching notes for. The request will then return the next block containing any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the + /// tip of the chain. + async fn sync_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the requester to sync up to the latest state of the chain + /// for the objects (accounts, notes, nullifiers) the requester is interested in. + /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. requester is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the requester is fully synchronized with the chain. + /// + /// Each request also returns info about new notes, nullifiers etc. created. It also returns + /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain + /// MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags and nullifiers filters contain only high + /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make + /// additional filtering of that data on its side. + async fn sync_state( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns account vault updates for specified account within a block range. 
+ async fn sync_account_vault( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns storage map updates for specified account and storage slots within a block range. + async fn sync_storage_maps( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns transactions records for specific accounts within a block range. + async fn sync_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the RPC component + #[derive(Debug)] + pub struct RpcServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl RpcServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for RpcServer + where + T: Rpc, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.Rpc/Status" => { + #[allow(non_camel_case_types)] + struct StatusSvc(pub Arc); + impl tonic::server::UnaryService<()> for StatusSvc { + type Response = super::super::rpc::StoreStatus; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::status(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = StatusSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + 
Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/CheckNullifiers" => { + #[allow(non_camel_case_types)] + struct CheckNullifiersSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for CheckNullifiersSvc { + type Response = super::super::rpc::CheckNullifiersResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::check_nullifiers(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = CheckNullifiersSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetAccountDetails" => { + #[allow(non_camel_case_types)] + struct GetAccountDetailsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetAccountDetailsSvc { + type Response = super::super::account::AccountDetails; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_account_details(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = 
self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetAccountDetailsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetAccountProof" => { + #[allow(non_camel_case_types)] + struct GetAccountProofSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetAccountProofSvc { + type Response = super::super::rpc::AccountProofResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::AccountProofRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_account_proof(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetAccountProofSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetBlockByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockByNumberSvc(pub Arc); + impl< + T: 
Rpc, + > tonic::server::UnaryService + for GetBlockByNumberSvc { + type Response = super::super::blockchain::MaybeBlock; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::BlockNumber, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_by_number(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let 
max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetNotesById" => { + #[allow(non_camel_case_types)] + struct GetNotesByIdSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetNotesByIdSvc { + type Response = super::super::note::CommittedNoteList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_notes_by_id(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNotesByIdSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetNoteScriptByRoot" => { + #[allow(non_camel_case_types)] + struct GetNoteScriptByRootSvc(pub Arc); + impl< + T: Rpc, + > 
tonic::server::UnaryService + for GetNoteScriptByRootSvc { + type Response = super::super::rpc::MaybeNoteScript; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_note_script_by_root(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNoteScriptByRootSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncNullifiers" => { + #[allow(non_camel_case_types)] + struct SyncNullifiersSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncNullifiersRequest, + > for SyncNullifiersSvc { + type Response = super::super::rpc::SyncNullifiersResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncNullifiersRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_nullifiers(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = 
self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncNullifiersSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncNotes" => { + #[allow(non_camel_case_types)] + struct SyncNotesSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::super::rpc::SyncNotesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_notes(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncNotesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncState" => { + #[allow(non_camel_case_types)] + struct SyncStateSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for SyncStateSvc { + type Response = super::super::rpc::SyncStateResponse; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_state(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncStateSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncAccountVault" => { + #[allow(non_camel_case_types)] + struct SyncAccountVaultSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncAccountVaultRequest, + > for SyncAccountVaultSvc { + type Response = super::super::rpc::SyncAccountVaultResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncAccountVaultRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_account_vault(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncAccountVaultSvc(inner); + let codec = 
tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncStorageMaps" => { + #[allow(non_camel_case_types)] + struct SyncStorageMapsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncStorageMapsRequest, + > for SyncStorageMapsSvc { + type Response = super::super::rpc::SyncStorageMapsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncStorageMapsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_storage_maps(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncStorageMapsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncTransactions" => { + #[allow(non_camel_case_types)] + struct SyncTransactionsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncTransactionsRequest, + > for SyncTransactionsSvc { + type Response = 
super::super::rpc::SyncTransactionsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncTransactionsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_transactions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncTransactionsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for RpcServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.Rpc"; + impl tonic::server::NamedService for RpcServer 
{ + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod block_producer_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the BlockProducer component + #[derive(Debug, Clone)] + pub struct BlockProducerClient { + inner: tonic::client::Grpc, + } + impl BlockProducerClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl BlockProducerClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> BlockProducerClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + BlockProducerClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Applies changes of a new block to the DB and in-memory data structures. + pub async fn apply_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/ApplyBlock", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "ApplyBlock")); + self.inner.unary(req, path, codec).await + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. 
+ pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("store.BlockProducer", "GetBlockHeaderByNumber"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns data required to prove the next block. + pub async fn get_block_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBlockInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetBlockInputs")); + self.inner.unary(req, path, codec).await + } + /// Returns the inputs for a transaction batch. 
+ pub async fn get_batch_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBatchInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetBatchInputs")); + self.inner.unary(req, path, codec).await + } + /// Returns data required to validate a new transaction. + pub async fn get_transaction_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetTransactionInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetTransactionInputs")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod block_producer_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. + #[async_trait] + pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { + /// Applies changes of a new block to the DB and in-memory data structures. + async fn apply_block( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Retrieves block header by given block number. 
Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns data required to prove the next block. + async fn get_block_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns the inputs for a transaction batch. + async fn get_batch_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns data required to validate a new transaction. + async fn get_transaction_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the BlockProducer component + #[derive(Debug)] + pub struct BlockProducerServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl BlockProducerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for BlockProducerServer + where + T: BlockProducer, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.BlockProducer/ApplyBlock" => { + #[allow(non_camel_case_types)] + struct ApplyBlockSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for ApplyBlockSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::apply_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ApplyBlockSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBlockInputs" => { + #[allow(non_camel_case_types)] + struct GetBlockInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetBlockInputsSvc { + type Response = super::BlockInputs; + type Future = 
BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_inputs(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBatchInputs" => { + #[allow(non_camel_case_types)] + struct GetBatchInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetBatchInputsSvc { + type Response = super::BatchInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_batch_inputs(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBatchInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + 
.apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetTransactionInputs" => { + #[allow(non_camel_case_types)] + struct GetTransactionInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetTransactionInputsSvc { + type Response = super::TransactionInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transaction_inputs( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for BlockProducerServer { + fn 
clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.BlockProducer"; + impl tonic::server::NamedService for BlockProducerServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod ntx_builder_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the network transaction builder component + #[derive(Debug, Clone)] + pub struct NtxBuilderClient { + inner: tonic::client::Grpc, + } + impl NtxBuilderClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl NtxBuilderClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> NtxBuilderClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetBlockHeaderByNumber")); + self.inner.unary(req, path, codec).await + } + /// Returns a paginated list of unconsumed network notes. + pub async fn get_unconsumed_network_notes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetUnconsumedNetworkNotes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("store.NtxBuilder", "GetUnconsumedNetworkNotes"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. 
+ pub async fn get_unconsumed_network_notes_for_account( + &mut self, + request: impl tonic::IntoRequest< + super::UnconsumedNetworkNotesForAccountRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetUnconsumedNetworkNotesForAccount", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "store.NtxBuilder", + "GetUnconsumedNetworkNotesForAccount", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + /// header for executing network transactions. If the block number is not provided, the latest + /// header and peaks will be retrieved. + pub async fn get_current_blockchain_data( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetCurrentBlockchainData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetCurrentBlockchainData")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest state of a network account with the specified account prefix. 
+ pub async fn get_network_account_details_by_prefix( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "store.NtxBuilder", + "GetNetworkAccountDetailsByPrefix", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Returns the script for a note by its root. + pub async fn get_note_script_by_root( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNoteScriptByRoot", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetNoteScriptByRoot")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod ntx_builder_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. + #[async_trait] + pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. 
+ async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a paginated list of unconsumed network notes. + async fn get_unconsumed_network_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. + async fn get_unconsumed_network_notes_for_account( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + /// header for executing network transactions. If the block number is not provided, the latest + /// header and peaks will be retrieved. + async fn get_current_blockchain_data( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest state of a network account with the specified account prefix. + async fn get_network_account_details_by_prefix( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the script for a note by its root. 
+ async fn get_note_script_by_root( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the network transaction builder component + #[derive(Debug)] + pub struct NtxBuilderServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl NtxBuilderServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for NtxBuilderServer + where + T: NtxBuilder, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.NtxBuilder/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let 
res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetUnconsumedNetworkNotes" => { + #[allow(non_camel_case_types)] + struct GetUnconsumedNetworkNotesSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetUnconsumedNetworkNotesSvc { + type Response = super::UnconsumedNetworkNotes; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_unconsumed_network_notes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetUnconsumedNetworkNotesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetUnconsumedNetworkNotesForAccount" => { + #[allow(non_camel_case_types)] + struct GetUnconsumedNetworkNotesForAccountSvc( + pub Arc, + ); + impl< + T: NtxBuilder, + > tonic::server::UnaryService< + super::UnconsumedNetworkNotesForAccountRequest, + > for GetUnconsumedNetworkNotesForAccountSvc { + type Response = super::UnconsumedNetworkNotes; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::UnconsumedNetworkNotesForAccountRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + 
let fut = async move { + ::get_unconsumed_network_notes_for_account( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetUnconsumedNetworkNotesForAccountSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetCurrentBlockchainData" => { + #[allow(non_camel_case_types)] + struct GetCurrentBlockchainDataSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService< + super::super::blockchain::MaybeBlockNumber, + > for GetCurrentBlockchainDataSvc { + type Response = super::CurrentBlockchainData; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::MaybeBlockNumber, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_current_blockchain_data( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetCurrentBlockchainDataSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + 
let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { + #[allow(non_camel_case_types)] + struct GetNetworkAccountDetailsByPrefixSvc( + pub Arc, + ); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNetworkAccountDetailsByPrefixSvc { + type Response = super::MaybeAccountDetails; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_network_account_details_by_prefix( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNetworkAccountDetailsByPrefixSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNoteScriptByRoot" => { + #[allow(non_camel_case_types)] + struct GetNoteScriptByRootSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNoteScriptByRootSvc { + type Response = super::super::rpc::MaybeNoteScript; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_note_script_by_root(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNoteScriptByRootSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for NtxBuilderServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.NtxBuilder"; + impl tonic::server::NamedService for NtxBuilderServer { + const NAME: &'static str = SERVICE_NAME; + } +} diff --git a/crates/rpc/src/server/api.rs 
b/crates/rpc/src/server/api.rs index 52f65270eb..4c370d777b 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -4,7 +4,7 @@ use std::time::Duration; use anyhow::Context; use miden_node_proto::clients::{BlockProducerClient, Builder, StoreRpcClient}; use miden_node_proto::errors::ConversionError; -use miden_node_proto::generated::block_producer::MempoolStats; +use miden_node_proto::generated::rpc::MempoolStats; use miden_node_proto::generated::rpc::api_server::{self, Api}; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; @@ -100,7 +100,7 @@ impl RpcService { loop { let result = self .get_block_header_by_number( - proto::shared::BlockHeaderByNumberRequest { + proto::rpc::BlockHeaderByNumberRequest { block_num: Some(BlockNumber::GENESIS.as_u32()), include_mmr_proof: None, } @@ -153,8 +153,8 @@ impl api_server::Api for RpcService { )] async fn check_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().nullifiers.len())?; @@ -179,8 +179,8 @@ impl api_server::Api for RpcService { )] async fn sync_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().nullifiers.len())?; @@ -198,8 +198,8 @@ impl api_server::Api for RpcService { )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { info!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().get_block_header_by_number(request).await @@ -215,8 +215,8 @@ impl api_server::Api for RpcService { )] async fn sync_state( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); 
check::(request.get_ref().account_ids.len())?; @@ -235,8 +235,8 @@ impl api_server::Api for RpcService { )] async fn sync_storage_maps( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_storage_maps(request).await @@ -252,8 +252,8 @@ impl api_server::Api for RpcService { )] async fn sync_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().note_tags.len())?; @@ -300,11 +300,9 @@ impl api_server::Api for RpcService { )] async fn sync_account_vault( &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + request: tonic::Request, + ) -> std::result::Result, tonic::Status> + { debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_account_vault(request).await @@ -314,7 +312,7 @@ impl api_server::Api for RpcService { async fn submit_proven_transaction( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); let Some(block_producer) = &self.block_producer else { @@ -413,7 +411,7 @@ impl api_server::Api for RpcService { async fn submit_proven_batch( &self, request: tonic::Request, - ) -> Result, Status> { + ) -> Result, Status> { let Some(block_producer) = &self.block_producer else { return Err(Status::unavailable("Batch submission not available in read-only mode")); }; @@ -521,8 +519,8 @@ impl api_server::Api for RpcService { )] async fn get_account_proof( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); debug!(target: COMPONENT, ?request); @@ -559,19 +557,17 @@ impl api_server::Api for RpcService { Ok(Response::new(proto::rpc::RpcStatus { version: 
env!("CARGO_PKG_VERSION").to_string(), - store: store_status.or(Some(proto::rpc_store::StoreStatus { + store: store_status.or(Some(proto::rpc::StoreStatus { status: "unreachable".to_string(), chain_tip: 0, version: "-".to_string(), })), - block_producer: block_producer_status.or(Some( - proto::block_producer::BlockProducerStatus { - status: "unreachable".to_string(), - version: "-".to_string(), - chain_tip: 0, - mempool_stats: Some(MempoolStats::default()), - }, - )), + block_producer: block_producer_status.or(Some(proto::rpc::BlockProducerStatus { + status: "unreachable".to_string(), + version: "-".to_string(), + chain_tip: 0, + mempool_stats: Some(MempoolStats::default()), + })), genesis_commitment: self.genesis_commitment.map(Into::into), })) } @@ -587,7 +583,7 @@ impl api_server::Api for RpcService { async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); self.store.clone().get_note_script_by_root(request).await @@ -603,8 +599,8 @@ impl api_server::Api for RpcService { )] async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); self.store.clone().sync_transactions(request).await diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 4b4f1dd0b4..38578d1bd3 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -46,7 +46,7 @@ async fn rpc_server_accepts_requests_without_accept_header() { }; // Send any request to the RPC. - let request = proto::shared::BlockHeaderByNumberRequest { + let request = proto::rpc::BlockHeaderByNumberRequest { block_num: Some(0), include_mmr_proof: None, }; @@ -361,9 +361,8 @@ async fn rpc_server_rejects_tx_submissions_without_genesis() { /// Sends an arbitrary / irrelevant request to the RPC. 
async fn send_request( rpc_client: &mut RpcClient, -) -> std::result::Result, tonic::Status> -{ - let request = proto::shared::BlockHeaderByNumberRequest { +) -> std::result::Result, tonic::Status> { + let request = proto::rpc::BlockHeaderByNumberRequest { block_num: Some(0), include_mmr_proof: None, }; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 6339c0660f..918b395502 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -111,11 +111,11 @@ impl TransactionRecord { pub fn into_proto_with_note_records( self, note_records: Vec, - ) -> proto::rpc_store::TransactionRecord { + ) -> proto::rpc::TransactionRecord { let output_notes: Vec = note_records.into_iter().map(Into::into).collect(); - proto::rpc_store::TransactionRecord { + proto::rpc::TransactionRecord { header: Some(proto::transaction::TransactionHeader { account_id: Some(self.account_id.into()), initial_state_commitment: Some(self.initial_state_commitment.into()), diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index cd7d27cef9..b266feb59e 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -25,8 +25,8 @@ impl StoreApi { /// Shared implementation for all `get_block_header_by_number` endpoints. 
pub async fn get_block_header_by_number_inner( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { info!(target: COMPONENT, ?request); let request = request.into_inner(); @@ -36,7 +36,7 @@ impl StoreApi { .get_block_header(block_num, request.include_mmr_proof.unwrap_or(false)) .await?; - Ok(Response::new(proto::shared::BlockHeaderByNumberResponse { + Ok(Response::new(proto::rpc::BlockHeaderByNumberResponse { block_header: block_header.map(Into::into), chain_length: mmr_proof.as_ref().map(|p| p.forest.num_leaves() as u32), mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)), @@ -64,9 +64,9 @@ pub fn conversion_error_to_status(value: &ConversionError) -> Status { /// Reads a block range from a request, returning a specific error type if the field is missing pub fn read_block_range( - block_range: Option, + block_range: Option, entity: &'static str, -) -> Result +) -> Result where E: From, { diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index b6d29f1362..91b595aadd 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -1,6 +1,6 @@ use std::convert::Infallible; -use miden_node_proto::generated::block_producer_store::block_producer_server; +use miden_node_proto::generated::store::block_producer_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_node_utils::ErrorReport; @@ -40,8 +40,8 @@ impl block_producer_server::BlockProducer for StoreApi { )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } @@ -93,8 +93,8 @@ impl block_producer_server::BlockProducer for StoreApi { )] async fn get_block_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let 
account_ids = read_account_ids::(&request.account_ids)?; @@ -114,7 +114,7 @@ impl block_producer_server::BlockProducer for StoreApi { reference_blocks, ) .await - .map(proto::block_producer_store::BlockInputs::from) + .map(proto::store::BlockInputs::from) .map(Response::new) .map_err(internal_error) } @@ -132,8 +132,8 @@ impl block_producer_server::BlockProducer for StoreApi { )] async fn get_batch_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let note_commitments: Vec = try_convert(request.note_commitments) @@ -164,8 +164,8 @@ impl block_producer_server::BlockProducer for StoreApi { )] async fn get_transaction_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); debug!(target: COMPONENT, ?request); @@ -183,17 +183,19 @@ impl block_producer_server::BlockProducer for StoreApi { let block_height = self.state.latest_block_num().await.as_u32(); - Ok(Response::new(proto::block_producer_store::TransactionInputs { - account_state: Some(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord { + Ok(Response::new(proto::store::TransactionInputs { + account_state: Some(proto::store::transaction_inputs::AccountTransactionInputRecord { account_id: Some(account_id.into()), account_commitment: Some(tx_inputs.account_commitment.into()), }), nullifiers: tx_inputs .nullifiers .into_iter() - .map(|nullifier| proto::block_producer_store::transaction_inputs::NullifierTransactionInputRecord { - nullifier: Some(nullifier.nullifier.into()), - block_num: nullifier.block_num.as_u32(), + .map(|nullifier| { + proto::store::transaction_inputs::NullifierTransactionInputRecord { + nullifier: Some(nullifier.nullifier.into()), + block_num: nullifier.block_num.as_u32(), + } }) .collect(), found_unauthenticated_notes: tx_inputs diff --git a/crates/store/src/server/mod.rs 
b/crates/store/src/server/mod.rs index 91a2fbc368..32e9515546 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -4,12 +4,11 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; -use miden_node_proto::generated::{block_producer_store, ntx_builder_store, rpc_store}; +use miden_node_proto::generated::store; use miden_node_proto_build::{ store_block_producer_api_descriptor, store_ntx_builder_api_descriptor, store_rpc_api_descriptor, - store_shared_api_descriptor, }; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; @@ -96,20 +95,18 @@ impl Store { DbMaintenance::new(Arc::clone(&state), DATABASE_MAINTENANCE_INTERVAL); let rpc_service = - rpc_store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); - let ntx_builder_service = - ntx_builder_store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { - state: Arc::clone(&state), - }); + store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { + state: Arc::clone(&state), + }); let block_producer_service = - block_producer_store::block_producer_server::BlockProducerServer::new(api::StoreApi { + store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) .register_file_descriptor_set(store_ntx_builder_api_descriptor()) .register_file_descriptor_set(store_block_producer_api_descriptor()) - .register_file_descriptor_set(store_shared_api_descriptor()) .build_v1() .context("failed to build reflection service")?; @@ -121,7 +118,6 @@ impl Store { .register_file_descriptor_set(store_rpc_api_descriptor()) .register_file_descriptor_set(store_ntx_builder_api_descriptor()) 
.register_file_descriptor_set(store_block_producer_api_descriptor()) - .register_file_descriptor_set(store_shared_api_descriptor()) .build_v1alpha() .context("failed to build reflection service")?; diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 1f2dd15958..9834ed5bb6 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -1,7 +1,7 @@ use std::num::{NonZero, TryFromIntError}; use miden_node_proto::domain::account::{AccountInfo, NetworkAccountPrefix}; -use miden_node_proto::generated::ntx_builder_store::ntx_builder_server; +use miden_node_proto::generated::store::ntx_builder_server; use miden_node_proto::generated::{self as proto}; use miden_node_utils::ErrorReport; use miden_objects::block::BlockNumber; @@ -32,8 +32,8 @@ impl ntx_builder_server::NtxBuilder for StoreApi { )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } @@ -53,7 +53,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { async fn get_current_blockchain_data( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { let block_num = request.into_inner().block_num.map(BlockNumber::from); let response = match self @@ -62,11 +62,11 @@ impl ntx_builder_server::NtxBuilder for StoreApi { .await .map_err(internal_error)? 
{ - Some((header, peaks)) => proto::ntx_builder_store::CurrentBlockchainData { + Some((header, peaks)) => proto::store::CurrentBlockchainData { current_peaks: peaks.peaks().iter().map(Into::into).collect(), current_block_header: Some(header.into()), }, - None => proto::ntx_builder_store::CurrentBlockchainData { + None => proto::store::CurrentBlockchainData { current_peaks: vec![], current_block_header: None, }, @@ -85,8 +85,8 @@ impl ntx_builder_server::NtxBuilder for StoreApi { )] async fn get_network_account_details_by_prefix( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); // Validate that the call is for a valid network account prefix @@ -98,7 +98,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { let account_info: Option = self.state.get_network_account_details_by_prefix(prefix.inner()).await?; - Ok(Response::new(proto::ntx_builder_store::MaybeAccountDetails { + Ok(Response::new(proto::store::MaybeAccountDetails { details: account_info.map(|acc| (&acc).into()), })) } @@ -112,8 +112,8 @@ impl ntx_builder_server::NtxBuilder for StoreApi { )] async fn get_unconsumed_network_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let state = self.state.clone(); @@ -137,7 +137,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { network_notes.push(note.into()); } - Ok(Response::new(proto::ntx_builder_store::UnconsumedNetworkNotes { + Ok(Response::new(proto::store::UnconsumedNetworkNotes { notes: network_notes, next_token: next_page.token, })) @@ -152,8 +152,8 @@ impl ntx_builder_server::NtxBuilder for StoreApi { )] async fn get_unconsumed_network_notes_for_account( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let block_num = BlockNumber::from(request.block_num); let network_account_id_prefix = @@ -184,7 
+184,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { network_notes.push(note.into()); } - Ok(Response::new(proto::ntx_builder_store::UnconsumedNetworkNotes { + Ok(Response::new(proto::store::UnconsumedNetworkNotes { notes: network_notes, next_token: next_page.token, })) @@ -201,7 +201,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let root = read_root::(request.into_inner().root, "NoteRoot")?; @@ -212,7 +212,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { .await .map_err(GetNoteScriptByRootError::from)?; - Ok(Response::new(proto::shared::MaybeNoteScript { + Ok(Response::new(proto::rpc::MaybeNoteScript { script: note_script.map(Into::into), })) } diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 0e57c1b0e1..5919b72480 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -1,6 +1,6 @@ use miden_node_proto::convert; use miden_node_proto::domain::account::AccountInfo; -use miden_node_proto::generated::rpc_store::rpc_server; +use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; use miden_objects::Word; use miden_objects::account::AccountId; @@ -51,8 +51,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } @@ -71,8 +71,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn check_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { // Validate the nullifiers and convert them to Word values. Stop on first error. 
let request = request.into_inner(); @@ -90,7 +90,7 @@ impl rpc_server::Rpc for StoreApi { // Query the state for the request's nullifiers let proofs = self.state.check_nullifiers(&nullifiers).await; - Ok(Response::new(proto::rpc_store::CheckNullifiersResponse { + Ok(Response::new(proto::rpc::CheckNullifiersResponse { proofs: convert(proofs).collect(), })) } @@ -109,8 +109,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); if request.prefix_len != 16 { @@ -130,14 +130,14 @@ impl rpc_server::Rpc for StoreApi { let nullifiers = nullifiers .into_iter() - .map(|nullifier_info| proto::rpc_store::sync_nullifiers_response::NullifierUpdate { + .map(|nullifier_info| proto::rpc::sync_nullifiers_response::NullifierUpdate { nullifier: Some(nullifier_info.nullifier.into()), block_num: nullifier_info.block_num.as_u32(), }) .collect(); - Ok(Response::new(proto::rpc_store::SyncNullifiersResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncNullifiersResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: block_num.as_u32(), }), @@ -158,8 +158,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_state( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_ids: Vec = read_account_ids::(&request.account_ids)?; @@ -192,7 +192,7 @@ impl rpc_server::Rpc for StoreApi { let notes = state.notes.into_iter().map(Into::into).collect(); - Ok(Response::new(proto::rpc_store::SyncStateResponse { + Ok(Response::new(proto::rpc::SyncStateResponse { chain_tip: self.state.latest_block_num().await.as_u32(), block_header: Some(state.block_header.into()), mmr_delta: Some(delta.into()), @@ -214,8 +214,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn 
sync_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let chain_tip = self.state.latest_block_num().await; @@ -235,8 +235,8 @@ impl rpc_server::Rpc for StoreApi { let notes = state.notes.into_iter().map(Into::into).collect(); - Ok(Response::new(proto::rpc_store::SyncNotesResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncNotesResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_block_included.as_u32(), }), @@ -348,8 +348,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn get_account_proof( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); let account_proof_request = request.try_into()?; @@ -370,8 +370,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_account_vault( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let chain_tip = self.state.latest_block_num().await; @@ -397,7 +397,7 @@ impl rpc_server::Rpc for StoreApi { .into_iter() .map(|update| { let vault_key: Word = update.vault_key.into(); - proto::rpc_store::AccountVaultUpdate { + proto::rpc::AccountVaultUpdate { vault_key: Some(vault_key.into()), asset: update.asset.map(Into::into), block_num: update.block_num.as_u32(), @@ -405,8 +405,8 @@ impl rpc_server::Rpc for StoreApi { }) .collect(); - Ok(Response::new(proto::rpc_store::SyncAccountVaultResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncAccountVaultResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_included_block.as_u32(), }), @@ -428,8 +428,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_storage_maps( &self, - request: 
Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_id = read_account_id::(request.account_id)?; @@ -454,7 +454,7 @@ impl rpc_server::Rpc for StoreApi { let updates = storage_maps_page .values .into_iter() - .map(|map_value| proto::rpc_store::StorageMapUpdate { + .map(|map_value| proto::rpc::StorageMapUpdate { slot_index: u32::from(map_value.slot_index), key: Some(map_value.key.into()), value: Some(map_value.value.into()), @@ -462,8 +462,8 @@ impl rpc_server::Rpc for StoreApi { }) .collect(); - Ok(Response::new(proto::rpc_store::SyncStorageMapsResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncStorageMapsResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: storage_maps_page.last_block_included.as_u32(), }), @@ -483,8 +483,8 @@ impl rpc_server::Rpc for StoreApi { async fn status( &self, _request: Request<()>, - ) -> Result, Status> { - Ok(Response::new(proto::rpc_store::StoreStatus { + ) -> Result, Status> { + Ok(Response::new(proto::rpc::StoreStatus { version: env!("CARGO_PKG_VERSION").to_string(), status: "connected".to_string(), chain_tip: self.state.latest_block_num().await.as_u32(), @@ -502,7 +502,7 @@ impl rpc_server::Rpc for StoreApi { async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let root = read_root::(request.into_inner().root, "NoteRoot")?; @@ -513,7 +513,7 @@ impl rpc_server::Rpc for StoreApi { .await .map_err(GetNoteScriptByRootError::from)?; - Ok(Response::new(proto::shared::MaybeNoteScript { + Ok(Response::new(proto::rpc::MaybeNoteScript { script: note_script.map(Into::into), })) } @@ -528,8 +528,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { 
debug!(target: COMPONENT, request = ?request); let request = request.into_inner(); @@ -595,8 +595,8 @@ impl rpc_server::Rpc for StoreApi { transactions.push(proto_record); } - Ok(Response::new(proto::rpc_store::SyncTransactionsResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncTransactionsResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_block_included.as_u32(), }), diff --git a/proto/build.rs b/proto/build.rs index 87eb57e554..3d4047e24d 100644 --- a/proto/build.rs +++ b/proto/build.rs @@ -6,19 +6,19 @@ use miette::{Context, IntoDiagnostic}; use protox::prost::Message; const RPC_PROTO: &str = "rpc.proto"; -const STORE_RPC_PROTO: &str = "store/rpc.proto"; -const STORE_NTX_BUILDER_PROTO: &str = "store/ntx_builder.proto"; -const STORE_BLOCK_PRODUCER_PROTO: &str = "store/block_producer.proto"; -const STORE_SHARED_PROTO: &str = "store/shared.proto"; -const BLOCK_PRODUCER_PROTO: &str = "block_producer.proto"; +// Unified internal store API (store.Rpc, store.BlockProducer, store.NtxBuilder). +// We compile the same file three times to preserve existing descriptor names. 
+const STORE_RPC_PROTO: &str = "internal/store.proto"; +const STORE_NTX_BUILDER_PROTO: &str = "internal/store.proto"; +const STORE_BLOCK_PRODUCER_PROTO: &str = "internal/store.proto"; +const BLOCK_PRODUCER_PROTO: &str = "internal/block_producer.proto"; const REMOTE_PROVER_PROTO: &str = "remote_prover.proto"; -const VALIDATOR_PROTO: &str = "validator.proto"; +const VALIDATOR_PROTO: &str = "internal/validator.proto"; const RPC_DESCRIPTOR: &str = "rpc_file_descriptor.bin"; const STORE_RPC_DESCRIPTOR: &str = "store_rpc_file_descriptor.bin"; const STORE_NTX_BUILDER_DESCRIPTOR: &str = "store_ntx_builder_file_descriptor.bin"; const STORE_BLOCK_PRODUCER_DESCRIPTOR: &str = "store_block_producer_file_descriptor.bin"; -const STORE_SHARED_DESCRIPTOR: &str = "store_shared_file_descriptor.bin"; const BLOCK_PRODUCER_DESCRIPTOR: &str = "block_producer_file_descriptor.bin"; const REMOTE_PROVER_DESCRIPTOR: &str = "remote_prover_file_descriptor.bin"; const VALIDATOR_DESCRIPTOR: &str = "validator_file_descriptor.bin"; @@ -69,12 +69,6 @@ fn main() -> miette::Result<()> { .into_diagnostic() .wrap_err("writing store block producer file descriptor")?; - let store_shared_file_descriptor = protox::compile([STORE_SHARED_PROTO], includes)?; - let store_shared_path = PathBuf::from(&out).join(STORE_SHARED_DESCRIPTOR); - fs::write(&store_shared_path, store_shared_file_descriptor.encode_to_vec()) - .into_diagnostic() - .wrap_err("writing store shared file descriptor")?; - let block_producer_file_descriptor = protox::compile([BLOCK_PRODUCER_PROTO], includes)?; let block_producer_path = PathBuf::from(&out).join(BLOCK_PRODUCER_DESCRIPTOR); fs::write(&block_producer_path, block_producer_file_descriptor.encode_to_vec()) diff --git a/proto/proto/README.md b/proto/proto/README.md new file mode 100644 index 0000000000..5a3a9e321f --- /dev/null +++ b/proto/proto/README.md @@ -0,0 +1,19 @@ +# Proto Files Organization + +The files are organized by a visibility hierarchy, where the root directory contains 
the public-facing RPC and remote prover protocols, while the `types` directory contains the data types used by these protocols. The `internal` directory contains the internal protocols used by the node, such as the store, the network transaction builder, and block producer protocols.
+
+The organization of the files is as follows:
+
+```
+rpc.proto
+remote_prover.proto
+types/
+├── primitives.proto
+└── xxx.proto
+internal/
+├── store.proto
+├── ntx.proto
+└── block_producer.proto
+```
+
+The public-facing files should only allow the usage of the `types` directory, to avoid exposing internal protocols through service reflection.
diff --git a/proto/proto/block_producer.proto b/proto/proto/internal/block_producer.proto
similarity index 67%
rename from proto/proto/block_producer.proto
rename to proto/proto/internal/block_producer.proto
index d1823f70b7..1a284dcd83 100644
--- a/proto/proto/block_producer.proto
+++ b/proto/proto/internal/block_producer.proto
@@ -2,6 +2,7 @@ syntax = "proto3";
 package block_producer;
 
+import "rpc.proto";
 import "types/note.proto";
 import "types/blockchain.proto";
 import "types/primitives.proto";
@@ -13,10 +14,10 @@ import "google/protobuf/empty.proto";
 
 service Api {
     // Returns the status info.
-    rpc Status(google.protobuf.Empty) returns (BlockProducerStatus) {}
+    rpc Status(google.protobuf.Empty) returns (rpc.BlockProducerStatus) {}
 
-    // Submits proven transaction to the Miden network
-    rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (SubmitProvenTransactionResponse) {}
+    // Submits proven transaction to the Miden network. Returns the node's current block height.
+    rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {}
 
     // Submits a proven batch to the Miden network.
     //
@@ -28,7 +29,9 @@ service Api {
     //
     // All transactions in the batch but not in the mempool must build on the current mempool
     // state following normal transaction submission rules.
- rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (SubmitProvenBatchResponse) {} + // + // Returns the node's current block height. + rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} // Subscribe to mempool events. // @@ -43,56 +46,6 @@ service Api { rpc MempoolSubscription(MempoolSubscriptionRequest) returns (stream MempoolEvent) {} } -// STATUS -// ================================================================================================ - -// Represents the status of the block producer. -message BlockProducerStatus { - // The block producer's running version. - string version = 1; - - // The block producer's status. - string status = 2; - - // The block producer's current view of the chain tip height. - // - // This is the height of the latest block that the block producer considers - // to be part of the canonical chain. - fixed32 chain_tip = 4; - - // Statistics about the mempool. - MempoolStats mempool_stats = 3; -} - -// Statistics about the mempool. -message MempoolStats { - // Number of transactions currently in the mempool waiting to be batched. - uint64 unbatched_transactions = 1; - - // Number of batches currently being proven. - uint64 proposed_batches = 2; - - // Number of proven batches waiting for block inclusion. - uint64 proven_batches = 3; -} - -// SUBMIT PROVEN TRANSACTION -// ================================================================================================ - -// Represents the result of submitting proven transaction. -message SubmitProvenTransactionResponse { - // The node's current block height. - fixed32 block_height = 1; -} - -// SUBMIT PROVEN TRANSACTION -// ================================================================================================ - -message SubmitProvenBatchResponse { - // The node's current block height. 
- fixed32 block_height = 1; -} - // MEMPOOL SUBSCRIPTION // ================================================================================================ diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto new file mode 100644 index 0000000000..27473b9267 --- /dev/null +++ b/proto/proto/internal/store.proto @@ -0,0 +1,342 @@ +// Unified specification of the internal store gRPC APIs. +syntax = "proto3"; +package store; + +import "google/protobuf/empty.proto"; +import "types/account.proto"; +import "types/blockchain.proto"; +import "types/transaction.proto"; +import "types/note.proto"; +import "types/primitives.proto"; +import "rpc.proto"; + +// RPC STORE API +// ================================================================================================ + +// Store API for the RPC component +service Rpc { + // Returns the status info. + rpc Status(google.protobuf.Empty) returns (rpc.StoreStatus) {} + + // Returns a nullifier proof for each of the requested nullifiers. + rpc CheckNullifiers(rpc.NullifierList) returns (rpc.CheckNullifiersResponse) {} + + // Returns the latest state of an account with the specified ID. + rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} + + // Returns the latest state proof of the specified account. + rpc GetAccountProof(rpc.AccountProofRequest) returns (rpc.AccountProofResponse) {} + + // Returns raw block data for the specified block number. + rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} + + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. + rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {} + + // Returns a list of committed notes matching the provided note IDs. 
+    rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {}
+
+    // Returns the script for a note by its root.
+    rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {}
+
+    // Returns a list of nullifiers that match the specified prefixes and are recorded in the node.
+    //
+    // Note that only 16-bit prefixes are supported at this time.
+    rpc SyncNullifiers(rpc.SyncNullifiersRequest) returns (rpc.SyncNullifiersResponse) {}
+
+    // Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in.
+    //
+    // The requester specifies the `note_tags` they are interested in, and the block height from which to search for
+    // matching notes. The request will then return the next block containing any note matching the provided tags.
+    //
+    // The response includes each note's metadata and inclusion proof.
+    //
+    // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the
+    // tip of the chain.
+    rpc SyncNotes(rpc.SyncNotesRequest) returns (rpc.SyncNotesResponse) {}
+
+    // Returns info which can be used by the requester to sync up to the latest state of the chain
+    // for the objects (accounts, notes, nullifiers) the requester is interested in.
+    //
+    // This request returns the next block containing requested data. It also returns `chain_tip`
+    // which is the latest block number in the chain. The requester is expected to repeat these requests
+    // in a loop until `response.block_header.block_num == response.chain_tip`, at which point
+    // the requester is fully synchronized with the chain.
+    //
+    // Each request also returns info about new notes, nullifiers etc. created. It also returns
+    // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain
+    // MMR peaks and chain MMR nodes.
+    //
+    // For preserving some degree of privacy, note tags and nullifiers filters contain only high
+    // part of hashes. 
Thus, returned data contains excessive notes and nullifiers, requester can make + // additional filtering of that data on its side. + rpc SyncState(rpc.SyncStateRequest) returns (rpc.SyncStateResponse) {} + + // Returns account vault updates for specified account within a block range. + rpc SyncAccountVault(rpc.SyncAccountVaultRequest) returns (rpc.SyncAccountVaultResponse) {} + + // Returns storage map updates for specified account and storage slots within a block range. + rpc SyncStorageMaps(rpc.SyncStorageMapsRequest) returns (rpc.SyncStorageMapsResponse) {} + + // Returns transactions records for specific accounts within a block range. + rpc SyncTransactions(rpc.SyncTransactionsRequest) returns (rpc.SyncTransactionsResponse) {} +} + +// BLOCK PRODUCER STORE API +// ================================================================================================ + +// Store API for the BlockProducer component +service BlockProducer { + // Applies changes of a new block to the DB and in-memory data structures. + rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. + rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {} + + // Returns data required to prove the next block. + rpc GetBlockInputs(BlockInputsRequest) returns (BlockInputs) {} + + // Returns the inputs for a transaction batch. + rpc GetBatchInputs(BatchInputsRequest) returns (BatchInputs) {} + + // Returns data required to validate a new transaction. + rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} +} + +// GET BLOCK INPUTS +// ================================================================================================ + +// Returns data required to prove the next block. 
+message BlockInputsRequest { + // IDs of all accounts updated in the proposed block for which to retrieve account witnesses. + repeated account.AccountId account_ids = 1; + + // Nullifiers of all notes consumed by the block for which to retrieve witnesses. + // + // Due to note erasure it will generally not be possible to know the exact set of nullifiers + // a block will create, unless we pre-execute note erasure. So in practice, this set of + // nullifiers will be the set of nullifiers of all proven batches in the block, which is a + // superset of the nullifiers the block may create. + // + // However, if it is known that a certain note will be erased, it would not be necessary to + // provide a nullifier witness for it. + repeated primitives.Digest nullifiers = 2; + + // Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. + repeated primitives.Digest unauthenticated_notes = 3; + + // Array of block numbers referenced by all batches in the block. + repeated fixed32 reference_blocks = 4; +} + +// Represents the result of getting block inputs. +message BlockInputs { + // A nullifier returned as a response to the `GetBlockInputs`. + message NullifierWitness { + // The nullifier. + primitives.Digest nullifier = 1; + + // The SMT proof to verify the nullifier's inclusion in the nullifier tree. + primitives.SmtOpening opening = 2; + } + // The latest block header. + blockchain.BlockHeader latest_block_header = 1; + + // Proof of each requested unauthenticated note's inclusion in a block, **if it existed in + // the store**. + repeated note.NoteInclusionInBlockProof unauthenticated_note_proofs = 2; + + // The serialized chain MMR which includes proofs for all blocks referenced by the + // above note inclusion proofs as well as proofs for inclusion of the requested blocks + // referenced by the batches in the block. 
+ bytes partial_block_chain = 3; + + // The state commitments of the requested accounts and their authentication paths. + repeated account.AccountWitness account_witnesses = 4; + + // The requested nullifiers and their authentication paths. + repeated NullifierWitness nullifier_witnesses = 5; +} + +// GET BATCH INPUTS +// ================================================================================================ + +// Returns the inputs for a transaction batch. +message BatchInputsRequest { + // List of unauthenticated note commitments to be queried from the database. + repeated primitives.Digest note_commitments = 1; + // Set of block numbers referenced by transactions. + repeated fixed32 reference_blocks = 2; +} + +// Represents the result of getting batch inputs. +message BatchInputs { + // The block header that the transaction batch should reference. + blockchain.BlockHeader batch_reference_block_header = 1; + + // Proof of each _found_ unauthenticated note's inclusion in a block. + repeated note.NoteInclusionInBlockProof note_proofs = 2; + + // The serialized chain MMR which includes proofs for all blocks referenced by the + // above note inclusion proofs as well as proofs for inclusion of the blocks referenced + // by the transactions in the batch. + bytes partial_block_chain = 3; +} + +// GET TRANSACTION INPUTS +// ================================================================================================ + +// Returns data required to validate a new transaction. +message TransactionInputsRequest { + // ID of the account against which a transaction is executed. + account.AccountId account_id = 1; + // Set of nullifiers consumed by this transaction. + repeated primitives.Digest nullifiers = 2; + // Set of unauthenticated note commitments to check for existence on-chain. + // + // These are notes which were not on-chain at the state the transaction was proven, + // but could by now be present. 
+ repeated primitives.Digest unauthenticated_notes = 3; +} + +// Represents the result of getting transaction inputs. +message TransactionInputs { + // An account returned as a response to the `GetTransactionInputs`. + message AccountTransactionInputRecord { + // The account ID. + account.AccountId account_id = 1; + + // The latest account commitment, zero commitment if the account doesn't exist. + primitives.Digest account_commitment = 2; + } + + // A nullifier returned as a response to the `GetTransactionInputs`. + message NullifierTransactionInputRecord { + // The nullifier ID. + primitives.Digest nullifier = 1; + + // The block at which the nullifier has been consumed, zero if not consumed. + fixed32 block_num = 2; + } + + // Account state proof. + AccountTransactionInputRecord account_state = 1; + + // List of nullifiers that have been consumed. + repeated NullifierTransactionInputRecord nullifiers = 2; + + // List of unauthenticated notes that were not found in the database. + repeated primitives.Digest found_unauthenticated_notes = 3; + + // The node's current block height. + fixed32 block_height = 4; + + // Whether the account ID prefix is unique. Only relevant for account creation requests. + optional bool new_account_id_prefix_is_unique = 5; // TODO: Replace this with an error. When a general error message exists. +} + +// NTX BUILDER STORE API +// ================================================================================================ + +// Store API for the network transaction builder component +service NtxBuilder { + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. + rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {} + + // Returns a paginated list of unconsumed network notes. 
+ rpc GetUnconsumedNetworkNotes(UnconsumedNetworkNotesRequest) returns (UnconsumedNetworkNotes) {} + + // Returns a paginated list of a network account's unconsumed notes up to a specified block number. + rpc GetUnconsumedNetworkNotesForAccount(UnconsumedNetworkNotesForAccountRequest) returns (UnconsumedNetworkNotes) {} + + // Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + // header for executing network transactions. If the block number is not provided, the latest + // header and peaks will be retrieved. + rpc GetCurrentBlockchainData(blockchain.MaybeBlockNumber) returns (CurrentBlockchainData) {} + + // Returns the latest state of a network account with the specified account prefix. + rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {} + + // Returns the script for a note by its root. + rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} +} + +// GET NETWORK ACCOUNT DETAILS BY PREFIX +// ================================================================================================ + +// Account ID prefix. +message AccountIdPrefix { + // Account ID prefix. + fixed32 account_id_prefix = 1; +} + +// Represents the result of getting network account details by prefix. +message MaybeAccountDetails { + // Account details. + optional account.AccountDetails details = 1; +} + +// GET UNCONSUMED NETWORK NOTES +// ================================================================================================ + +// Returns a list of unconsumed network notes using pagination. +message UnconsumedNetworkNotesRequest { + // An opaque token used to paginate through the notes. + // + // This should be null on the first call, and set to the response token until the response token + // is null, at which point all data has been fetched. + optional uint64 page_token = 1; + + // Number of notes to retrieve per page. 
+ uint64 page_size = 2; +} + +// Returns a paginated list of unconsumed network notes for an account. +// +// Notes created or consumed after the specified block are excluded from the result. +message UnconsumedNetworkNotesForAccountRequest { + // This should be null on the first call, and set to the response token until the response token + // is null, at which point all data has been fetched. + // + // Note that this token is only valid if used with the same parameters. + optional uint64 page_token = 1; + + // Number of notes to retrieve per page. + uint64 page_size = 2; + + // The network account ID prefix to filter notes by. + uint32 network_account_id_prefix = 3; + + // The block number to filter the returned notes by. + // + // Notes that are created or consumed after this block are excluded from the result. + fixed32 block_num = 4; +} + +// Represents the result of getting the unconsumed network notes. +message UnconsumedNetworkNotes { + // An opaque pagination token. + // + // Use this in your next request to get the next + // set of data. + // + // Will be null once there is no more data remaining. + optional uint64 next_token = 1; + + // The list of unconsumed network notes. + repeated note.NetworkNote notes = 2; +} + +// GET CURRENT BLOCKCHAIN DATA +// ================================================================================================ + +// Current blockchain data based on the requested block number. +message CurrentBlockchainData { + // Commitments that represent the current state according to the MMR. + repeated primitives.Digest current_peaks = 1; + // Current block header. 
+ optional blockchain.BlockHeader current_block_header = 2; +} diff --git a/proto/proto/validator.proto b/proto/proto/internal/validator.proto similarity index 100% rename from proto/proto/validator.proto rename to proto/proto/internal/validator.proto diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index dd3f1d6d6c..dd46b33f7b 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -7,9 +7,6 @@ import "types/blockchain.proto"; import "types/note.proto"; import "types/primitives.proto"; import "types/transaction.proto"; -import "block_producer.proto"; -import "store/rpc.proto"; -import "store/shared.proto"; import "google/protobuf/empty.proto"; // RPC API @@ -21,29 +18,29 @@ service Api { rpc Status(google.protobuf.Empty) returns (RpcStatus) {} // Returns a nullifier proof for each of the requested nullifiers. - rpc CheckNullifiers(rpc_store.NullifierList) returns (rpc_store.CheckNullifiersResponse) {} + rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} // Returns the latest state of an account with the specified ID. rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} // Returns the latest state proof of the specified account. - rpc GetAccountProof(rpc_store.AccountProofRequest) returns (rpc_store.AccountProofResponse) {} + rpc GetAccountProof(AccountProofRequest) returns (AccountProofResponse) {} // Returns raw block data for the specified block number. rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} + rpc GetBlockHeaderByNumber(BlockHeaderByNumberRequest) returns (BlockHeaderByNumberResponse) {} // Returns a list of notes matching the provided note IDs. 
rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} - // Submits proven transaction to the Miden network. - rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (block_producer.SubmitProvenTransactionResponse) {} + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} // Submits a proven batch of transactions to the Miden network. // @@ -55,15 +52,17 @@ service Api { // // All transactions in the batch but not in the mempool must build on the current mempool // state following normal transaction submission rules. - rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (block_producer.SubmitProvenBatchResponse) {} + // + // Returns the node's current block height. + rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. // // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(rpc_store.SyncNullifiersRequest) returns (rpc_store.SyncNullifiersResponse) {} + rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(rpc_store.SyncAccountVaultRequest) returns (rpc_store.SyncAccountVaultResponse) {} + rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} // Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. 
// @@ -74,7 +73,7 @@ service Api { // // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the // tip of the chain. - rpc SyncNotes(rpc_store.SyncNotesRequest) returns (rpc_store.SyncNotesResponse) {} + rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} // Returns info which can be used by the client to sync up to the latest state of the chain // for the objects (accounts and notes) the client is interested in. @@ -91,13 +90,13 @@ service Api { // For preserving some degree of privacy, note tags contain only high // part of hashes. Thus, returned data contains excessive notes, client can make // additional filtering of that data on its side. - rpc SyncState(rpc_store.SyncStateRequest) returns (rpc_store.SyncStateResponse) {} + rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(rpc_store.SyncStorageMapsRequest) returns (rpc_store.SyncStorageMapsResponse) {} + rpc SyncStorageMaps(SyncStorageMapsRequest) returns (SyncStorageMapsResponse) {} // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(rpc_store.SyncTransactionsRequest) returns (rpc_store.SyncTransactionsResponse) {} + rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} } // RPC STATUS @@ -112,8 +111,501 @@ message RpcStatus { primitives.Digest genesis_commitment = 2; // The store status. - rpc_store.StoreStatus store = 3; + StoreStatus store = 3; // The block producer status. - block_producer.BlockProducerStatus block_producer = 4; + BlockProducerStatus block_producer = 4; +} + + +// BLOCK PRODUCER STATUS +// ================================================================================================ + + +// Represents the status of the block producer. +message BlockProducerStatus { + // The block producer's running version. 
+ string version = 1; + + // The block producer's status. + string status = 2; + + // The block producer's current view of the chain tip height. + // + // This is the height of the latest block that the block producer considers + // to be part of the canonical chain. + fixed32 chain_tip = 4; + + // Statistics about the mempool. + MempoolStats mempool_stats = 3; +} + +// Statistics about the mempool. +message MempoolStats { + // Number of transactions currently in the mempool waiting to be batched. + uint64 unbatched_transactions = 1; + + // Number of batches currently being proven. + uint64 proposed_batches = 2; + + // Number of proven batches waiting for block inclusion. + uint64 proven_batches = 3; +} + +// STORE STATUS +// ================================================================================================ + +// Represents the status of the store. +message StoreStatus { + // The store's running version. + string version = 1; + + // The store's status. + string status = 2; + + // Number of the latest block in the chain. + fixed32 chain_tip = 3; +} + +// GET BLOCK HEADER BY NUMBER +// ================================================================================================ + +// Returns the block header corresponding to the requested block number, as well as the merkle +// path and current forest which validate the block's inclusion in the chain. +// +// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. +message BlockHeaderByNumberRequest { + // The target block height, defaults to latest if not provided. + optional uint32 block_num = 1; + // Whether or not to return authentication data for the block header. + optional bool include_mmr_proof = 2; +} + +// Represents the result of getting a block header by block number. +message BlockHeaderByNumberResponse { + // The requested block header. 
+    blockchain.BlockHeader block_header = 1;
+
+    // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`.
+    optional primitives.MerklePath mmr_path = 2;
+
+    // Current chain length.
+    optional fixed32 chain_length = 3;
+}
+
+// GET NOTE SCRIPT BY ROOT
+// ================================================================================================
+
+// Represents a note script or nothing.
+message MaybeNoteScript {
+    // The script for a note by its root.
+    optional note.NoteScript script = 1;
+}
+
+// GET ACCOUNT PROOF
+// ================================================================================================
+
+// Returns the latest state proof of the specified account.
+message AccountProofRequest {
+    // Request the details for a public account.
+    message AccountDetailRequest {
+        // Represents a storage slot index and the associated map keys.
+        message StorageMapDetailRequest {
+            // Indirection required for use in `oneof {..}` block.
+            message MapKeys {
+                // A list of map keys associated with this storage slot.
+                repeated primitives.Digest map_keys = 1;
+            }
+            // Storage slot index (`[0..255]`).
+            uint32 slot_index = 1;
+
+            oneof slot_data {
+                // Request to return all storage map data. If the number exceeds a threshold of 1000 entries,
+                // the response will not contain them but must be requested separately.
+                bool all_entries = 2;
+
+                // A list of map keys associated with the given storage slot identified by `slot_index`.
+                MapKeys map_keys = 3;
+            }
+        }
+
+        // Last known code commitment to the requester. The response will include account code
+        // only if its commitment is different from this value.
+        //
+        // If the field is omitted, the response will not include the account code.
+        optional primitives.Digest code_commitment = 1;
+
+        // Last known asset vault commitment to the requester. The response will include asset vault data
+        // only if its commitment is different from this value. 
If the value is not present in the
+        // request, the response will not contain one either.
+        // If the number of to-be-returned asset entries exceeds a threshold, they have to be requested
+        // separately, which is signaled in the response message with a dedicated flag.
+        optional primitives.Digest asset_vault_commitment = 2;
+
+        // Additional request per storage map.
+        repeated StorageMapDetailRequest storage_maps = 3;
+    }
+
+    // ID of the account for which we want to get data
+    account.AccountId account_id = 1;
+
+    // Optional block height at which to return the proof.
+    //
+    // Defaults to current chain tip if unspecified.
+    optional blockchain.BlockNumber block_num = 2;
+
+    // Request for additional account details; valid only for public accounts.
+    optional AccountDetailRequest details = 3;
+}
+
+// Represents the result of getting account proof.
+message AccountProofResponse {
+
+    message AccountDetails {
+        // Account header.
+        account.AccountHeader header = 1;
+
+        // Account storage data
+        AccountStorageDetails storage_details = 2;
+
+        // Account code; empty if code commitments matched or none was requested.
+        optional bytes code = 3;
+
+        // Account asset vault data; empty if vault commitments matched or the requester
+        // omitted it in the request.
+        optional AccountVaultDetails vault_details = 4;
+    }
+
+    // The block number at which the account witness was created and the account details were observed.
+    blockchain.BlockNumber block_num = 1;
+
+    // Account ID, current state commitment, and SMT path.
+    account.AccountWitness witness = 2;
+
+    // Additional details for public accounts.
+    optional AccountDetails details = 3;
+}
+
+// Account vault details for AccountProofResponse
+message AccountVaultDetails {
+    // A flag that is set to true if the account contains too many assets. 
This indicates + // to the user that `SyncAccountVault` endpoint should be used to retrieve the + // account's assets + bool too_many_assets = 1; + + // When too_many_assets == false, this will contain the list of assets in the + // account's vault + repeated primitives.Asset assets = 2; +} + +// Account storage details for AccountProofResponse +message AccountStorageDetails { + message AccountStorageMapDetails { + // Wrapper for repeated storage map entries + message MapEntries { + // Definition of individual storage entries. + message StorageMapEntry { + primitives.Digest key = 1; + primitives.Digest value = 2; + } + + repeated StorageMapEntry entries = 1; + } + // slot index of the storage map + uint32 slot_index = 1; + + // A flag that is set to `true` if the number of to-be-returned entries in the + // storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` + // endpoint should be used to get all storage map data. + bool too_many_entries = 2; + + // By default we provide all storage entries. + MapEntries entries = 3; + } + + // Account storage header (storage slot info for up to 256 slots) + account.AccountStorageHeader header = 1; + + // Additional data for the requested storage maps + repeated AccountStorageMapDetails map_details = 2; +} + +// CHECK NULLIFIERS +// ================================================================================================ + +// List of nullifiers to return proofs for. +message NullifierList { + // List of nullifiers to return proofs for. + repeated primitives.Digest nullifiers = 1; +} + +// Represents the result of checking nullifiers. +message CheckNullifiersResponse { + // Each requested nullifier has its corresponding nullifier proof at the same position. 
+ repeated primitives.SmtOpening proofs = 1; +} + +// SYNC NULLIFIERS +// ================================================================================================ + +// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. +message SyncNullifiersRequest { + // Block number from which the nullifiers are requested (inclusive). + BlockRange block_range = 1; + + // Number of bits used for nullifier prefix. Currently the only supported value is 16. + uint32 prefix_len = 2; + + // List of nullifiers to check. Each nullifier is specified by its prefix with length equal + // to `prefix_len`. + repeated uint32 nullifiers = 3; +} + +// Represents the result of syncing nullifiers. +message SyncNullifiersResponse { + // Represents a single nullifier update. + message NullifierUpdate { + // Nullifier ID. + primitives.Digest nullifier = 1; + + // Block number. + fixed32 block_num = 2; + } + + // Pagination information. + PaginationInfo pagination_info = 1; + + // List of nullifiers matching the prefixes specified in the request. + repeated NullifierUpdate nullifiers = 2; +} + +// SYNC ACCOUNT VAULT +// ================================================================================================ + +// Account vault synchronization request. +// +// Allows requesters to sync asset values for specific public accounts within a block range. +message SyncAccountVaultRequest { + // Block range from which to start synchronizing. + // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; + + // Account for which we want to sync asset vault. + account.AccountId account_id = 2; +} + +message SyncAccountVaultResponse { + // Pagination information. + PaginationInfo pagination_info = 1; + + // List of asset updates for the account. 
+ // + // Multiple updates can be returned for a single asset, and the one with a higher `block_num` + // is expected to be retained by the caller. + repeated AccountVaultUpdate updates = 2; +} + +message AccountVaultUpdate { + // Vault key associated with the asset. + primitives.Digest vault_key = 1; + + // Asset value related to the vault key. + // If not present, the asset was removed from the vault. + optional primitives.Asset asset = 2; + + // Block number at which the above asset was updated in the account vault. + fixed32 block_num = 3; +} + +// SYNC NOTES +// ================================================================================================ + +// Note synchronization request. +// +// Specifies note tags that requester is interested in. The server will return the first block which +// contains a note matching `note_tags` or the chain tip. +message SyncNotesRequest { + // Block range from which to start synchronizing. + BlockRange block_range = 1; + + // Specifies the tags which the requester is interested in. + repeated fixed32 note_tags = 2; +} + +// Represents the result of syncing notes request. +message SyncNotesResponse { + // Pagination information. + PaginationInfo pagination_info = 1; + + // Block header of the block with the first note matching the specified criteria. + blockchain.BlockHeader block_header = 2; + + // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. + // + // An MMR proof can be constructed for the leaf of index `block_header.block_num` of + // an MMR of forest `chain_tip` with this path. + primitives.MerklePath mmr_path = 3; + + // List of all notes together with the Merkle paths from `response.block_header.note_root`. + repeated note.NoteSyncRecord notes = 4; +} + +// SYNC STATE +// ================================================================================================ + +// State synchronization request. +// +// Specifies state updates the requester is interested in. 
The server will return the first block which +// contains a note matching `note_tags` or the chain tip. And the corresponding updates to +// `account_ids` for that block range. +message SyncStateRequest { + // Last block known by the requester. The response will contain data starting from the next block, + // until the first block which contains a note of matching the requested tag, or the chain tip + // if there are no notes. + fixed32 block_num = 1; + + // Accounts' commitment to include in the response. + // + // An account commitment will be included if-and-only-if it is the latest update. Meaning it is + // possible there was an update to the account for the given range, but if it is not the latest, + // it won't be included in the response. + repeated account.AccountId account_ids = 2; + + // Specifies the tags which the requester is interested in. + repeated fixed32 note_tags = 3; +} + +// Represents the result of syncing state request. +message SyncStateResponse { + // Number of the latest block in the chain. + fixed32 chain_tip = 1; + + // Block header of the block with the first note matching the specified criteria. + blockchain.BlockHeader block_header = 2; + + // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. + primitives.MmrDelta mmr_delta = 3; + + // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. + repeated account.AccountSummary accounts = 5; + + // List of transactions executed against requested accounts between `request.block_num + 1` and + // `response.block_header.block_num`. + repeated transaction.TransactionSummary transactions = 6; + + // List of all notes together with the Merkle paths from `response.block_header.note_root`. 
+ repeated note.NoteSyncRecord notes = 7; +} + +// SYNC STORAGE MAP +// ================================================================================================ + +// Storage map synchronization request. +// +// Allows requesters to sync storage map values for specific public accounts within a block range, +// with support for cursor-based pagination to handle large storage maps. +message SyncStorageMapsRequest { + // Block range from which to start synchronizing. + // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; + + // Account for which we want to sync storage maps. + account.AccountId account_id = 3; +} + +message SyncStorageMapsResponse { + // Pagination information. + PaginationInfo pagination_info = 1; + + // The list of storage map updates. + // + // Multiple updates can be returned for a single slot index and key combination, and the one + // with a higher `block_num` is expected to be retained by the caller. + repeated StorageMapUpdate updates = 2; +} + +// Represents a single storage map update. +message StorageMapUpdate { + // Block number in which the slot was updated. + fixed32 block_num = 1; + + // Slot index ([0..255]). + uint32 slot_index = 2; + + // The storage map key. + primitives.Digest key = 3; + + // The storage map value. + primitives.Digest value = 4; +} + +// BLOCK RANGE +// ================================================================================================ + +// Represents a block range. +message BlockRange { + // Block number from which to start (inclusive). + fixed32 block_from = 1; + + // Block number up to which to check (inclusive). If not specified, checks up to the latest block. 
+ optional fixed32 block_to = 2; +} + +// PAGINATION INFO +// ================================================================================================ + +// Represents pagination information for chunked responses. +// +// Pagination is done using block numbers as the axis, allowing requesters to request +// data in chunks by specifying block ranges and continuing from where the previous +// response left off. +// +// To request the next chunk, the requester should use `block_num + 1` from the previous response +// as the `block_from` for the next request. +message PaginationInfo { + // Current chain tip + fixed32 chain_tip = 1; + + // The block number of the last check included in this response. + // + // For chunked responses, this may be less than `request.block_range.block_to`. + // If it is less than request.block_range.block_to, the user is expected to make a subsequent request + // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). + fixed32 block_num = 2; +} + +// SYNC TRANSACTIONS +// ================================================================================================ + +// Transactions synchronization request. +// +// Allows requesters to sync transactions for specific accounts within a block range. +message SyncTransactionsRequest { + // Block range from which to start synchronizing. + BlockRange block_range = 1; + + // Accounts to sync transactions for. + repeated account.AccountId account_ids = 2; +} + +// Represents the result of syncing transactions request. +message SyncTransactionsResponse { + // Pagination information. + PaginationInfo pagination_info = 1; + + // List of transaction records. + repeated TransactionRecord transactions = 2; +} + +// Represents a transaction record. +message TransactionRecord { + // Block number in which the transaction was included. + fixed32 block_num = 1; + + // A transaction header. 
+ transaction.TransactionHeader header = 2; } diff --git a/proto/proto/store/block_producer.proto b/proto/proto/store/block_producer.proto deleted file mode 100644 index e0218bd0fa..0000000000 --- a/proto/proto/store/block_producer.proto +++ /dev/null @@ -1,164 +0,0 @@ -// Specification of the Block Producer store RPC. -syntax = "proto3"; -package block_producer_store; - -import "google/protobuf/empty.proto"; -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/note.proto"; -import "types/primitives.proto"; -import "store/shared.proto"; - -// BLOCK PRODUCER STORE API -// ================================================================================================ - -// Store API for the BlockProducer component -service BlockProducer { - // Applies changes of a new block to the DB and in-memory data structures. - rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} - - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns data required to prove the next block. - rpc GetBlockInputs(BlockInputsRequest) returns (BlockInputs) {} - - // Returns the inputs for a transaction batch. - rpc GetBatchInputs(BatchInputsRequest) returns (BatchInputs) {} - - // Returns data required to validate a new transaction. - rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} -} - -// GET BLOCK INPUTS -// ================================================================================================ - -// Returns data required to prove the next block. -message BlockInputsRequest { - // IDs of all accounts updated in the proposed block for which to retrieve account witnesses. 
- repeated account.AccountId account_ids = 1; - - // Nullifiers of all notes consumed by the block for which to retrieve witnesses. - // - // Due to note erasure it will generally not be possible to know the exact set of nullifiers - // a block will create, unless we pre-execute note erasure. So in practice, this set of - // nullifiers will be the set of nullifiers of all proven batches in the block, which is a - // superset of the nullifiers the block may create. - // - // However, if it is known that a certain note will be erased, it would not be necessary to - // provide a nullifier witness for it. - repeated primitives.Digest nullifiers = 2; - - // Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - repeated primitives.Digest unauthenticated_notes = 3; - - // Array of block numbers referenced by all batches in the block. - repeated fixed32 reference_blocks = 4; -} - -// Represents the result of getting block inputs. -message BlockInputs { - // A nullifier returned as a response to the `GetBlockInputs`. - message NullifierWitness { - // The nullifier. - primitives.Digest nullifier = 1; - - // The SMT proof to verify the nullifier's inclusion in the nullifier tree. - primitives.SmtOpening opening = 2; - } - // The latest block header. - blockchain.BlockHeader latest_block_header = 1; - - // Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - // the store**. - repeated note.NoteInclusionInBlockProof unauthenticated_note_proofs = 2; - - // The serialized chain MMR which includes proofs for all blocks referenced by the - // above note inclusion proofs as well as proofs for inclusion of the requested blocks - // referenced by the batches in the block. - bytes partial_block_chain = 3; - - // The state commitments of the requested accounts and their authentication paths. 
- repeated account.AccountWitness account_witnesses = 4; - - // The requested nullifiers and their authentication paths. - repeated NullifierWitness nullifier_witnesses = 5; -} - -// GET BATCH INPUTS -// ================================================================================================ - -// Returns the inputs for a transaction batch. -message BatchInputsRequest { - // List of unauthenticated note commitments to be queried from the database. - repeated primitives.Digest note_commitments = 1; - // Set of block numbers referenced by transactions. - repeated fixed32 reference_blocks = 2; -} - -// Represents the result of getting batch inputs. -message BatchInputs { - // The block header that the transaction batch should reference. - blockchain.BlockHeader batch_reference_block_header = 1; - - // Proof of each _found_ unauthenticated note's inclusion in a block. - repeated note.NoteInclusionInBlockProof note_proofs = 2; - - // The serialized chain MMR which includes proofs for all blocks referenced by the - // above note inclusion proofs as well as proofs for inclusion of the blocks referenced - // by the transactions in the batch. - bytes partial_block_chain = 3; -} - -// GET TRANSACTION INPUTS -// ================================================================================================ - -// Returns data required to validate a new transaction. -message TransactionInputsRequest { - // ID of the account against which a transaction is executed. - account.AccountId account_id = 1; - // Set of nullifiers consumed by this transaction. - repeated primitives.Digest nullifiers = 2; - // Set of unauthenticated note commitments to check for existence on-chain. - // - // These are notes which were not on-chain at the state the transaction was proven, - // but could by now be present. - repeated primitives.Digest unauthenticated_notes = 3; -} - -// Represents the result of getting transaction inputs. 
-message TransactionInputs { - // An account returned as a response to the `GetTransactionInputs`. - message AccountTransactionInputRecord { - // The account ID. - account.AccountId account_id = 1; - - // The latest account commitment, zero commitment if the account doesn't exist. - primitives.Digest account_commitment = 2; - } - - // A nullifier returned as a response to the `GetTransactionInputs`. - message NullifierTransactionInputRecord { - // The nullifier ID. - primitives.Digest nullifier = 1; - - // The block at which the nullifier has been consumed, zero if not consumed. - fixed32 block_num = 2; - } - - // Account state proof. - AccountTransactionInputRecord account_state = 1; - - // List of nullifiers that have been consumed. - repeated NullifierTransactionInputRecord nullifiers = 2; - - // List of unauthenticated notes that were not found in the database. - repeated primitives.Digest found_unauthenticated_notes = 3; - - // The node's current block height. - fixed32 block_height = 4; - - // Whether the account ID prefix is unique. Only relevant for account creation requests. - optional bool new_account_id_prefix_is_unique = 5; // TODO: Replace this with an error. When a general error message exists. -} diff --git a/proto/proto/store/ntx_builder.proto b/proto/proto/store/ntx_builder.proto deleted file mode 100644 index 15144447ba..0000000000 --- a/proto/proto/store/ntx_builder.proto +++ /dev/null @@ -1,113 +0,0 @@ -// Specification of the NTX Builder store RPC. -syntax = "proto3"; -package ntx_builder_store; - -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/note.proto"; -import "types/primitives.proto"; -import "store/shared.proto"; - -// NTX BUILDER STORE API -// ================================================================================================ - -// Store API for the network transaction builder component -service NtxBuilder { - // Retrieves block header by given block number. 
Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns a paginated list of unconsumed network notes. - rpc GetUnconsumedNetworkNotes(UnconsumedNetworkNotesRequest) returns (UnconsumedNetworkNotes) {} - - // Returns a paginated list of a network account's unconsumed notes up to a specified block number. - rpc GetUnconsumedNetworkNotesForAccount(UnconsumedNetworkNotesForAccountRequest) returns (UnconsumedNetworkNotes) {} - - // Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - // header for executing network transactions. If the block number is not provided, the latest - // header and peaks will be retrieved. - rpc GetCurrentBlockchainData(blockchain.MaybeBlockNumber) returns (CurrentBlockchainData) {} - - // Returns the latest state of a network account with the specified account prefix. - rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {} - - // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} -} - -// GET NETWORK ACCOUNT DETAILS BY PREFIX -// ================================================================================================ - -// Account ID prefix. -message AccountIdPrefix { - // Account ID prefix. - fixed32 account_id_prefix = 1; -} - -// Represents the result of getting network account details by prefix. -message MaybeAccountDetails { - // Account details. - optional account.AccountDetails details = 1; -} - -// GET UNCONSUMED NETWORK NOTES -// ================================================================================================ - -// Returns a list of unconsumed network notes using pagination. -message UnconsumedNetworkNotesRequest { - // An opaque token used to paginate through the notes. 
- // - // This should be null on the first call, and set to the response token until the response token - // is null, at which point all data has been fetched. - optional uint64 page_token = 1; - - // Number of notes to retrieve per page. - uint64 page_size = 2; -} - -// Returns a paginated list of unconsumed network notes for an account. -// -// Notes created or consumed after the specified block are excluded from the result. -message UnconsumedNetworkNotesForAccountRequest { - // This should be null on the first call, and set to the response token until the response token - // is null, at which point all data has been fetched. - // - // Note that this token is only valid if used with the same parameters. - optional uint64 page_token = 1; - - // Number of notes to retrieve per page. - uint64 page_size = 2; - - // The network account ID prefix to filter notes by. - uint32 network_account_id_prefix = 3; - - // The block number to filter the returned notes by. - // - // Notes that are created or consumed after this block are excluded from the result. - fixed32 block_num = 4; -} - -// Represents the result of getting the unconsumed network notes. -message UnconsumedNetworkNotes { - // An opaque pagination token. - // - // Use this in your next request to get the next - // set of data. - // - // Will be null once there is no more data remaining. - optional uint64 next_token = 1; - - // The list of unconsumed network notes. - repeated note.NetworkNote notes = 2; -} - -// GET CURRENT BLOCKCHAIN DATA -// ================================================================================================ - -// Current blockchain data based on the requested block number. -message CurrentBlockchainData { - // Commitments that represent the current state according to the MMR. - repeated primitives.Digest current_peaks = 1; - // Current block header. 
- optional blockchain.BlockHeader current_block_header = 2; -} diff --git a/proto/proto/store/rpc.proto b/proto/proto/store/rpc.proto deleted file mode 100644 index 6ac9352255..0000000000 --- a/proto/proto/store/rpc.proto +++ /dev/null @@ -1,510 +0,0 @@ -// Specification of the store RPC. -// -// This provided access to the blockchain data to the other nodes. -syntax = "proto3"; -package rpc_store; - -import "google/protobuf/empty.proto"; -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/transaction.proto"; -import "types/note.proto"; -import "types/primitives.proto"; -import "store/shared.proto"; - -// RPC STORE API -// ================================================================================================ - -// Store API for the RPC component -service Rpc { - // Returns the status info. - rpc Status(google.protobuf.Empty) returns (StoreStatus) {} - - // Returns a nullifier proof for each of the requested nullifiers. - rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - - // Returns the latest state of an account with the specified ID. - rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} - - // Returns the latest state proof of the specified account. - rpc GetAccountProof(AccountProofRequest) returns (AccountProofResponse) {} - - // Returns raw block data for the specified block number. - rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} - - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns a list of committed notes matching the provided note IDs. - rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} - - // Returns the script for a note by its root. 
- rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} - - // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - // - // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} - - // Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - // - // requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - // matching notes for. The request will then return the next block containing any note matching the provided tags. - // - // The response includes each note's metadata and inclusion proof. - // - // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - // tip of the chain. - rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} - - // Returns info which can be used by the requester to sync up to the latest state of the chain - // for the objects (accounts, notes, nullifiers) the requester is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. requester is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the requester is fully synchronized with the chain. - // - // Each request also returns info about new notes, nullifiers etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. - // - // For preserving some degree of privacy, note tags and nullifiers filters contain only high - // part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - // additional filtering of that data on its side. 
- rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} - - // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} - - // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(SyncStorageMapsRequest) returns (SyncStorageMapsResponse) {} - - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} -} - -// STORE STATUS -// ================================================================================================ - -// Represents the status of the store. -message StoreStatus { - // The store's running version. - string version = 1; - - // The store's status. - string status = 2; - - // Number of the latest block in the chain. - fixed32 chain_tip = 3; -} - -// GET ACCOUNT PROOF -// ================================================================================================ - -// Returns the latest state proof of the specified account. -message AccountProofRequest { - // Request the details for a public account. - message AccountDetailRequest { - // Represents a storage slot index and the associated map keys. - message StorageMapDetailRequest { - // Indirection required for use in `oneof {..}` block. - message MapKeys { - // A list of map keys associated with this storage slot. - repeated primitives.Digest map_keys = 1; - } - // Storage slot index (`[0..255]`). - uint32 slot_index = 1; - - oneof slot_data { - // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - // the response will not contain them but must be requested separately. - bool all_entries = 2; - - // A list of map keys associated with the given storage slot identified by `slot_index`. - MapKeys map_keys = 3; - } - } - - // Last known code commitment to the requester. 
The response will include account code - // only if its commitment is different from this value. - // - // If the field is ommiteed, the response will not include the account code. - optional primitives.Digest code_commitment = 1; - - // Last known asset vault commitment to the requester. The response will include asset vault data - // only if its commitment is different from this value. If the value is not present in the - // request, the response will not contain one either. - // If the number of to-be-returned asset entries exceed a threshold, they have to be requested - // separately, which is signaled in the response message with dedicated flag. - optional primitives.Digest asset_vault_commitment = 2; - - // Additional request per storage map. - repeated StorageMapDetailRequest storage_maps = 3; - } - - // ID of the account for which we want to get data - account.AccountId account_id = 1; - - // Optional block height at which to return the proof. - // - // Defaults to current chain tip if unspecified. - optional blockchain.BlockNumber block_num = 2; - - // Request for additional account details; valid only for public accounts. - optional AccountDetailRequest details = 3; -} - -// Represents the result of getting account proof. -message AccountProofResponse { - - message AccountDetails { - // Account header. - account.AccountHeader header = 1; - - // Account storage data - AccountStorageDetails storage_details = 2; - - // Account code; empty if code commitments matched or none was requested. - optional bytes code = 3; - - // Account asset vault data; empty if vault commitments matched or the requester - // omitted it in the request. - optional AccountVaultDetails vault_details = 4; - } - - // The block number at which the account witness was created and the account details were observed. - blockchain.BlockNumber block_num = 1; - - // Account ID, current state commitment, and SMT path. 
- account.AccountWitness witness = 2; - - // Additional details for public accounts. - optional AccountDetails details = 3; -} - -// Account vault details for AccountProofResponse -message AccountVaultDetails { - // A flag that is set to true if the account contains too many assets. This indicates - // to the user that `SyncAccountVault` endpoint should be used to retrieve the - // account's assets - bool too_many_assets = 1; - - // When too_many_assets == false, this will contain the list of assets in the - // account's vault - repeated primitives.Asset assets = 2; -} - -// Account storage details for AccountProofResponse -message AccountStorageDetails { - message AccountStorageMapDetails { - // Wrapper for repeated storage map entries - message MapEntries { - // Definition of individual storage entries. - message StorageMapEntry { - primitives.Digest key = 1; - primitives.Digest value = 2; - } - - repeated StorageMapEntry entries = 1; - } - // slot index of the storage map - uint32 slot_index = 1; - - // A flag that is set to `true` if the number of to-be-returned entries in the - // storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - // endpoint should be used to get all storage map data. - bool too_many_entries = 2; - - // By default we provide all storage entries. - MapEntries entries = 3; - } - - // Account storage header (storage slot info for up to 256 slots) - account.AccountStorageHeader header = 1; - - // Additional data for the requested storage maps - repeated AccountStorageMapDetails map_details = 2; -} - - -// CHECK NULLIFIERS -// ================================================================================================ - -// List of nullifiers to return proofs for. -message NullifierList { - // List of nullifiers to return proofs for. - repeated primitives.Digest nullifiers = 1; -} - -// Represents the result of checking nullifiers. 
-message CheckNullifiersResponse { - // Each requested nullifier has its corresponding nullifier proof at the same position. - repeated primitives.SmtOpening proofs = 1; -} - -// SYNC NULLIFIERS -// ================================================================================================ - -// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -message SyncNullifiersRequest { - // Block number from which the nullifiers are requested (inclusive). - BlockRange block_range = 1; - - // Number of bits used for nullifier prefix. Currently the only supported value is 16. - uint32 prefix_len = 2; - - // List of nullifiers to check. Each nullifier is specified by its prefix with length equal - // to `prefix_len`. - repeated uint32 nullifiers = 3; -} - -// Represents the result of syncing nullifiers. -message SyncNullifiersResponse { - // Represents a single nullifier update. - message NullifierUpdate { - // Nullifier ID. - primitives.Digest nullifier = 1; - - // Block number. - fixed32 block_num = 2; - } - - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of nullifiers matching the prefixes specified in the request. - repeated NullifierUpdate nullifiers = 2; -} - -// SYNC STATE -// ================================================================================================ - -// State synchronization request. -// -// Specifies state updates the requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -// `account_ids` for that block range. -message SyncStateRequest { - // Last block known by the requester. The response will contain data starting from the next block, - // until the first block which contains a note of matching the requested tag, or the chain tip - // if there are no notes. - fixed32 block_num = 1; - - // Accounts' commitment to include in the response. 
- // - // An account commitment will be included if-and-only-if it is the latest update. Meaning it is - // possible there was an update to the account for the given range, but if it is not the latest, - // it won't be included in the response. - repeated account.AccountId account_ids = 2; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 3; -} - -// Represents the result of syncing state request. -message SyncStateResponse { - // Number of the latest block in the chain. - fixed32 chain_tip = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - primitives.MmrDelta mmr_delta = 3; - - // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - repeated account.AccountSummary accounts = 5; - - // List of transactions executed against requested accounts between `request.block_num + 1` and - // `response.block_header.block_num`. - repeated transaction.TransactionSummary transactions = 6; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 7; -} - -// SYNC ACCOUNT VAULT -// ================================================================================================ - -// Account vault synchronization request. -// -// Allows requesters to sync asset values for specific public accounts within a block range. -message SyncAccountVaultRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; - - // Account for which we want to sync asset vault. 
- account.AccountId account_id = 2; -} - -message SyncAccountVaultResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of asset updates for the account. - // - // Multiple updates can be returned for a single asset, and the one with a higher `block_num` - // is expected to be retained by the caller. - repeated AccountVaultUpdate updates = 2; -} - -message AccountVaultUpdate { - // Vault key associated with the asset. - primitives.Digest vault_key = 1; - - // Asset value related to the vault key. - // If not present, the asset was removed from the vault. - optional primitives.Asset asset = 2; - - // Block number at which the above asset was updated in the account vault. - fixed32 block_num = 3; -} - -// SYNC NOTES -// ================================================================================================ - -// Note synchronization request. -// -// Specifies note tags that requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. -message SyncNotesRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 2; -} - -// Represents the result of syncing notes request. -message SyncNotesResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - // - // An MMR proof can be constructed for the leaf of index `block_header.block_num` of - // an MMR of forest `chain_tip` with this path. - primitives.MerklePath mmr_path = 3; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. 
- repeated note.NoteSyncRecord notes = 4; -} - -// SYNC STORAGE MAP -// ================================================================================================ - -// Storage map synchronization request. -// -// Allows requesters to sync storage map values for specific public accounts within a block range, -// with support for cursor-based pagination to handle large storage maps. -message SyncStorageMapsRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; - - // Account for which we want to sync storage maps. - account.AccountId account_id = 3; -} - -message SyncStorageMapsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // The list of storage map updates. - // - // Multiple updates can be returned for a single slot index and key combination, and the one - // with a higher `block_num` is expected to be retained by the caller. - repeated StorageMapUpdate updates = 2; -} - -// Represents a single storage map update. -message StorageMapUpdate { - // Block number in which the slot was updated. - fixed32 block_num = 1; - - // Slot index ([0..255]). - uint32 slot_index = 2; - - // The storage map key. - primitives.Digest key = 3; - - // The storage map value. - primitives.Digest value = 4; -} - -// BLOCK RANGE -// ================================================================================================ - -// Represents a block range. -message BlockRange { - // Block number from which to start (inclusive). - fixed32 block_from = 1; - - // Block number up to which to check (inclusive). If not specified, checks up to the latest block. 
- optional fixed32 block_to = 2; -} - -// PAGINATION INFO -// ================================================================================================ - -// Represents pagination information for chunked responses. -// -// Pagination is done using block numbers as the axis, allowing requesters to request -// data in chunks by specifying block ranges and continuing from where the previous -// response left off. -// -// To request the next chunk, the requester should use `block_num + 1` from the previous response -// as the `block_from` for the next request. -message PaginationInfo { - // Current chain tip - fixed32 chain_tip = 1; - - // The block number of the last check included in this response. - // - // For chunked responses, this may be less than `request.block_range.block_to`. - // If it is less than request.block_range.block_to, the user is expected to make a subsequent request - // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - fixed32 block_num = 2; -} - -// SYNC TRANSACTIONS -// ================================================================================================ - -// Transactions synchronization request. -// -// Allows requesters to sync transactions for specific accounts within a block range. -message SyncTransactionsRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; - - // Accounts to sync transactions for. - repeated account.AccountId account_ids = 2; -} - -// Represents the result of syncing transactions request. -message SyncTransactionsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of transaction records. - repeated TransactionRecord transactions = 2; -} - -// Represents a transaction record. -message TransactionRecord { - // Block number in which the transaction was included. - fixed32 block_num = 1; - - // A transaction header. 
- transaction.TransactionHeader header = 2; -} diff --git a/proto/proto/store/shared.proto b/proto/proto/store/shared.proto deleted file mode 100644 index 1d162087ef..0000000000 --- a/proto/proto/store/shared.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Shared messages for the store RPC. -syntax = "proto3"; -package shared; - -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/transaction.proto"; -import "types/primitives.proto"; -import "types/note.proto"; -import "google/protobuf/empty.proto"; - -// GET BLOCK HEADER BY NUMBER -// ================================================================================================ - -// Returns the block header corresponding to the requested block number, as well as the merkle -// path and current forest which validate the block's inclusion in the chain. -// -// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. -message BlockHeaderByNumberRequest { - // The target block height, defaults to latest if not provided. - optional uint32 block_num = 1; - // Whether or not to return authentication data for the block header. - optional bool include_mmr_proof = 2; -} - -// Represents the result of getting a block header by block number. -message BlockHeaderByNumberResponse { - // The requested block header. - blockchain.BlockHeader block_header = 1; - - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - optional primitives.MerklePath mmr_path = 2; - - // Current chain length. - optional fixed32 chain_length = 3; -} - -// GET NOTE SCRIPT BY ROOT -// ================================================================================================ - -// Represents a note script or nothing. -message MaybeNoteScript { - // The script for a note by its root. 
- optional note.NoteScript script = 1; -} diff --git a/proto/src/lib.rs b/proto/src/lib.rs index 14ded322fb..8e8440d19d 100644 --- a/proto/src/lib.rs +++ b/proto/src/lib.rs @@ -41,14 +41,6 @@ pub fn store_block_producer_api_descriptor() -> FileDescriptorSet { .expect("bytes should be a valid file descriptor created by build.rs") } -/// Returns the Protobuf file descriptor for the store shared API. -#[cfg(feature = "internal")] -pub fn store_shared_api_descriptor() -> FileDescriptorSet { - let bytes = include_bytes!(concat!(env!("OUT_DIR"), "/", "store_shared_file_descriptor.bin")); - FileDescriptorSet::decode(&bytes[..]) - .expect("bytes should be a valid file descriptor created by build.rs") -} - /// Returns the Protobuf file descriptor for the block-producer API. #[cfg(feature = "internal")] pub fn block_producer_api_descriptor() -> FileDescriptorSet { From 4b4030a6e42bd97146101a7eed3a19f64c7251fc Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 9 Dec 2025 08:13:38 +0200 Subject: [PATCH 040/125] feat: configurable mempool tx capacity (#1433) --- CHANGELOG.md | 16 ++++++---- bin/node/src/commands/block_producer.rs | 5 ++++ bin/node/src/commands/bundled.rs | 1 + bin/node/src/commands/mod.rs | 34 ++++++++++++++++++---- crates/block-producer/src/errors.rs | 3 ++ crates/block-producer/src/lib.rs | 18 ++++++++++++ crates/block-producer/src/mempool/mod.rs | 19 +++++++++++- crates/block-producer/src/mempool/nodes.rs | 8 +++++ crates/block-producer/src/server/mod.rs | 5 ++++ 9 files changed, 97 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c5d82b346..8e71be2d61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,16 @@ ## v0.13.0 (TBD) +### Enhancements + +- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). 
+- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). +- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)). +- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). +- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). +- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). +- The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). + ### Changes - [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). @@ -10,14 +20,8 @@ - Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). -- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). -- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). -- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)). -- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). 
- Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). -- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). - Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). -- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). - [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). ### Fixes diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index f65097e346..c099a70024 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -95,6 +95,7 @@ impl BlockProducerCommand { max_batches_per_block: block_producer.max_batches_per_block, production_checkpoint: Arc::new(Barrier::new(1)), grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() .await @@ -109,6 +110,8 @@ impl BlockProducerCommand { #[cfg(test)] mod tests { + use std::num::NonZeroUsize; + use url::Url; use super::*; @@ -130,6 +133,7 @@ mod tests { batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, max_batches_per_block: miden_objects::MAX_BATCHES_PER_BLOCK + 1, // Invalid value + mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: false, grpc_timeout: Duration::from_secs(10), @@ -155,6 +159,7 @@ mod tests { * limit * (should fail) */ max_batches_per_block: 8, + mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: false, grpc_timeout: Duration::from_secs(10), diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 1ea4129d35..4e2e769720 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -215,6 +215,7 @@ impl BundledCommand { 
max_txs_per_batch: block_producer.max_txs_per_batch, production_checkpoint: checkpoint, grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() .await diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 9d99a46ac2..3193cfd2c4 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -1,6 +1,12 @@ +use std::num::NonZeroUsize; use std::time::Duration; -use miden_node_block_producer::{DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH}; +use miden_node_block_producer::{ + DEFAULT_BATCH_INTERVAL, + DEFAULT_BLOCK_INTERVAL, + DEFAULT_MAX_BATCHES_PER_BLOCK, + DEFAULT_MAX_TXS_PER_BATCH, +}; use url::Url; pub mod block_producer; @@ -24,9 +30,8 @@ const ENV_ENABLE_OTEL: &str = "MIDEN_NODE_ENABLE_OTEL"; const ENV_GENESIS_CONFIG_FILE: &str = "MIDEN_GENESIS_CONFIG_FILE"; const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; +const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; -const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(5); -const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(2); const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); @@ -89,10 +94,29 @@ pub struct BlockProducerConfig { pub block_prover_url: Option, /// The number of transactions per batch. - #[arg(long = "max-txs-per-batch", env = ENV_MAX_TXS_PER_BATCH, value_name = "NUM", default_value_t = DEFAULT_MAX_TXS_PER_BATCH)] + #[arg( + long = "max-txs-per-batch", + env = ENV_MAX_TXS_PER_BATCH, + value_name = "NUM", + default_value_t = DEFAULT_MAX_TXS_PER_BATCH + )] pub max_txs_per_batch: usize, /// Maximum number of batches per block. 
- #[arg(long = "max-batches-per-block", env = ENV_MAX_BATCHES_PER_BLOCK, value_name = "NUM", default_value_t = DEFAULT_MAX_BATCHES_PER_BLOCK)] + #[arg( + long = "max-batches-per-block", + env = ENV_MAX_BATCHES_PER_BLOCK, + value_name = "NUM", + default_value_t = DEFAULT_MAX_BATCHES_PER_BLOCK + )] pub max_batches_per_block: usize, + + /// Maximum number of uncommitted transactions allowed in the mempool. + #[arg( + long = "mempool.tx-capacity", + default_value_t = miden_node_block_producer::DEFAULT_MEMPOOL_TX_CAPACITY, + env = ENV_MEMPOOL_TX_CAPACITY, + value_name = "NUM" + )] + mempool_tx_capacity: NonZeroUsize, } diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index e60bdc9995..74064b0387 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -128,6 +128,9 @@ pub enum AddTransactionError { expired_at: BlockNumber, limit: BlockNumber, }, + + #[error("the mempool is at capacity")] + CapacityExceeded, } impl From for AddTransactionError { diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 81fc5b83eb..e85fcb3a51 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -50,6 +50,24 @@ const SERVER_MEMPOOL_EXPIRATION_SLACK: u32 = 2; /// The interval at which to update the cached mempool statistics. const CACHED_MEMPOOL_STATS_UPDATE_INTERVAL: Duration = Duration::from_secs(5); +/// How often a block is created. +pub const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(5); + +/// How often a batch is created. +pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(2); + +/// The default transaction capacity of the mempool. +/// +/// The value is selected such that all transactions should approximately be processed within one +/// minute with a block time of 5s. 
+#[allow(clippy::cast_sign_loss, reason = "Both durations are positive")] +pub const DEFAULT_MEMPOOL_TX_CAPACITY: NonZeroUsize = NonZeroUsize::new( + DEFAULT_MAX_BATCHES_PER_BLOCK + * DEFAULT_MAX_TXS_PER_BATCH + * (Duration::from_secs(60).div_duration_f32(DEFAULT_BLOCK_INTERVAL)) as usize, +) +.unwrap(); + const _: () = assert!( DEFAULT_MAX_BATCHES_PER_BLOCK <= miden_objects::MAX_BATCHES_PER_BLOCK, "Server constraint cannot exceed the protocol's constraint" diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 15c63dddd9..659b21f727 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -57,7 +57,12 @@ use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::{AddTransactionError, VerifyTxError}; use crate::mempool::budget::BudgetStatus; use crate::mempool::nodes::{BlockNode, Node, NodeId, ProposedBatchNode, TransactionNode}; -use crate::{COMPONENT, SERVER_MEMPOOL_EXPIRATION_SLACK, SERVER_MEMPOOL_STATE_RETENTION}; +use crate::{ + COMPONENT, + DEFAULT_MEMPOOL_TX_CAPACITY, + SERVER_MEMPOOL_EXPIRATION_SLACK, + SERVER_MEMPOOL_STATE_RETENTION, +}; mod budget; pub use budget::{BatchBudget, BlockBudget}; @@ -102,6 +107,13 @@ pub struct MempoolConfig { /// guarantees that the mempool can verify the data against the additional changes so long as /// the data was authenticated against one of the retained blocks. pub state_retention: NonZeroUsize, + + /// The maximum number of uncommitted transactions allowed in the mempool at once. + /// + /// The mempool will reject transactions once it is at capacity. + /// + /// Transactions in batches and uncommitted blocks _do count_ towards this. 
+ pub tx_capacity: NonZeroUsize, } impl Default for MempoolConfig { @@ -111,6 +123,7 @@ impl Default for MempoolConfig { batch_budget: BatchBudget::default(), expiration_slack: SERVER_MEMPOOL_EXPIRATION_SLACK, state_retention: SERVER_MEMPOOL_STATE_RETENTION, + tx_capacity: DEFAULT_MEMPOOL_TX_CAPACITY, } } } @@ -195,6 +208,10 @@ impl Mempool { &mut self, tx: Arc, ) -> Result { + if self.nodes.uncommitted_tx_count() >= self.config.tx_capacity.get() { + return Err(AddTransactionError::CapacityExceeded); + } + self.authentication_staleness_check(tx.authentication_height())?; self.expiration_check(tx.expires_at())?; diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index 8b9b4ca476..a551c77818 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -319,8 +319,16 @@ impl Nodes { pub(super) fn inject_telemetry(&self, span: &tracing::Span) { use miden_node_utils::tracing::OpenTelemetrySpanExt; + span.set_attribute("mempool.transactions.uncommitted", self.uncommitted_tx_count()); span.set_attribute("mempool.transactions.unbatched", self.txs.len()); span.set_attribute("mempool.batches.proposed", self.proposed_batches.len()); span.set_attribute("mempool.batches.proven", self.proven_batches.len()); } + + pub(super) fn uncommitted_tx_count(&self) -> usize { + self.txs.len() + + self.proposed_batches.values().map(|b| b.0.len()).sum::() + + self.proven_batches.values().map(|b| b.txs.len()).sum::() + + self.proposed_block.as_ref().map(|b| b.1.txs.len()).unwrap_or_default() + } } diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 9b74a32573..c523fee6a4 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::net::SocketAddr; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; @@ -73,6 +74,9 @@ pub struct 
BlockProducer { /// /// If the handler takes longer than this duration, the server cancels the call. pub grpc_timeout: Duration, + + /// The maximum number of inflight transactions allowed in the mempool at once. + pub mempool_tx_capacity: NonZeroUsize, } impl BlockProducer { @@ -135,6 +139,7 @@ impl BlockProducer { ..BatchBudget::default() }, block_budget: BlockBudget { batches: self.max_batches_per_block }, + tx_capacity: self.mempool_tx_capacity, ..Default::default() }; let mempool = Mempool::shared(chain_tip, mempool); From e6623344e4ddd908c8159993ec23b608f8350111 Mon Sep 17 00:00:00 2001 From: igamigo Date: Tue, 9 Dec 2025 10:51:37 -0300 Subject: [PATCH 041/125] feat(RPC): skip content negotiation on preflight requests (#1436) --- CHANGELOG.md | 1 + crates/rpc/src/server/accept.rs | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e71be2d61..72863d0e10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). - Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). +- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298), [#1436](https://github.com/0xMiden/miden-node/pull/1436)). - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). 
- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). diff --git a/crates/rpc/src/server/accept.rs b/crates/rpc/src/server/accept.rs index 4e0e1d06d9..b08028b223 100644 --- a/crates/rpc/src/server/accept.rs +++ b/crates/rpc/src/server/accept.rs @@ -224,12 +224,20 @@ where } fn call(&mut self, request: http::Request) -> Self::Future { + // Skip negotiation entirely for CORS preflight/non-gRPC requests. + // + // Browsers often automatically perform an `OPTIONS` check _before_ the client + // SDK can inject the appropriate `ACCEPT` header, causing a rejection. + // Since an `OPTIONS` request does nothing it's safe for us to simply allow them. + if request.method() == http::Method::OPTIONS { + return self.inner.call(request).boxed(); + } + // Determine if this RPC method requires the `genesis` parameter. let path = request.uri().path(); let method_name = path.rsplit('/').next().unwrap_or_default(); - let requires_genesis = self.verifier.require_genesis_methods.contains(&method_name); - dbg!(request.headers()); + let requires_genesis = self.verifier.require_genesis_methods.contains(&method_name); // If `genesis` is required but the header is missing entirely, reject early. 
let Some(header) = request.headers().get(ACCEPT) else { From 75d42e13a01bd7b05a2378f16aba4a71fa5146ed Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Wed, 10 Dec 2025 10:25:59 -0300 Subject: [PATCH 042/125] chore(monitor): improve card naming (#1441) --- CHANGELOG.md | 1 + bin/network-monitor/README.md | 10 +- bin/network-monitor/assets/index.html | 114 +++++++++-------------- bin/network-monitor/src/counter.rs | 4 +- bin/network-monitor/src/monitor/tasks.rs | 6 +- bin/network-monitor/src/status.rs | 6 +- 6 files changed, 58 insertions(+), 83 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 72863d0e10..b2f9d9aabf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). - Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). - The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). +- Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). 
### Changes diff --git a/bin/network-monitor/README.md b/bin/network-monitor/README.md index 2bab71c902..abde03a7e9 100644 --- a/bin/network-monitor/README.md +++ b/bin/network-monitor/README.md @@ -175,18 +175,18 @@ The monitor application provides real-time status monitoring for the following M - Transaction and note ID tracking from successful mints - Automated testing on a configurable interval to verify faucet functionality -### Counter Increment Service -- **Service Health**: End-to-end transaction submission for counter increment +### Local Transactions (Counter Increment) +- **Service Health**: End-to-end local transaction submission for counter increment - **Metrics**: - Success/Failure counts for increment transactions - Last TX ID with copy-to-clipboard -### Counter Tracking Service -- **Service Health**: Real-time monitoring of counter value changes +### Network Transactions (Counter Tracking) +- **Service Health**: Real-time monitoring of on-chain counter value changes - **Metrics**: - Current network account counter value (queried from RPC periodically) - Expected counter value based on successful increments sent - - Pending increments: How many transactions are queued/unprocessed + - Pending notes: How many transactions are queued/unprocessed - Last updated timestamp ## User Interface diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index 2b05e52621..f3fd32bc4d 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -72,79 +72,53 @@ } } - function groupServices(services) { - const grouped = {}; - const result = []; + // Merge Remote Prover status and test entries into a single card per prover. 
+ function mergeProverStatusAndTests(services) { + const testsByName = new Map(); + const merged = []; + const usedTests = new Set(); - // First pass: collect all services and group prover-related ones services.forEach(service => { - if (service.name.startsWith('Remote Prover (') && service.name.endsWith(')')) { - // Extract prover name from "Remote Prover (Prover-1)" -> "Prover-1" - const match = service.name.match(/Remote Prover \((.*)\)/); - if (match) { - const proverName = match[1]; - if (!grouped[proverName]) { - grouped[proverName] = { - name: proverName, - status: service.status, - last_checked: service.last_checked, - error: service.error, - details: service.details, - testDetails: null, - testStatus: null, - testError: null - }; - } else { - // Update if this status is newer - if (service.last_checked > grouped[proverName].last_checked) { - grouped[proverName].status = service.status; - grouped[proverName].last_checked = service.last_checked; - grouped[proverName].error = service.error; - grouped[proverName].details = service.details; - } - } - } - } else if (service.details && service.details.RemoteProverTest) { - // This is a prover test result - find corresponding prover group - const proverName = service.name; - if (!grouped[proverName]) { - grouped[proverName] = { - name: proverName, - status: service.status, - last_checked: service.last_checked, - error: service.error, - details: null, - testDetails: service.details.RemoteProverTest, - testStatus: service.status, - testError: service.error - }; - } else { - grouped[proverName].testDetails = service.details.RemoteProverTest; - grouped[proverName].testStatus = service.status; - grouped[proverName].testError = service.error; - - // Combine status - if either test or status check is unhealthy, mark as unhealthy - if (service.status === 'Unhealthy' || grouped[proverName].status === 'Unhealthy') { - grouped[proverName].status = 'Unhealthy'; - } + if (service.details && service.details.RemoteProverTest) { 
+ testsByName.set(service.name, service); + } + }); - // Use the most recent timestamp - if (service.last_checked > grouped[proverName].last_checked) { - grouped[proverName].last_checked = service.last_checked; - } + services.forEach(service => { + if (service.details && service.details.RemoteProverStatus) { + const test = testsByName.get(service.name); + if (test) { + usedTests.add(service.name); } - } else { - // Regular service (RPC, etc.) - result.push(service); + merged.push({ + ...service, + testDetails: test?.details?.RemoteProverTest ?? null, + testStatus: test?.status ?? null, + testError: test?.error ?? null + }); + } else if (!(service.details && service.details.RemoteProverTest)) { + // Non-prover entries pass through unchanged + merged.push(service); } }); - // Add all grouped prover services - Object.values(grouped).forEach(proverService => { - result.push(proverService); + // Add orphaned tests (in case a test arrives before a status) + testsByName.forEach((test, name) => { + if (!usedTests.has(name)) { + merged.push({ + name, + status: test.status, + last_checked: test.last_checked, + error: test.error, + details: null, + testDetails: test.details.RemoteProverTest, + testStatus: test.status, + testError: test.error + }); + } }); - return result; + return merged; } function updateDisplay() { @@ -159,8 +133,8 @@ const lastUpdateTime = new Date(statusData.last_updated * 1000); lastUpdated.textContent = lastUpdateTime.toLocaleString(); - // Group and process services - const processedServices = groupServices(statusData.services); + // Group remote prover status + test into single cards + const processedServices = mergeProverStatusAndTests(statusData.services); // Count healthy vs unhealthy services const healthyServices = processedServices.filter(s => s.status === 'Healthy').length; @@ -270,7 +244,7 @@ ` : ''} ${details.FaucetTest ? `
- Faucet Testing: + Faucet:
Success Rate: @@ -354,7 +328,7 @@ ` : ''} ${details.NtxIncrement ? `
- Counter Increment: + Local Transactions:
Success Rate: @@ -378,7 +352,7 @@ ` : ''} ${details.NtxTracking ? `
- Counter Tracking: + Network Transactions:
Current Value: @@ -392,7 +366,7 @@ ` : ''} ${details.NtxTracking.pending_increments !== null && details.NtxTracking.pending_increments !== undefined ? `
- Pending Increments: + Pending Notes: ${details.NtxTracking.pending_increments}
` : ''} diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index bc258cb846..b44e17dc99 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -300,7 +300,7 @@ fn build_increment_status(details: &IncrementDetails, last_error: Option }; ServiceStatus { - name: "Counter Increment".to_string(), + name: "Local Transactions".to_string(), status, last_checked: crate::monitor::tasks::current_unix_timestamp_secs(), error: last_error, @@ -438,7 +438,7 @@ fn build_tracking_status( }; ServiceStatus { - name: "Counter Tracking".to_string(), + name: "Network Transactions".to_string(), status, last_checked: crate::monitor::tasks::current_unix_timestamp_secs(), error: last_error, diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index 233994afbb..6c8063a860 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -91,7 +91,7 @@ impl Tasks { let mut prover_rxs = Vec::new(); for (i, prover_url) in config.remote_prover_urls.iter().enumerate() { - let name = format!("Prover-{}", i + 1); + let name = format!("Remote Prover ({})", i + 1); let mut remote_prover = ClientBuilder::new(prover_url.clone()) .with_tls() @@ -253,7 +253,7 @@ impl Tasks { // Create initial increment status let initial_increment_status = ServiceStatus { - name: "Counter Increment".to_string(), + name: "Local Transactions".to_string(), status: crate::status::Status::Unknown, last_checked: current_time, error: None, @@ -266,7 +266,7 @@ impl Tasks { // Create initial tracking status let initial_tracking_status = ServiceStatus { - name: "Counter Tracking".to_string(), + name: "Network Transactions".to_string(), status: crate::status::Status::Unknown, last_checked: current_time, error: None, diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index 9107b3d134..1c4b6e326b 100644 --- a/bin/network-monitor/src/status.rs +++ 
b/bin/network-monitor/src/status.rs @@ -431,7 +431,7 @@ pub async fn run_remote_prover_status_task( #[instrument(target = COMPONENT, name = "check-status.remote-prover", skip_all, ret(level = "info"))] pub(crate) async fn check_remote_prover_status( remote_prover: &mut miden_node_proto::clients::RemoteProverProxyStatusClient, - name: String, + display_name: String, url: String, current_time: u64, ) -> ServiceStatus { @@ -452,7 +452,7 @@ pub(crate) async fn check_remote_prover_status( }; ServiceStatus { - name: format!("Remote Prover ({name})"), + name: display_name.clone(), status: overall_health, last_checked: current_time, error: None, @@ -460,7 +460,7 @@ pub(crate) async fn check_remote_prover_status( } }, Err(e) => ServiceStatus { - name: format!("Remote Prover ({name})"), + name: display_name, status: Status::Unhealthy, last_checked: current_time, error: Some(e.to_string()), From b078a4c0aeb9aa06bc18328d29615af317d4db0b Mon Sep 17 00:00:00 2001 From: Philipp Gackstatter Date: Thu, 11 Dec 2025 06:19:22 +0700 Subject: [PATCH 043/125] chore: update node to use named storage slots (#1440) --- Cargo.lock | 52 +++++--- .../src/assets/counter_program.masm | 9 +- bin/network-monitor/src/deploy/counter.rs | 24 ++-- bin/stress-test/src/seeding/mod.rs | 8 +- crates/ntx-builder/src/transaction.rs | 10 +- crates/proto/src/domain/account.rs | 69 +++++------ crates/proto/src/errors/mod.rs | 6 +- crates/proto/src/generated/account.rs | 9 +- crates/proto/src/generated/rpc.rs | 22 ++-- crates/store/Cargo.toml | 4 +- .../db/migrations/2025062000000_setup/up.sql | 5 +- crates/store/src/db/models/conv.rs | 29 +++-- .../store/src/db/models/queries/accounts.rs | 56 ++++----- crates/store/src/db/schema.rs | 4 +- crates/store/src/db/tests.rs | 111 +++++++++++++----- crates/store/src/genesis/config/mod.rs | 4 +- crates/store/src/genesis/config/tests.rs | 6 +- crates/store/src/server/rpc_api.rs | 2 +- crates/store/src/state.rs | 12 +- proto/proto/rpc.proto | 15 +-- 
proto/proto/types/account.proto | 7 +- 21 files changed, 281 insertions(+), 183 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a0360c0f3..1aaa00589f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.16", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -1336,7 +1347,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -1680,6 +1691,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] [[package]] name = "hashbrown" @@ -2110,7 +2124,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -2485,7 +2499,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" dependencies = [ "miden-objects", "thiserror 2.0.17", @@ -2581,7 +2595,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" 
dependencies = [ "fs-err", "miden-assembly", @@ -2945,7 +2959,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" dependencies = [ "bech32", "getrandom 0.3.4", @@ -2991,7 +3005,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" dependencies = [ "proc-macro2", "quote", @@ -3097,7 +3111,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3115,7 +3129,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" dependencies = [ "miden-lib", "miden-objects", @@ -3128,7 +3142,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#463c116812a7e008a8333ca5e7ec982190fae13c" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" dependencies = [ "miden-objects", "miden-tx", @@ -3744,7 +3758,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "blake2", "bytes", @@ -3780,7 +3794,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "brotli", "bytes", @@ -3870,7 +3884,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash", + "ahash 0.8.12", ] [[package]] @@ -3902,7 +3916,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.1", + "hashbrown 0.12.3", "parking_lot", "rand 0.8.5", ] @@ -4233,7 +4247,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ "heck 0.5.0", - "itertools 0.14.0", + "itertools 0.10.5", "log", "multimap", "once_cell", @@ -4255,7 +4269,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.10.5", "proc-macro2", "quote", "syn 2.0.111", @@ -4685,7 +4699,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -4698,7 +4712,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -5331,7 +5345,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -6349,7 +6363,7 @@ version = "0.1.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] diff --git a/bin/network-monitor/src/assets/counter_program.masm b/bin/network-monitor/src/assets/counter_program.masm index 60cd146bad..9daad60720 100644 --- a/bin/network-monitor/src/assets/counter_program.masm +++ b/bin/network-monitor/src/assets/counter_program.masm @@ -11,6 +11,9 @@ use.miden::tx use.std::sys +# The slot in this component's storage layout where the counter is stored. +const COUNTER_SLOT = word("miden::monitor::counter_contract::counter") + # Increment function with note authentication # => [] export.increment @@ -27,13 +30,13 @@ export.increment assert.err="Note sender not authorized" drop drop # => [] - push.0 exec.active_account::get_item + push.COUNTER_SLOT[0..2] exec.active_account::get_item # => [count, 0, 0, 0] push.1 add # => [count+1] - push.0 exec.native_account::set_item + push.COUNTER_SLOT[0..2] exec.native_account::set_item # => [count, 0, 0, 0] dropw @@ -43,7 +46,7 @@ end # Get the counter (no auth required) # => [count] export.get_count - push.0 exec.active_account::get_item + push.COUNTER_SLOT[0..2] exec.active_account::get_item # => [count, 0, 0, 0] exec.sys::truncate_stack diff --git a/bin/network-monitor/src/deploy/counter.rs b/bin/network-monitor/src/deploy/counter.rs index fa62b15754..c7720fa0df 100644 --- a/bin/network-monitor/src/deploy/counter.rs +++ b/bin/network-monitor/src/deploy/counter.rs @@ -14,12 +14,24 @@ use miden_objects::account::{ AccountStorageMode, AccountType, StorageSlot, + StorageSlotName, }; +use miden_objects::utils::sync::LazyLock; use miden_objects::{Felt, FieldElement, Word}; use tracing::instrument; use crate::COMPONENT; +static OWNER_SLOT_NAME: LazyLock = LazyLock::new(|| { + StorageSlotName::new("miden::monitor::counter_contract::owner") + .expect("storage slot name should be valid") 
+}); + +static COUNTER_SLOT_NAME: LazyLock = LazyLock::new(|| { + StorageSlotName::new("miden::monitor::counter_contract::counter") + .expect("storage slot name should be valid") +}); + /// Create a counter program account with custom MASM script. #[instrument(target = COMPONENT, name = "create-counter-account", skip_all, ret(level = "debug"))] pub fn create_counter_account(owner_account_id: AccountId) -> Result { @@ -31,14 +43,12 @@ pub fn create_counter_account(owner_account_id: AccountId) -> Result { let owner_account_id_prefix = owner_account_id.prefix().as_felt(); let owner_account_id_suffix = owner_account_id.suffix(); - let owner_id_slot = StorageSlot::Value(Word::from([ - Felt::ZERO, - Felt::ZERO, - owner_account_id_suffix, - owner_account_id_prefix, - ])); + let owner_id_slot = StorageSlot::with_value( + OWNER_SLOT_NAME.clone(), + Word::from([Felt::ZERO, Felt::ZERO, owner_account_id_suffix, owner_account_id_prefix]), + ); - let counter_slot = StorageSlot::Value(Word::empty()); + let counter_slot = StorageSlot::with_value(COUNTER_SLOT_NAME.clone(), Word::empty()); let account_code = AccountComponent::compile( script, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index ea845572b4..f1cd50617d 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -23,6 +23,7 @@ use miden_objects::account::{ AccountBuilder, AccountDelta, AccountId, + AccountStorage, AccountStorageMode, AccountType, }; @@ -439,10 +440,13 @@ fn create_emit_note_tx( ) -> ProvenTransaction { let initial_account_hash = faucet.commitment(); - let slot = faucet.storage().get_item(2).unwrap(); + let slot = faucet.storage().get_item(BasicFungibleFaucet::metadata_slot_name()).unwrap(); faucet .storage_mut() - .set_item(0, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) + .set_item( + AccountStorage::faucet_metadata_slot(), + [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into(), + ) .unwrap(); 
faucet.increment_nonce(ONE).unwrap(); diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/transaction.rs index a61d3244e5..b955a91019 100644 --- a/crates/ntx-builder/src/transaction.rs +++ b/crates/ntx-builder/src/transaction.rs @@ -3,7 +3,13 @@ use std::num::NonZeroUsize; use miden_node_utils::lru_cache::LruCache; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::account::{Account, AccountId, PartialAccount, StorageMapWitness, StorageSlot}; +use miden_objects::account::{ + Account, + AccountId, + PartialAccount, + StorageMapWitness, + StorageSlotContent, +}; use miden_objects::asset::{AssetVaultKey, AssetWitness}; use miden_objects::block::{BlockHeader, BlockNumber}; use miden_objects::note::{Note, NoteScript}; @@ -384,7 +390,7 @@ impl DataStore for NtxDataStore { let mut map_witness = None; for slot in self.account.storage().slots() { - if let StorageSlot::Map(map) = slot { + if let StorageSlotContent::Map(map) = slot.content() { if map.root() == map_root { map_witness = Some(map.open(&map_key)); } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 011957209f..f442e01157 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -8,6 +8,7 @@ use miden_objects::account::{ AccountId, AccountStorageHeader, StorageMap, + StorageSlotName, StorageSlotType, }; use miden_objects::asset::{Asset, AssetVault}; @@ -169,14 +170,15 @@ impl TryFrom for AccountStorageHeader { let items = slots .into_iter() .map(|slot| { + let slot_name = StorageSlotName::new(slot.slot_name)?; let slot_type = storage_slot_type_from_raw(slot.slot_type)?; let commitment = slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; - Ok((slot_type, commitment)) + Ok((slot_name, slot_type, commitment)) }) .collect::, ConversionError>>()?; - Ok(AccountStorageHeader::new(items)) + Ok(AccountStorageHeader::new(items)?) 
} } @@ -189,12 +191,12 @@ impl TryFrom value: proto::rpc::account_storage_details::AccountStorageMapDetails, ) -> Result { let proto::rpc::account_storage_details::AccountStorageMapDetails { - slot_index, + slot_name, too_many_entries, entries, } = value; - let slot_index = slot_index.try_into().map_err(ConversionError::TryFromIntError)?; + let slot_name = StorageSlotName::new(slot_name)?; // Extract map_entries from the MapEntries message let map_entries = if let Some(entries) = entries { @@ -221,17 +223,13 @@ impl TryFrom Vec::new() }; - Ok(Self { - slot_index, - too_many_entries, - map_entries, - }) + Ok(Self { slot_name, too_many_entries, map_entries }) } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapRequest { - pub slot_index: u8, + pub slot_name: StorageSlotName, pub slot_data: SlotData, } @@ -244,14 +242,14 @@ impl TryFrom Result { let proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest { - slot_index, + slot_name, slot_data, } = value; - let slot_index = slot_index.try_into()?; + let slot_name = StorageSlotName::new(slot_name)?; let slot_data = slot_data.ok_or(proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; - Ok(StorageMapRequest { slot_index, slot_data }) + Ok(StorageMapRequest { slot_name, slot_data }) } } @@ -335,9 +333,12 @@ impl From for proto::account::AccountStorageHeader { fn from(value: AccountStorageHeader) -> Self { let slots = value .slots() - .map(|(slot_type, slot_value)| proto::account::account_storage_header::StorageSlot { - slot_type: storage_slot_type_to_raw(*slot_type), - commitment: Some(proto::primitives::Digest::from(*slot_value)), + .map(|(slot_name, slot_type, slot_value)| { + proto::account::account_storage_header::StorageSlot { + slot_name: slot_name.to_string(), + slot_type: storage_slot_type_to_raw(*slot_type), + commitment: Some(proto::primitives::Digest::from(*slot_value)), + } }) .collect(); @@ 
-412,7 +413,7 @@ impl From for proto::rpc::AccountVaultDetails { #[derive(Debug, Clone, PartialEq, Eq)] pub struct AccountStorageMapDetails { - pub slot_index: u8, + pub slot_name: StorageSlotName, pub too_many_entries: bool, pub map_entries: Vec<(Word, Word)>, } @@ -420,38 +421,42 @@ pub struct AccountStorageMapDetails { impl AccountStorageMapDetails { const MAX_RETURN_ENTRIES: usize = 1000; - pub fn new(slot_index: u8, slot_data: SlotData, storage_map: &StorageMap) -> Self { + pub fn new(slot_name: StorageSlotName, slot_data: SlotData, storage_map: &StorageMap) -> Self { match slot_data { - SlotData::All => Self::from_all_entries(slot_index, storage_map), - SlotData::MapKeys(keys) => Self::from_specific_keys(slot_index, &keys[..], storage_map), + SlotData::All => Self::from_all_entries(slot_name, storage_map), + SlotData::MapKeys(keys) => Self::from_specific_keys(slot_name, &keys[..], storage_map), } } - fn from_all_entries(slot_index: u8, storage_map: &StorageMap) -> Self { + fn from_all_entries(slot_name: StorageSlotName, storage_map: &StorageMap) -> Self { if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_index) + Self::too_many_entries(slot_name) } else { let map_entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v))); Self { - slot_index, + slot_name, too_many_entries: false, map_entries, } } } - fn from_specific_keys(slot_index: u8, keys: &[Word], storage_map: &StorageMap) -> Self { + fn from_specific_keys( + slot_name: StorageSlotName, + keys: &[Word], + storage_map: &StorageMap, + ) -> Self { if keys.len() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_index) + Self::too_many_entries(slot_name) } else { // TODO For now, we return all entries instead of specific keys with proofs - Self::from_all_entries(slot_index, storage_map) + Self::from_all_entries(slot_name, storage_map) } } - pub fn too_many_entries(slot_index: u8) -> Self { + pub fn too_many_entries(slot_name: StorageSlotName) -> Self 
{ Self { - slot_index, + slot_name, too_many_entries: true, map_entries: Vec::new(), } @@ -623,11 +628,7 @@ impl From fn from(value: AccountStorageMapDetails) -> Self { use proto::rpc::account_storage_details::account_storage_map_details; - let AccountStorageMapDetails { - slot_index, - too_many_entries, - map_entries, - } = value; + let AccountStorageMapDetails { slot_name, too_many_entries, map_entries } = value; let entries = Some(account_storage_map_details::MapEntries { entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| { @@ -639,7 +640,7 @@ impl From }); Self { - slot_index: u32::from(slot_index), + slot_name: slot_name.to_string(), too_many_entries, entries, } diff --git a/crates/proto/src/errors/mod.rs b/crates/proto/src/errors/mod.rs index 5e461315cd..2d52b11406 100644 --- a/crates/proto/src/errors/mod.rs +++ b/crates/proto/src/errors/mod.rs @@ -5,7 +5,7 @@ use std::num::TryFromIntError; pub use miden_node_grpc_error_macro::GrpcError; use miden_objects::crypto::merkle::{SmtLeafError, SmtProofError}; use miden_objects::utils::DeserializationError; -use miden_objects::{AssetError, FeeError}; +use miden_objects::{AccountError, AssetError, FeeError, SlotNameError}; use thiserror::Error; use crate::domain::note::NetworkNoteError; @@ -17,6 +17,8 @@ mod test_macro; pub enum ConversionError { #[error("asset error")] AssetError(#[from] AssetError), + #[error("account error")] + AccountError(#[from] AccountError), #[error("fee parameters error")] FeeError(#[from] FeeError), #[error("hex error")] @@ -29,6 +31,8 @@ pub enum ConversionError { SmtLeafError(#[from] SmtLeafError), #[error("SMT proof error")] SmtProofError(#[from] SmtProofError), + #[error("storage slot name error")] + StorageSlotNameError(#[from] SlotNameError), #[error("integer conversion error: {0}")] TryFromIntError(#[from] TryFromIntError), #[error("too much data, expected {expected}, got {got}")] diff --git a/crates/proto/src/generated/account.rs 
b/crates/proto/src/generated/account.rs index d30e8c888c..e9733c0153 100644 --- a/crates/proto/src/generated/account.rs +++ b/crates/proto/src/generated/account.rs @@ -34,13 +34,16 @@ pub struct AccountStorageHeader { /// Nested message and enum types in `AccountStorageHeader`. pub mod account_storage_header { /// A single storage slot in the account storage header. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct StorageSlot { + /// The name of the storage slot. + #[prost(string, tag = "1")] + pub slot_name: ::prost::alloc::string::String, /// The type of the storage slot. - #[prost(uint32, tag = "1")] + #[prost(uint32, tag = "2")] pub slot_type: u32, /// The commitment (Word) for this storage slot. - #[prost(message, optional, tag = "2")] + #[prost(message, optional, tag = "3")] pub commitment: ::core::option::Option, } } diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 0e443d09a1..78aa4b7cee 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -139,9 +139,9 @@ pub mod account_proof_request { /// Represents a storage slot index and the associated map keys. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StorageMapDetailRequest { - /// Storage slot index (`\[0..255\]`). - #[prost(uint32, tag = "1")] - pub slot_index: u32, + /// Storage slot name. + #[prost(string, tag = "1")] + pub slot_name: ::prost::alloc::string::String, #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] pub slot_data: ::core::option::Option, } @@ -162,7 +162,7 @@ pub mod account_proof_request { /// the response will not contain them but must be requested separately. #[prost(bool, tag = "2")] AllEntries(bool), - /// A list of map keys associated with the given storage slot identified by `slot_index`. + /// A list of map keys associated with the given storage slot identified by `slot_name`. 
#[prost(message, tag = "3")] MapKeys(MapKeys), } @@ -230,9 +230,9 @@ pub struct AccountStorageDetails { pub mod account_storage_details { #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountStorageMapDetails { - /// slot index of the storage map - #[prost(uint32, tag = "1")] - pub slot_index: u32, + /// Storage slot name. + #[prost(string, tag = "1")] + pub slot_name: ::prost::alloc::string::String, /// A flag that is set to `true` if the number of to-be-returned entries in the /// storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` /// endpoint should be used to get all storage map data. @@ -465,14 +465,14 @@ pub struct SyncStorageMapsResponse { pub updates: ::prost::alloc::vec::Vec, } /// Represents a single storage map update. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct StorageMapUpdate { /// Block number in which the slot was updated. #[prost(fixed32, tag = "1")] pub block_num: u32, - /// Slot index (\[0..255\]). - #[prost(uint32, tag = "2")] - pub slot_index: u32, + /// Storage slot name. + #[prost(string, tag = "2")] + pub slot_name: ::prost::alloc::string::String, /// The storage map key. 
#[prost(message, optional, tag = "3")] pub key: ::core::option::Option, diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 97fb701ba2..41d82fd969 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -19,8 +19,8 @@ anyhow = { workspace = true } deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } deadpool-diesel = { features = ["sqlite"], version = "0.6" } deadpool-sync = { version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.2" } -diesel_migrations = { features = ["sqlite"], version = "2.2" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 75cc90146b..15b9f09fa2 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -91,13 +91,12 @@ CREATE TABLE note_scripts ( CREATE TABLE account_storage_map_values ( account_id BLOB NOT NULL, block_num INTEGER NOT NULL, - slot INTEGER NOT NULL, + slot_name BLOB NOT NULL, key BLOB NOT NULL, value BLOB NOT NULL, is_latest BOOLEAN NOT NULL, - PRIMARY KEY (account_id, block_num, slot, key), - CONSTRAINT slot_is_u8 CHECK (slot BETWEEN 0 AND 0xFF), + PRIMARY KEY (account_id, block_num, slot_name, key), FOREIGN KEY (account_id, block_num) REFERENCES accounts(account_id, block_num) ON DELETE CASCADE ) WITHOUT ROWID; diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index ffc7b80f6b..1feb19e848 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -36,6 +36,7 @@ use std::any::type_name; use miden_node_proto::domain::account::{NetworkAccountError, NetworkAccountPrefix}; use 
miden_objects::Felt; +use miden_objects::account::StorageSlotName; use miden_objects::block::BlockNumber; use miden_objects::note::{NoteExecutionMode, NoteTag}; @@ -116,6 +117,24 @@ impl SqlTypeConvert for NoteTag { } } +impl SqlTypeConvert for StorageSlotName { + type Raw = Vec; + type Error = DatabaseTypeConversionError; + + fn from_raw_sql(raw: Self::Raw) -> Result { + String::from_utf8(raw) + .map_err(|_| DatabaseTypeConversionError(type_name::())) + .and_then(|name| { + StorageSlotName::new(name) + .map_err(|_| DatabaseTypeConversionError(type_name::())) + }) + } + + fn to_raw_sql(self) -> Self::Raw { + self.as_str().as_bytes().to_vec() + } +} + // Raw type conversions - eventually introduce wrapper types // =========================================================== @@ -139,16 +158,6 @@ pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { nonce.as_int() as i64 } -#[inline(always)] -pub(crate) fn raw_sql_to_slot(raw: i32) -> u8 { - debug_assert!(raw >= 0); - raw as u8 -} -#[inline(always)] -pub(crate) fn slot_to_raw_sql(slot: u8) -> i32 { - i32::from(slot) -} - #[inline(always)] pub(crate) fn raw_sql_to_fungible_delta(raw: i64) -> i64 { raw diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 5899c53589..2bf5e8f21c 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -29,24 +29,21 @@ use miden_objects::account::{ AccountId, AccountStorage, NonFungibleDeltaAction, - StorageSlot, + StorageSlotContent, + StorageSlotName, }; use miden_objects::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_objects::block::{BlockAccountUpdate, BlockNumber}; use miden_objects::{Felt, Word}; use crate::constants::MAX_PAYLOAD_BYTES; -use crate::db::models::conv::{ - SqlTypeConvert, - nonce_to_raw_sql, - raw_sql_to_nonce, - raw_sql_to_slot, - slot_to_raw_sql, -}; +use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, 
raw_sql_to_nonce}; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; +type StorageMapValueRow = (i64, Vec, Vec, Vec); + /// Select the latest account details by account id from the DB using the given /// [`SqliteConnection`]. /// @@ -413,7 +410,7 @@ pub(crate) fn select_all_accounts( #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapValue { pub block_num: BlockNumber, - pub slot_index: u8, + pub slot_name: StorageSlotName, pub key: Word, pub value: Word, } @@ -427,11 +424,11 @@ pub struct StorageMapValuesPage { } impl StorageMapValue { - pub fn from_raw_row(row: (i64, i32, Vec, Vec)) -> Result { - let (block_num, slot_index, key, value) = row; + pub fn from_raw_row(row: StorageMapValueRow) -> Result { + let (block_num, slot_name, key, value) = row; Ok(Self { block_num: BlockNumber::from_raw_sql(block_num)?, - slot_index: raw_sql_to_slot(slot_index), + slot_name: StorageSlotName::from_raw_sql(slot_name)?, key: Word::read_from_bytes(&key)?, value: Word::read_from_bytes(&value)?, }) @@ -503,8 +500,8 @@ pub(crate) fn select_account_storage_map_values( }); } - let raw: Vec<(i64, i32, Vec, Vec)> = - SelectDsl::select(t::table, (t::block_num, t::slot, t::key, t::value)) + let raw: Vec = + SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) .filter( t::account_id .eq(account_id.to_bytes()) @@ -705,21 +702,21 @@ pub(crate) fn insert_account_storage_map_value( conn: &mut SqliteConnection, account_id: AccountId, block_num: BlockNumber, - slot: u8, + slot_name: StorageSlotName, key: Word, value: Word, ) -> Result { let account_id = account_id.to_bytes(); let key = key.to_bytes(); let value = value.to_bytes(); - let slot = slot_to_raw_sql(slot); + let slot_name = slot_name.to_raw_sql(); let block_num = block_num.to_raw_sql(); let update_count = diesel::update(schema::account_storage_map_values::table) .filter( schema::account_storage_map_values::account_id 
.eq(&account_id) - .and(schema::account_storage_map_values::slot.eq(slot)) + .and(schema::account_storage_map_values::slot_name.eq(&slot_name)) .and(schema::account_storage_map_values::key.eq(&key)) .and(schema::account_storage_map_values::is_latest.eq(true)), ) @@ -730,7 +727,7 @@ pub(crate) fn insert_account_storage_map_value( account_id, key, value, - slot, + slot_name, block_num, is_latest: true, }; @@ -807,17 +804,10 @@ pub(crate) fn upsert_accounts( // collect storage-map inserts to apply after account upsert let mut storage = Vec::new(); - for (slot_idx, slot) in account.storage().slots().iter().enumerate() { - if let StorageSlot::Map(storage_map) = slot { - // SAFETY: We can safely unwrap the conversion to u8 because - // accounts have a limit of 255 storage elements + for slot in account.storage().slots() { + if let StorageSlotContent::Map(storage_map) = slot.content() { for (key, value) in storage_map.entries() { - storage.push(( - account_id, - u8::try_from(slot_idx).unwrap(), - *key, - *value, - )); + storage.push((account_id, slot.name().clone(), *key, *value)); } } } @@ -834,9 +824,9 @@ pub(crate) fn upsert_accounts( // --- collect storage map updates ---------------------------- let mut storage = Vec::new(); - for (&slot, map_delta) in delta.storage().maps() { + for (slot_name, map_delta) in delta.storage().maps() { for (key, value) in map_delta.entries() { - storage.push((account_id, slot, (*key).into(), *value)); + storage.push((account_id, slot_name.clone(), (*key).into(), *value)); } } @@ -912,8 +902,8 @@ pub(crate) fn upsert_accounts( .execute(conn)?; // insert pending storage map entries - for (acc_id, slot, key, value) in pending_storage_inserts { - insert_account_storage_map_value(conn, acc_id, block_num, slot, key, value)?; + for (acc_id, slot_name, key, value) in pending_storage_inserts { + insert_account_storage_map_value(conn, acc_id, block_num, slot_name, key, value)?; } // insert pending vault-asset entries @@ -1005,7 +995,7 @@ impl 
AccountAssetRowInsert { pub(crate) struct AccountStorageMapRowInsert { pub(crate) account_id: Vec, pub(crate) block_num: i64, - pub(crate) slot: i32, + pub(crate) slot_name: Vec, pub(crate) key: Vec, pub(crate) value: Vec, pub(crate) is_latest: bool, diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index f269aee3b4..db389e1c4b 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -1,10 +1,10 @@ // @generated automatically by Diesel CLI. diesel::table! { - account_storage_map_values (account_id, block_num, slot, key) { + account_storage_map_values (account_id, block_num, slot_name, key) { account_id -> Binary, block_num -> BigInt, - slot -> Integer, + slot_name -> Binary, key -> Binary, value -> Binary, is_latest -> Bool, diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 96fd77666b..3c837f2509 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -24,6 +24,7 @@ use miden_objects::account::{ AccountType, AccountVaultDelta, StorageSlot, + StorageSlotName, }; use miden_objects::asset::{Asset, AssetVaultKey, FungibleAsset}; use miden_objects::block::{ @@ -1250,10 +1251,17 @@ fn insert_account_delta( block_number: BlockNumber, delta: &AccountDelta, ) { - for (slot, slot_delta) in delta.storage().maps() { + for (slot_name, slot_delta) in delta.storage().maps() { for (k, v) in slot_delta.entries() { - insert_account_storage_map_value(conn, account_id, block_number, *slot, *k.inner(), *v) - .unwrap(); + insert_account_storage_map_value( + conn, + account_id, + block_number, + slot_name.clone(), + *k.inner(), + *v, + ) + .unwrap(); } } } @@ -1276,7 +1284,7 @@ fn sql_account_storage_map_values_insertion() { let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); - let slot = 3u8; + let slot_name = StorageSlotName::mock(3); let key1 = Word::from([1u32, 2, 3, 4]); let key2 = Word::from([5u32, 6, 7, 8]); let value1 = 
Word::from([10u32, 11, 12, 13]); @@ -1287,7 +1295,7 @@ fn sql_account_storage_map_values_insertion() { let mut map1 = StorageMapDelta::default(); map1.insert(key1, value1); map1.insert(key2, value2); - let maps1: BTreeMap<_, _> = [(slot, map1)].into_iter().collect(); + let maps1: BTreeMap<_, _> = [(slot_name.clone(), map1)].into_iter().collect(); let storage1 = AccountStorageDelta::from_parts(BTreeMap::new(), maps1).unwrap(); let delta1 = AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap(); @@ -1301,7 +1309,7 @@ fn sql_account_storage_map_values_insertion() { // Update key1 at block 2 let mut map2 = StorageMapDelta::default(); map2.insert(key1, value3); - let maps2 = BTreeMap::from_iter([(slot, map2)]); + let maps2 = BTreeMap::from_iter([(slot_name.clone(), map2)]); let storage2 = AccountStorageDelta::from_parts(BTreeMap::new(), maps2).unwrap(); let delta2 = AccountDelta::new(account_id, storage2, AccountVaultDelta::default(), Felt::new(2)) @@ -1318,14 +1326,14 @@ fn sql_account_storage_map_values_insertion() { storage_map_values .values .iter() - .any(|val| val.slot_index == slot && val.key == key1 && val.value == value3), + .any(|val| val.slot_name == slot_name && val.key == key1 && val.value == value3), "key1 should point to new value at block2" ); assert!( storage_map_values .values .iter() - .any(|val| val.slot_index == slot && val.key == key2 && val.value == value2), + .any(|val| val.slot_name == slot_name && val.key == key2 && val.value == value2), "key2 should stay the same (from block1)" ); } @@ -1334,7 +1342,7 @@ fn sql_account_storage_map_values_insertion() { fn select_storage_map_sync_values() { let mut conn = create_db(); let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let slot = 5u8; + let slot_name = StorageSlotName::mock(5); let key1 = num_to_word(1); let key2 = num_to_word(2); @@ -1349,20 +1357,55 @@ fn select_storage_map_sync_values() { // Insert data across 
multiple blocks using individual inserts // Block 1: key1 -> value1, key2 -> value2 - queries::insert_account_storage_map_value(&mut conn, account_id, block1, slot, key1, value1) - .unwrap(); - queries::insert_account_storage_map_value(&mut conn, account_id, block1, slot, key2, value2) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + key1, + value1, + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + key2, + value2, + ) + .unwrap(); // Block 2: key2 -> value3 (update), key3 -> value3 (new) - queries::insert_account_storage_map_value(&mut conn, account_id, block2, slot, key2, value3) - .unwrap(); - queries::insert_account_storage_map_value(&mut conn, account_id, block2, slot, key3, value3) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + key2, + value3, + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + key3, + value3, + ) + .unwrap(); // Block 3: key1 -> value2 (update) - queries::insert_account_storage_map_value(&mut conn, account_id, block3, slot, key1, value2) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block3, + slot_name.clone(), + key1, + value2, + ) + .unwrap(); let page = queries::select_account_storage_map_values( &mut conn, @@ -1376,19 +1419,19 @@ fn select_storage_map_sync_values() { // Compare ordered by key using a tuple view to avoid relying on the concrete struct name let expected = vec![ StorageMapValue { - slot_index: slot, + slot_name: slot_name.clone(), key: key2, value: value3, block_num: block2, }, StorageMapValue { - slot_index: slot, + slot_name: slot_name.clone(), key: key3, value: value3, block_num: block2, }, StorageMapValue { - slot_index: slot, + slot_name, key: key1, value: value2, block_num: block3, @@ -1489,12 +1532,12 
@@ fn mock_account_code_and_storage( "; let component_storage = vec![ - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(1)), - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(3)), - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(5)), + StorageSlot::with_value(StorageSlotName::mock(0), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(1), num_to_word(1)), + StorageSlot::with_value(StorageSlotName::mock(2), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(3), num_to_word(3)), + StorageSlot::with_value(StorageSlotName::mock(4), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(5), num_to_word(5)), ]; let component = AccountComponent::compile( @@ -1567,7 +1610,10 @@ fn genesis_with_account_storage_map() { ]) .unwrap(); - let component_storage = vec![StorageSlot::Map(storage_map), StorageSlot::Value(Word::empty())]; + let component_storage = vec![ + StorageSlot::with_map(StorageSlotName::mock(0), storage_map), + StorageSlot::with_empty_value(StorageSlotName::mock(1)), + ]; let component = AccountComponent::compile( "export.foo push.1 end", @@ -1608,7 +1654,10 @@ fn genesis_with_account_assets_and_storage() { )]) .unwrap(); - let component_storage = vec![StorageSlot::Value(Word::empty()), StorageSlot::Map(storage_map)]; + let component_storage = vec![ + StorageSlot::with_empty_value(StorageSlotName::mock(0)), + StorageSlot::with_map(StorageSlotName::mock(2), storage_map), + ]; let component = AccountComponent::compile( "export.foo push.1 end", @@ -1678,7 +1727,7 @@ fn genesis_with_multiple_accounts() { )]) .unwrap(); - let component_storage = vec![StorageSlot::Map(storage_map)]; + let component_storage = vec![StorageSlot::with_map(StorageSlotName::mock(0), storage_map)]; let component3 = AccountComponent::compile( "export.baz push.3 end", diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index 193d2f105a..d5b16add00 100644 --- 
a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -8,7 +8,6 @@ use miden_lib::AuthScheme; use miden_lib::account::auth::AuthRpoFalcon512; use miden_lib::account::faucets::BasicFungibleFaucet; use miden_lib::account::wallets::create_basic_wallet; -use miden_lib::transaction::memory; use miden_node_utils::crypto::get_rpo_random_coin; use miden_objects::account::auth::AuthSecretKey; use miden_objects::account::{ @@ -17,6 +16,7 @@ use miden_objects::account::{ AccountDelta, AccountFile, AccountId, + AccountStorage, AccountStorageDelta, AccountStorageMode, AccountType, @@ -215,7 +215,7 @@ impl GenesisConfig { if total_issuance != 0 { // slot 0 storage_delta.set_item( - memory::FAUCET_STORAGE_DATA_SLOT, + AccountStorage::faucet_metadata_slot().clone(), [ZERO, ZERO, ZERO, Felt::new(total_issuance)].into(), ); tracing::debug!( diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index 2f7fd15e45..e347f11603 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -1,5 +1,4 @@ use assert_matches::assert_matches; -use miden_lib::transaction::memory; use miden_objects::ONE; use super::*; @@ -45,7 +44,10 @@ fn parsing_yields_expected_default_values() -> TestResult { // check total issuance of the faucet assert_eq!( - native_faucet.storage().get_item(memory::FAUCET_STORAGE_DATA_SLOT).unwrap()[3], + native_faucet + .storage() + .get_item(AccountStorage::faucet_metadata_slot()) + .unwrap()[3], Felt::new(999_777), "Issuance mismatch" ); diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 5919b72480..02a5725188 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -455,7 +455,7 @@ impl rpc_server::Rpc for StoreApi { .values .into_iter() .map(|map_value| proto::rpc::StorageMapUpdate { - slot_index: u32::from(map_value.slot_index), + slot_name: map_value.slot_name.to_string(), 
key: Some(map_value.key.into()), value: Some(map_value.value.into()), block_num: map_value.block_num.as_u32(), diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index fd7d622706..8fb314f393 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -23,7 +23,7 @@ use miden_node_proto::domain::account::{ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; -use miden_objects::account::{AccountHeader, AccountId, StorageSlot}; +use miden_objects::account::{AccountHeader, AccountId, StorageSlot, StorageSlotContent}; use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_objects::block::nullifier_tree::NullifierTree; use miden_objects::block::{ @@ -1007,13 +1007,13 @@ impl State { let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); - for StorageMapRequest { slot_index, slot_data } in storage_requests { - let Some(StorageSlot::Map(storage_map)) = - account.storage().slots().get(slot_index as usize) + for StorageMapRequest { slot_name, slot_data } in storage_requests { + let Some(StorageSlotContent::Map(storage_map)) = + account.storage().get(&slot_name).map(StorageSlot::content) else { - return Err(AccountError::StorageSlotNotMap(slot_index).into()); + return Err(AccountError::StorageSlotNotMap(slot_name).into()); }; - let details = AccountStorageMapDetails::new(slot_index, slot_data, storage_map); + let details = AccountStorageMapDetails::new(slot_name, slot_data, storage_map); storage_map_details.push(details); } diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index dd46b33f7b..cadaefc043 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -216,15 +216,15 @@ message AccountProofRequest { // A list of map keys associated with this storage slot. repeated primitives.Digest map_keys = 1; } - // Storage slot index (`[0..255]`). - uint32 slot_index = 1; + // Storage slot name. 
+ string slot_name = 1; oneof slot_data { // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, // the response will not contain them but must be requested separately. bool all_entries = 2; - // A list of map keys associated with the given storage slot identified by `slot_index`. + // A list of map keys associated with the given storage slot identified by `slot_name`. MapKeys map_keys = 3; } } @@ -311,8 +311,9 @@ message AccountStorageDetails { repeated StorageMapEntry entries = 1; } - // slot index of the storage map - uint32 slot_index = 1; + + // Storage slot name. + string slot_name = 1; // A flag that is set to `true` if the number of to-be-returned entries in the // storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` @@ -533,8 +534,8 @@ message StorageMapUpdate { // Block number in which the slot was updated. fixed32 block_num = 1; - // Slot index ([0..255]). - uint32 slot_index = 2; + // Storage slot name. + string slot_name = 2; // The storage map key. primitives.Digest key = 3; diff --git a/proto/proto/types/account.proto b/proto/proto/types/account.proto index 6953c228be..f668d54b08 100644 --- a/proto/proto/types/account.proto +++ b/proto/proto/types/account.proto @@ -32,11 +32,14 @@ message AccountSummary { message AccountStorageHeader { // A single storage slot in the account storage header. message StorageSlot { + // The name of the storage slot. + string slot_name = 1; + // The type of the storage slot. - uint32 slot_type = 1; + uint32 slot_type = 2; // The commitment (Word) for this storage slot. - primitives.Digest commitment = 2; + primitives.Digest commitment = 3; } // Storage slots with their types and commitments. 
From 281db9735db1cc28b2877baa280b95fcbded67c0 Mon Sep 17 00:00:00 2001 From: Philipp Gackstatter Date: Thu, 11 Dec 2025 11:34:23 +0700 Subject: [PATCH 044/125] chore(store): named storage slot update follow-up (#1445) --- Cargo.lock | 30 +++++++++---------- crates/proto/src/errors/mod.rs | 4 +-- .../db/migrations/2025062000000_setup/up.sql | 4 +-- crates/store/src/db/models/conv.rs | 10 ++----- .../store/src/db/models/queries/accounts.rs | 4 +-- crates/store/src/db/schema.rs | 2 +- 6 files changed, 25 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1aaa00589f..787efe801b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1347,7 +1347,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2124,7 +2124,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2499,7 +2499,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" dependencies = [ "miden-objects", "thiserror 2.0.17", @@ -2595,7 +2595,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" dependencies = [ "fs-err", "miden-assembly", @@ -2959,7 +2959,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" dependencies = [ "bech32", "getrandom 0.3.4", @@ -3005,7 +3005,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" dependencies = [ "proc-macro2", "quote", @@ -3111,7 +3111,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3129,7 +3129,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" dependencies = [ "miden-lib", "miden-objects", @@ -3142,7 +3142,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#a303700580478ed8e84d5042674838f5fbdc6028" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" dependencies = [ "miden-objects", "miden-tx", @@ -3368,7 +3368,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -4699,7 +4699,7 @@ 
dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4712,7 +4712,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5345,7 +5345,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5354,7 +5354,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -6363,7 +6363,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/crates/proto/src/errors/mod.rs b/crates/proto/src/errors/mod.rs index 2d52b11406..f28be3204e 100644 --- a/crates/proto/src/errors/mod.rs +++ b/crates/proto/src/errors/mod.rs @@ -5,7 +5,7 @@ use std::num::TryFromIntError; pub use miden_node_grpc_error_macro::GrpcError; use miden_objects::crypto::merkle::{SmtLeafError, SmtProofError}; use miden_objects::utils::DeserializationError; -use miden_objects::{AccountError, AssetError, FeeError, SlotNameError}; +use miden_objects::{AccountError, AssetError, FeeError, StorageSlotNameError}; use thiserror::Error; use crate::domain::note::NetworkNoteError; @@ -32,7 +32,7 @@ pub enum ConversionError { #[error("SMT proof error")] SmtProofError(#[from] SmtProofError), #[error("storage slot name error")] - StorageSlotNameError(#[from] SlotNameError), + StorageSlotNameError(#[from] StorageSlotNameError), #[error("integer conversion error: {0}")] TryFromIntError(#[from] TryFromIntError), #[error("too much data, expected {expected}, got {got}")] diff --git 
a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 15b9f09fa2..aaafb91a81 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -89,9 +89,9 @@ CREATE TABLE note_scripts ( ) WITHOUT ROWID; CREATE TABLE account_storage_map_values ( - account_id BLOB NOT NULL, + account_id BLOB NOT NULL, block_num INTEGER NOT NULL, - slot_name BLOB NOT NULL, + slot_name TEXT NOT NULL, key BLOB NOT NULL, value BLOB NOT NULL, is_latest BOOLEAN NOT NULL, diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 1feb19e848..a48a2ccfd4 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -118,20 +118,16 @@ impl SqlTypeConvert for NoteTag { } impl SqlTypeConvert for StorageSlotName { - type Raw = Vec; + type Raw = String; type Error = DatabaseTypeConversionError; fn from_raw_sql(raw: Self::Raw) -> Result { - String::from_utf8(raw) + StorageSlotName::new(raw) .map_err(|_| DatabaseTypeConversionError(type_name::())) - .and_then(|name| { - StorageSlotName::new(name) - .map_err(|_| DatabaseTypeConversionError(type_name::())) - }) } fn to_raw_sql(self) -> Self::Raw { - self.as_str().as_bytes().to_vec() + String::from(self) } } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 2bf5e8f21c..9a178e21a1 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -42,7 +42,7 @@ use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; -type StorageMapValueRow = (i64, Vec, Vec, Vec); +type StorageMapValueRow = (i64, String, Vec, Vec); /// Select the latest account details by account id from the DB using the given /// [`SqliteConnection`]. 
@@ -995,7 +995,7 @@ impl AccountAssetRowInsert { pub(crate) struct AccountStorageMapRowInsert { pub(crate) account_id: Vec, pub(crate) block_num: i64, - pub(crate) slot_name: Vec, + pub(crate) slot_name: String, pub(crate) key: Vec, pub(crate) value: Vec, pub(crate) is_latest: bool, diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index db389e1c4b..6f36594b9a 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -4,7 +4,7 @@ diesel::table! { account_storage_map_values (account_id, block_num, slot_name, key) { account_id -> Binary, block_num -> BigInt, - slot_name -> Binary, + slot_name -> Text, key -> Binary, value -> Binary, is_latest -> Bool, From 0428fa9d07f721211c8e366318d6d5bb2c1bbe8f Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Fri, 12 Dec 2025 08:22:02 +1300 Subject: [PATCH 045/125] feat: NTX Builder Actor Refactor (#1435) --- CHANGELOG.md | 1 + Cargo.lock | 2 + bin/node/src/commands/bundled.rs | 10 +- bin/node/src/commands/mod.rs | 4 +- crates/block-producer/src/server/mod.rs | 6 +- crates/ntx-builder/Cargo.toml | 4 +- crates/ntx-builder/src/actor/account_state.rs | 350 ++++++++++++ .../src/{transaction.rs => actor/execute.rs} | 26 +- crates/ntx-builder/src/actor/inflight_note.rs | 71 +++ crates/ntx-builder/src/actor/mod.rs | 352 ++++++++++++ .../{state/account.rs => actor/note_state.rs} | 146 +---- crates/ntx-builder/src/block_producer.rs | 4 +- crates/ntx-builder/src/builder.rs | 261 +++++++++ crates/ntx-builder/src/builder/mod.rs | 205 ------- crates/ntx-builder/src/coordinator.rs | 273 ++++++++++ crates/ntx-builder/src/lib.rs | 4 +- crates/ntx-builder/src/state/mod.rs | 512 ------------------ crates/ntx-builder/src/state/tests.rs | 84 --- crates/ntx-builder/src/store.rs | 84 ++- crates/proto/src/generated/store.rs | 160 +++--- crates/store/src/db/mod.rs | 21 +- .../store/src/db/models/queries/accounts.rs | 24 + 
crates/store/src/db/models/queries/notes.rs | 109 ---- crates/store/src/db/tests.rs | 133 ----- crates/store/src/server/ntx_builder.rs | 63 +-- crates/store/src/state.rs | 15 +- proto/proto/internal/store.proto | 34 +- 27 files changed, 1564 insertions(+), 1394 deletions(-) create mode 100644 crates/ntx-builder/src/actor/account_state.rs rename crates/ntx-builder/src/{transaction.rs => actor/execute.rs} (95%) create mode 100644 crates/ntx-builder/src/actor/inflight_note.rs create mode 100644 crates/ntx-builder/src/actor/mod.rs rename crates/ntx-builder/src/{state/account.rs => actor/note_state.rs} (67%) create mode 100644 crates/ntx-builder/src/builder.rs delete mode 100644 crates/ntx-builder/src/builder/mod.rs create mode 100644 crates/ntx-builder/src/coordinator.rs delete mode 100644 crates/ntx-builder/src/state/mod.rs delete mode 100644 crates/ntx-builder/src/state/tests.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index b2f9d9aabf..29e3ee30f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -122,6 +122,7 @@ - [BREAKING] Refactored protobuf messages ([#1045](https://github.com/0xMiden/miden-node/pull/#1045)). - Added `SyncStorageMaps` gRPC endpoint for retrieving account storage maps ([#1140](https://github.com/0xMiden/miden-node/pull/1140), [#1132](https://github.com/0xMiden/miden-node/pull/1132)). - Added `SyncAccountVault` gRPC endpoints for retrieving account assets ([#1176](https://github.com/0xMiden/miden-node/pull/1176)). +- Refactored Network Transaction Builder to manage dedicated tasks for every network account in the chain ([#1219](https://github.com/0xMiden/miden-node/pull/1219)). 
### Changes diff --git a/Cargo.lock b/Cargo.lock index 787efe801b..79315c56c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2760,6 +2760,7 @@ version = "0.13.0" dependencies = [ "anyhow", "futures", + "indexmap 2.12.1", "miden-lib", "miden-node-proto", "miden-node-test-macro", @@ -2771,6 +2772,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tokio-stream", + "tokio-util", "tonic", "tracing", "url", diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 4e2e769720..09bdcf0257 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -125,7 +125,6 @@ impl BundledCommand { block_producer: BlockProducerConfig, grpc_timeout: Duration, ) -> anyhow::Result<()> { - let should_start_ntb = !ntx_builder.disabled; // Start listening on all gRPC urls so that inter-component connections can be created // before each component is fully started up. // @@ -186,8 +185,9 @@ impl BundledCommand { }) .id(); - // A sync point between the ntb and block-producer components. - let checkpoint = if should_start_ntb { + // A sync point between the ntx-builder and block-producer components. 
+ let should_start_ntx_builder = !ntx_builder.disabled; + let checkpoint = if should_start_ntx_builder { Barrier::new(2) } else { Barrier::new(1) @@ -266,7 +266,7 @@ impl BundledCommand { let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) .context("Failed to parse URL")?; - if should_start_ntb { + if should_start_ntx_builder { let id = join_set .spawn(async move { let block_producer_url = @@ -279,7 +279,7 @@ impl BundledCommand { ntx_builder.ticker_interval, checkpoint, ) - .serve_new() + .run() .await .context("failed while serving ntx builder component") }) diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 3193cfd2c4..ef7386bb6b 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -44,7 +44,7 @@ fn duration_to_human_readable_string(duration: Duration) -> String { #[derive(clap::Args)] pub struct NtxBuilderConfig { /// Disable spawning the network transaction builder. - #[arg(long = "no-ntb", default_value_t = false)] + #[arg(long = "no-ntx-builder", default_value_t = false)] pub disabled: bool, /// The remote transaction prover's gRPC url, used for the ntx builder. If unset, @@ -54,7 +54,7 @@ pub struct NtxBuilderConfig { /// Interval at which to run the network transaction builder's ticker. #[arg( - long = "ntb.interval", + long = "ntx-builder.interval", default_value = &duration_to_human_readable_string(DEFAULT_NTX_TICKER_INTERVAL), value_parser = humantime::parse_duration, value_name = "DURATION" diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index c523fee6a4..73b5a7b3c9 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -154,10 +154,10 @@ impl BlockProducer { // Launch the gRPC server and wait at the checkpoint for any other components to be in sync. 
// - // This is used to ensure the ntb can subscribe to the mempool events without playing catch - // up caused by block-production. + // This is used to ensure the ntx-builder can subscribe to the mempool events without + // playing catch up caused by block-production. // - // This is a temporary work-around until the ntb can resync on the fly. + // This is a temporary work-around until the ntx-builder can resync on the fly. let rpc_id = tasks .spawn({ let mempool = mempool.clone(); diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 4c2f9ab530..fe98ffe01c 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -16,6 +16,7 @@ workspace = true [dependencies] anyhow = { workspace = true } futures = { workspace = true } +indexmap = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-objects = { default-features = true, workspace = true } @@ -24,7 +25,8 @@ miden-tx = { default-features = true, workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } -tonic = { default-features = true, workspace = true } +tokio-util = { version = "0.7" } +tonic = { workspace = true } tracing = { workspace = true } url = { workspace = true } diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs new file mode 100644 index 0000000000..ee100bd8cd --- /dev/null +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -0,0 +1,350 @@ +use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::num::NonZeroUsize; + +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_objects::account::Account; +use 
miden_objects::account::delta::AccountUpdateDetails; +use miden_objects::block::{BlockHeader, BlockNumber}; +use miden_objects::note::{Note, Nullifier}; +use miden_objects::transaction::{PartialBlockchain, TransactionId}; +use tracing::instrument; + +use super::ActorShutdownReason; +use super::note_state::{NetworkAccountEffect, NetworkAccountNoteState}; +use crate::COMPONENT; +use crate::actor::inflight_note::InflightNetworkNote; +use crate::builder::ChainState; +use crate::store::{StoreClient, StoreError}; + +// TRANSACTION CANDIDATE +// ================================================================================================ + +/// A candidate network transaction. +/// +/// Contains the data pertaining to a specific network account which can be used to build a network +/// transaction. +#[derive(Clone, Debug)] +pub struct TransactionCandidate { + /// The current inflight state of the account. + pub account: Account, + + /// A set of notes addressed to this network account. + pub notes: Vec, + + /// The latest locally committed block header. + /// + /// This should be used as the reference block during transaction execution. + pub chain_tip_header: BlockHeader, + + /// The chain MMR, which lags behind the tip by one block. + pub chain_mmr: PartialBlockchain, +} + +// NETWORK ACCOUNT STATE +// ================================================================================================ + +/// The current state of a network account. +#[derive(Clone)] +pub struct NetworkAccountState { + /// The network account prefix corresponding to the network account this state represents. + account_prefix: NetworkAccountPrefix, + + /// Component of this state which contains the committed and inflight account updates as well + /// as available and nullified notes. + account: NetworkAccountNoteState, + + /// Uncommitted transactions which have some impact on the network state. + /// + /// This is tracked so we can commit or revert such transaction effects.
Transactions _without_ + /// an impact are ignored. + inflight_txs: BTreeMap, + + /// A set of nullifiers which have been registered for the network account. + nullifier_idx: HashSet, +} + +impl NetworkAccountState { + /// Maximum number of attempts to execute a network note. + const MAX_NOTE_ATTEMPTS: usize = 30; + + /// Load's all available network notes from the store, along with the required account states. + #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] + pub async fn load( + account: Account, + account_prefix: NetworkAccountPrefix, + store: &StoreClient, + block_num: BlockNumber, + ) -> Result { + let notes = store.get_unconsumed_network_notes(account_prefix, block_num.as_u32()).await?; + let notes = notes + .into_iter() + .filter_map(|note| { + if let NetworkNote::SingleTarget(note) = note { + Some(note) + } else { + None + } + }) + .collect::>(); + let account = NetworkAccountNoteState::new(account, notes); + + let state = Self { + account, + account_prefix, + inflight_txs: BTreeMap::default(), + nullifier_idx: HashSet::default(), + }; + + state.inject_telemetry(); + + Ok(state) + } + + /// Selects the next candidate network transaction. + #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] + pub fn select_candidate( + &mut self, + limit: NonZeroUsize, + chain_state: ChainState, + ) -> Option { + // Remove notes that have failed too many times. + self.account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); + + // Skip empty accounts, and prune them. + // This is how we keep the number of accounts bounded. + if self.account.is_empty() { + return None; + } + + // Select notes from the account that can be consumed or are ready for a retry. + let notes = self + .account + .available_notes(&chain_state.chain_tip_header.block_num()) + .take(limit.get()) + .cloned() + .collect::>(); + + // Skip accounts with no available notes. 
+ if notes.is_empty() { + return None; + } + + let (chain_tip_header, chain_mmr) = chain_state.into_parts(); + TransactionCandidate { + account: self.account.latest_account(), + notes, + chain_tip_header, + chain_mmr, + } + .into() + } + + /// Marks notes of a previously selected candidate as failed. + /// + /// Does not remove the candidate from the in-progress pool. + #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] + pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) { + let nullifiers = notes.iter().map(Note::nullifier).collect::>(); + self.account.fail_notes(nullifiers.as_slice(), block_num); + } + + /// Updates state with the mempool event. + #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] + pub fn mempool_update(&mut self, update: &MempoolEvent) -> Option { + let span = tracing::Span::current(); + span.set_attribute("mempool_event.kind", update.kind()); + + match update { + MempoolEvent::TransactionAdded { + id, + nullifiers, + network_notes, + account_delta, + } => { + // Filter network notes relevant to this account. + let network_notes = filter_by_prefix_and_map_to_single_target( + self.account_prefix, + network_notes.clone(), + ); + self.add_transaction(*id, nullifiers, &network_notes, account_delta.as_ref()); + }, + MempoolEvent::TransactionsReverted(txs) => { + for tx in txs { + let shutdown_reason = self.revert_transaction(*tx); + if shutdown_reason.is_some() { + return shutdown_reason; + } + } + }, + MempoolEvent::BlockCommitted { txs, .. } => { + for tx in txs { + self.commit_transaction(*tx); + } + }, + } + self.inject_telemetry(); + + // No shutdown, continue running actor. + None + } + + /// Handles a [`MempoolEvent::TransactionAdded`] event. + fn add_transaction( + &mut self, + id: TransactionId, + nullifiers: &[Nullifier], + network_notes: &[SingleTargetNetworkNote], + account_delta: Option<&AccountUpdateDetails>, + ) { + // Skip transactions we already know about. 
+ // + // This can occur since both ntx builder and the mempool might inform us of the same + // transaction. Once when it was submitted to the mempool, and once by the mempool event. + if self.inflight_txs.contains_key(&id) { + return; + } + + let mut tx_impact = TransactionImpact::default(); + if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { + let account_prefix = update.prefix(); + if account_prefix == self.account_prefix { + match update { + NetworkAccountEffect::Updated(account_delta) => { + self.account.add_delta(&account_delta); + }, + NetworkAccountEffect::Created(_) => {}, + } + tx_impact.account_delta = Some(account_prefix); + } + } + for note in network_notes { + assert_eq!( + note.account_prefix(), + self.account_prefix, + "transaction note prefix does not match network account actor's prefix" + ); + tx_impact.notes.insert(note.nullifier()); + self.nullifier_idx.insert(note.nullifier()); + self.account.add_note(note.clone()); + } + for nullifier in nullifiers { + // Ignore nullifiers that aren't network note nullifiers. + if !self.nullifier_idx.contains(nullifier) { + continue; + } + tx_impact.nullifiers.insert(*nullifier); + // We don't use the entry wrapper here because the account must already exist. + let _ = self.account.add_nullifier(*nullifier); + } + + if !tx_impact.is_empty() { + self.inflight_txs.insert(id, tx_impact); + } + } + + /// Handles [`MempoolEvent::BlockCommitted`] events. + fn commit_transaction(&mut self, tx: TransactionId) { + // We only track transactions which have an impact on the network state. + let Some(impact) = self.inflight_txs.remove(&tx) else { + return; + }; + + if let Some(prefix) = impact.account_delta { + if prefix == self.account_prefix { + self.account.commit_delta(); + } + } + + for nullifier in impact.nullifiers { + if self.nullifier_idx.remove(&nullifier) { + // Its possible for the account to no longer exist if the transaction creating it + // was reverted. 
+ self.account.commit_nullifier(nullifier); + } + } + } + + /// Handles [`MempoolEvent::TransactionsReverted`] events. + fn revert_transaction(&mut self, tx: TransactionId) -> Option { + // We only track transactions which have an impact on the network state. + let Some(impact) = self.inflight_txs.remove(&tx) else { + tracing::debug!("transaction {tx} not found in inflight transactions"); + return None; + }; + + // Revert account creation. + if let Some(account_prefix) = impact.account_delta { + // Account creation reverted, actor must stop. + if account_prefix == self.account_prefix && self.account.revert_delta() { + return Some(ActorShutdownReason::AccountReverted(account_prefix)); + } + } + + // Revert notes. + for note_nullifier in impact.notes { + if self.nullifier_idx.contains(¬e_nullifier) { + self.account.revert_note(note_nullifier); + self.nullifier_idx.remove(¬e_nullifier); + } + } + + // Revert nullifiers. + for nullifier in impact.nullifiers { + if self.nullifier_idx.contains(&nullifier) { + self.account.revert_nullifier(nullifier); + self.nullifier_idx.remove(&nullifier); + } + } + + None + } + + /// Adds stats to the current tracing span. + /// + /// Note that these are only visible in the OpenTelemetry context, as conventional tracing + /// does not track fields added dynamically. + fn inject_telemetry(&self) { + let span = tracing::Span::current(); + + span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); + span.set_attribute("ntx.state.notes.total", self.nullifier_idx.len()); + } +} + +/// The impact a transaction has on the state. +#[derive(Clone, Default)] +struct TransactionImpact { + /// The network account this transaction added an account delta to. + account_delta: Option, + + /// Network notes this transaction created. + notes: BTreeSet, + + /// Network notes this transaction consumed. 
+ nullifiers: BTreeSet, +} + +impl TransactionImpact { + fn is_empty(&self) -> bool { + self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() + } +} + +/// Filters network notes by prefix and maps them to single target network notes. +fn filter_by_prefix_and_map_to_single_target( + account_prefix: NetworkAccountPrefix, + notes: Vec, +) -> Vec { + notes + .into_iter() + .filter_map(|note| match note { + NetworkNote::SingleTarget(note) if note.account_prefix() == account_prefix => { + Some(note) + }, + _ => None, + }) + .collect::>() +} diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/actor/execute.rs similarity index 95% rename from crates/ntx-builder/src/transaction.rs rename to crates/ntx-builder/src/actor/execute.rs index b955a91019..ff306b84df 100644 --- a/crates/ntx-builder/src/transaction.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -1,5 +1,4 @@ use std::collections::BTreeSet; -use std::num::NonZeroUsize; use miden_node_utils::lru_cache::LruCache; use miden_node_utils::tracing::OpenTelemetrySpanExt; @@ -21,6 +20,7 @@ use miden_objects::transaction::{ PartialBlockchain, ProvenTransaction, TransactionArgs, + TransactionId, TransactionInputs, }; use miden_objects::vm::FutureMaybeSend; @@ -45,8 +45,8 @@ use tokio::task::JoinError; use tracing::{Instrument, instrument}; use crate::COMPONENT; +use crate::actor::account_state::TransactionCandidate; use crate::block_producer::BlockProducerClient; -use crate::state::TransactionCandidate; use crate::store::StoreClient; #[derive(Debug, thiserror::Error)] @@ -69,7 +69,7 @@ pub enum NtxError { type NtxResult = Result; -// Context and execution of network transactions +// NETWORK TRANSACTION CONTEXT // ================================================================================================ /// Provides the context for execution [network transaction candidates](TransactionCandidate). 
@@ -91,23 +91,18 @@ pub struct NtxContext { } impl NtxContext { - /// Default cache size for note scripts. - /// - /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage - /// depends on the complexity of the scripts being cached. - const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap(); - /// Creates a new [`NtxContext`] instance. pub fn new( block_producer: BlockProducerClient, prover: Option, store: StoreClient, + script_cache: LruCache, ) -> Self { Self { block_producer, prover, store, - script_cache: LruCache::new(Self::DEFAULT_SCRIPT_CACHE_SIZE), + script_cache, } } @@ -123,8 +118,8 @@ impl NtxContext { /// /// # Returns /// - /// On success, returns the list of [`FailedNote`]s representing notes that were - /// filtered out before execution. + /// On success, returns the [`TransactionId`] of the executed transaction and a list of + /// [`FailedNote`]s representing notes that were filtered out before execution. /// /// # Errors /// @@ -137,7 +132,7 @@ impl NtxContext { pub fn execute_transaction( self, tx: TransactionCandidate, - ) -> impl FutureMaybeSend>> { + ) -> impl FutureMaybeSend)>> { let TransactionCandidate { account, notes, @@ -165,8 +160,9 @@ impl NtxContext { let (successful, failed) = self.filter_notes(&data_store, notes).await?; let executed = Box::pin(self.execute(&data_store, successful)).await?; let proven = Box::pin(self.prove(executed.into())).await?; + let tx_id = proven.id(); self.submit(proven).await?; - Ok(failed) + Ok((tx_id, failed)) } .in_current_span() .await @@ -270,7 +266,7 @@ impl NtxContext { } } -// Data store implementation for the transaction execution +// NETWORK TRANSACTION DATA STORE // ================================================================================================ /// A [`DataStore`] implementation which provides transaction inputs for a single account and diff --git a/crates/ntx-builder/src/actor/inflight_note.rs 
b/crates/ntx-builder/src/actor/inflight_note.rs new file mode 100644 index 0000000000..1fcfc36fdc --- /dev/null +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -0,0 +1,71 @@ +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_objects::block::BlockNumber; +use miden_objects::note::Note; + +use crate::actor::has_backoff_passed; + +// INFLIGHT NETWORK NOTE +// ================================================================================================ + +/// An unconsumed network note that may have failed to execute. +/// +/// The block number at which the network note was attempted is approximate and may not +/// reflect the exact block number for which the execution attempt failed. The actual block +/// will likely be soon after the number that is recorded here. +#[derive(Debug, Clone)] +pub struct InflightNetworkNote { + note: SingleTargetNetworkNote, + attempt_count: usize, + last_attempt: Option, +} + +impl InflightNetworkNote { + /// Creates a new inflight network note. + pub fn new(note: SingleTargetNetworkNote) -> Self { + Self { + note, + attempt_count: 0, + last_attempt: None, + } + } + + /// Consumes the inflight network note and returns the inner network note. + pub fn into_inner(self) -> SingleTargetNetworkNote { + self.note + } + + /// Returns a reference to the inner network note. + pub fn to_inner(&self) -> &SingleTargetNetworkNote { + &self.note + } + + /// Returns the number of attempts made to execute the network note. + pub fn attempt_count(&self) -> usize { + self.attempt_count + } + + /// Checks if the network note is available for execution. + /// + /// The note is available if it can be consumed and the backoff period has passed.
+ pub fn is_available(&self, block_num: BlockNumber) -> bool { + let can_consume = self + .to_inner() + .metadata() + .execution_hint() + .can_be_consumed(block_num) + .unwrap_or(true); + can_consume && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) + } + + /// Registers a failed attempt to execute the network note at the specified block number. + pub fn fail(&mut self, block_num: BlockNumber) { + self.last_attempt = Some(block_num); + self.attempt_count += 1; + } +} + +impl From for Note { + fn from(value: InflightNetworkNote) -> Self { + value.into_inner().into() + } +} diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs new file mode 100644 index 0000000000..947890ede1 --- /dev/null +++ b/crates/ntx-builder/src/actor/mod.rs @@ -0,0 +1,352 @@ +pub mod account_state; +mod execute; +mod inflight_note; +mod note_state; + +use std::sync::Arc; + +use account_state::{NetworkAccountState, TransactionCandidate}; +use execute::NtxError; +use futures::FutureExt; +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_utils::ErrorReport; +use miden_node_utils::lru_cache::LruCache; +use miden_objects::Word; +use miden_objects::account::{Account, AccountDelta}; +use miden_objects::block::BlockNumber; +use miden_objects::note::NoteScript; +use miden_objects::transaction::TransactionId; +use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; +use tokio_util::sync::CancellationToken; +use url::Url; + +use crate::block_producer::BlockProducerClient; +use crate::builder::ChainState; +use crate::store::StoreClient; + +// ACTOR SHUTDOWN REASON +// ================================================================================================ + +/// The reason an actor has shut down. 
+pub enum ActorShutdownReason { + /// Occurs when the transaction that created the actor is reverted. + AccountReverted(NetworkAccountPrefix), + /// Occurs when an account actor detects failure in the messaging channel used by the + /// coordinator. + EventChannelClosed, + /// Occurs when an account actor detects failure in acquiring the rate-limiting semaphore. + SemaphoreFailed(AcquireError), + /// Occurs when an account actor detects its corresponding cancellation token has been triggered + /// by the coordinator. Cancellation tokens are triggered by the coordinator to initiate + /// graceful shutdown of actors. + Cancelled(NetworkAccountPrefix), +} + +// ACCOUNT ACTOR CONFIG +// ================================================================================================ + +/// Contains miscellaneous resources that are required by all account actors. +#[derive(Clone)] +pub struct AccountActorContext { + /// Client for interacting with the store in order to load account state. + pub store: StoreClient, + /// Address of the block producer gRPC server. + pub block_producer_url: Url, + /// Address of the remote prover. If `None`, transactions will be proven locally, which is + /// undesirable due to the performance impact. + pub tx_prover_url: Option, + /// The latest chain state that all account actors can rely on. A single chain state is shared + /// among all actors. + pub chain_state: Arc>, + /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. + /// This cache is shared across all account actors to maximize cache efficiency. + pub script_cache: LruCache, +} + +// ACCOUNT ORIGIN +// ================================================================================================ + +/// The origin of the account which the actor will use to initialize the account state. +#[derive(Debug)] +pub enum AccountOrigin { + /// Accounts that have just been created by a transaction but have not been committed to the + /// store yet.
+ Transaction(Box), + /// Accounts that already exist in the store. + Store(NetworkAccountPrefix), +} + +impl AccountOrigin { + /// Returns an [`AccountOrigin::Transaction`] if the account is a network account. + pub fn transaction(delta: &AccountDelta) -> Option { + let account = Account::try_from(delta).ok()?; + if account.is_network() { + Some(AccountOrigin::Transaction(account.clone().into())) + } else { + None + } + } + + /// Returns an [`AccountOrigin::Store`]. + pub fn store(prefix: NetworkAccountPrefix) -> Self { + AccountOrigin::Store(prefix) + } + + /// Returns the [`NetworkAccountPrefix`] of the account. + pub fn prefix(&self) -> NetworkAccountPrefix { + match self { + AccountOrigin::Transaction(account) => NetworkAccountPrefix::try_from(account.id()) + .expect("actor accounts are always network accounts"), + AccountOrigin::Store(prefix) => *prefix, + } + } +} + +// ACTOR MODE +// ================================================================================================ + +/// The mode of operation that the account actor is currently performing. +#[derive(Debug)] +enum ActorMode { + NoViableNotes, + NotesAvailable, + TransactionInflight(TransactionId), +} + +// ACCOUNT ACTOR +// ================================================================================================ + +/// A long-running asynchronous task that handles the complete lifecycle of network transaction +/// processing. Each actor operates independently and is managed by a single coordinator that +/// spawns, monitors, and messages all actors. +/// +/// ## Core Responsibilities +/// +/// - **State Management**: Loads and maintains the current state of network accounts, including +/// available notes, pending transactions, and account commitments. +/// - **Transaction Selection**: Selects viable notes and constructs a [`TransactionCandidate`] +/// based on current chain state. 
+/// - **Transaction Execution**: Executes selected transactions using either local or remote +/// proving. +/// - **Mempool Integration**: Listens for mempool events to stay synchronized with the network +/// state and adjust behavior based on transaction confirmations. +/// +/// ## Lifecycle +/// +/// 1. **Initialization**: Loads account state from the store or uses provided account data. +/// 2. **Event Loop**: Continuously processes mempool events and executes transactions. +/// 3. **Transaction Processing**: Selects, executes, and proves transactions, and submits them to +/// block producer. +/// 4. **State Updates**: Updates internal state based on mempool events and execution results. +/// 5. **Shutdown**: Terminates gracefully when cancelled or encounters unrecoverable errors. +/// +/// ## Concurrency +/// +/// Each actor runs in its own async task and communicates with other system components through +/// channels and shared state. The actor uses a cancellation token for graceful shutdown +/// coordination. +pub struct AccountActor { + origin: AccountOrigin, + store: StoreClient, + mode: ActorMode, + event_rx: mpsc::Receiver>, + cancel_token: CancellationToken, + block_producer: BlockProducerClient, + prover: Option, + chain_state: Arc>, + script_cache: LruCache, +} + +impl AccountActor { + /// Constructs a new account actor and corresponding messaging channel with the given + /// configuration. 
+ pub fn new( + origin: AccountOrigin, + actor_context: &AccountActorContext, + event_rx: mpsc::Receiver>, + cancel_token: CancellationToken, + ) -> Self { + let block_producer = BlockProducerClient::new(actor_context.block_producer_url.clone()); + let prover = actor_context.tx_prover_url.clone().map(RemoteTransactionProver::new); + Self { + origin, + store: actor_context.store.clone(), + mode: ActorMode::NoViableNotes, + event_rx, + cancel_token, + block_producer, + prover, + chain_state: actor_context.chain_state.clone(), + script_cache: actor_context.script_cache.clone(), + } + } + + /// Runs the account actor, processing events and managing state until a reason to shutdown is + /// encountered. + pub async fn run(mut self, semaphore: Arc) -> ActorShutdownReason { + // Load the account state from the store and set up the account actor state. + let account = { + match self.origin { + AccountOrigin::Store(account_prefix) => self + .store + .get_network_account(account_prefix) + .await + .expect("actor should be able to load account") + .expect("actor account should exist"), + AccountOrigin::Transaction(ref account) => *(account.clone()), + } + }; + let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let mut state = + NetworkAccountState::load(account, self.origin.prefix(), &self.store, block_num) + .await + .expect("actor should be able to load account state"); + + loop { + // Enable or disable transaction execution based on actor mode. + let tx_permit_acquisition = match self.mode { + // Disable transaction execution. + ActorMode::NoViableNotes | ActorMode::TransactionInflight(_) => { + std::future::pending().boxed() + }, + // Enable transaction execution. + ActorMode::NotesAvailable => semaphore.acquire().boxed(), + }; + tokio::select! { + _ = self.cancel_token.cancelled() => { + return ActorShutdownReason::Cancelled(self.origin.prefix()); + } + // Handle mempool events. 
+ event = self.event_rx.recv() => { + let Some(event) = event else { + return ActorShutdownReason::EventChannelClosed; + }; + // Re-enable transaction execution if the transaction being waited on has been + // added to the mempool. + if let ActorMode::TransactionInflight(awaited_id) = self.mode { + if let MempoolEvent::TransactionAdded { id, .. } = *event { + if id == awaited_id { + self.mode = ActorMode::NotesAvailable; + } + } + } else { + self.mode = ActorMode::NotesAvailable; + } + // Update state. + if let Some(shutdown_reason) = state.mempool_update(event.as_ref()) { + return shutdown_reason; + } + }, + // Execute transactions. + permit = tx_permit_acquisition => { + match permit { + Ok(_permit) => { + // Read the chain state. + let chain_state = self.chain_state.read().await.clone(); + // Find a candidate transaction and execute it. + if let Some(tx_candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX, chain_state) { + self.execute_transactions(&mut state, tx_candidate).await; + } else { + // No transactions to execute, wait for events. + self.mode = ActorMode::NoViableNotes; + } + } + Err(err) => { + return ActorShutdownReason::SemaphoreFailed(err); + } + } + } + } + } + } + + /// Execute a transaction candidate and mark notes as failed as required. + /// + /// Updates the state of the actor based on the execution result. + #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, state, tx_candidate))] + async fn execute_transactions( + &mut self, + state: &mut NetworkAccountState, + tx_candidate: TransactionCandidate, + ) { + let block_num = tx_candidate.chain_tip_header.block_num(); + + // Execute the selected transaction. + let context = execute::NtxContext::new( + self.block_producer.clone(), + self.prover.clone(), + self.store.clone(), + self.script_cache.clone(), + ); + + let execution_result = context.execute_transaction(tx_candidate).await; + match execution_result { + // Execution completed without failed notes. 
+            Ok((tx_id, failed)) if failed.is_empty() => {
+                self.mode = ActorMode::TransactionInflight(tx_id);
+            },
+            // Execution completed with some failed notes.
+            Ok((tx_id, failed)) => {
+                let notes = failed.into_iter().map(|note| note.note).collect::<Vec<_>>();
+                state.notes_failed(notes.as_slice(), block_num);
+                self.mode = ActorMode::TransactionInflight(tx_id);
+            },
+            // Transaction execution failed.
+            Err(err) => {
+                tracing::error!(err = err.as_report(), "network transaction failed");
+                match err {
+                    NtxError::AllNotesFailed(failed) => {
+                        let notes = failed.into_iter().map(|note| note.note).collect::<Vec<_>>();
+                        state.notes_failed(notes.as_slice(), block_num);
+                        self.mode = ActorMode::NoViableNotes;
+                    },
+                    NtxError::InputNotes(_)
+                    | NtxError::NoteFilter(_)
+                    | NtxError::Execution(_)
+                    | NtxError::Proving(_)
+                    | NtxError::Submission(_)
+                    | NtxError::Panic(_) => {
+                        self.mode = ActorMode::NoViableNotes;
+                    },
+                }
+            },
+        }
+    }
+}
+
+// HELPERS
+// ================================================================================================
+
+/// Checks if the backoff block period has passed.
+///
+/// The number of blocks passed since the last attempt must be strictly greater than
+/// e^(0.25 * `attempt_count`) rounded to the nearest integer.
+///
+/// This evaluates to the following:
+/// - After 1 attempt, the backoff period is 1 block.
+/// - After 3 attempts, the backoff period is 2 blocks.
+/// - After 10 attempts, the backoff period is 12 blocks.
+/// - After 20 attempts, the backoff period is 148 blocks.
+/// - etc...
+#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)]
+fn has_backoff_passed(
+    chain_tip: BlockNumber,
+    last_attempt: Option<BlockNumber>,
+    attempts: usize,
+) -> bool {
+    if attempts == 0 {
+        return true;
+    }
+    // Compute the number of blocks passed since the last attempt.
+ let blocks_passed = last_attempt + .and_then(|last| chain_tip.checked_sub(last.as_u32())) + .unwrap_or_default(); + + // Compute the exponential backoff threshold: Δ = e^(0.25 * n). + let backoff_threshold = (0.25 * attempts as f64).exp().round() as usize; + + // Check if the backoff period has passed. + blocks_passed.as_usize() > backoff_threshold +} diff --git a/crates/ntx-builder/src/state/account.rs b/crates/ntx-builder/src/actor/note_state.rs similarity index 67% rename from crates/ntx-builder/src/state/account.rs rename to crates/ntx-builder/src/actor/note_state.rs index 7ca410fb87..cce87037d4 100644 --- a/crates/ntx-builder/src/state/account.rs +++ b/crates/ntx-builder/src/actor/note_state.rs @@ -5,79 +5,16 @@ use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_objects::account::delta::AccountUpdateDetails; use miden_objects::account::{Account, AccountDelta, AccountId}; use miden_objects::block::BlockNumber; -use miden_objects::note::{Note, Nullifier}; +use miden_objects::note::Nullifier; -// INFLIGHT NETWORK NOTE -// ================================================================================================ - -/// An unconsumed network note that may have failed to execute. -/// -/// The block number at which the network note was attempted are approximate and may not -/// reflect the exact block number for which the execution attempt failed. The actual block -/// will likely be soon after the number that is recorded here. -#[derive(Debug, Clone)] -pub struct InflightNetworkNote { - note: SingleTargetNetworkNote, - attempt_count: usize, - last_attempt: Option, -} - -impl InflightNetworkNote { - /// Creates a new inflight network note. - pub fn new(note: SingleTargetNetworkNote) -> Self { - Self { - note, - attempt_count: 0, - last_attempt: None, - } - } - - /// Consumes the inflight network note and returns the inner network note. 
- pub fn into_inner(self) -> SingleTargetNetworkNote { - self.note - } - - /// Returns a reference to the inner network note. - pub fn to_inner(&self) -> &SingleTargetNetworkNote { - &self.note - } - - /// Returns the number of attempts made to execute the network note. - pub fn attempt_count(&self) -> usize { - self.attempt_count - } - - /// Checks if the network note is available for execution. - /// - /// The note is available if it can be consumed and the backoff period has passed. - pub fn is_available(&self, block_num: BlockNumber) -> bool { - let can_consume = self - .to_inner() - .metadata() - .execution_hint() - .can_be_consumed(block_num) - .unwrap_or(true); - can_consume && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) - } - - /// Registers a failed attempt to execute the network note at the specified block number. - pub fn fail(&mut self, block_num: BlockNumber) { - self.last_attempt = Some(block_num); - self.attempt_count += 1; - } -} - -impl From for Note { - fn from(value: InflightNetworkNote) -> Self { - value.into_inner().into() - } -} +use crate::actor::inflight_note::InflightNetworkNote; // ACCOUNT STATE // ================================================================================================ /// Tracks the state of a network account and its notes. -pub struct AccountState { +#[derive(Clone)] +pub struct NetworkAccountNoteState { /// The committed account state, if any. /// /// Its possible this is `None` if the account creation transaction is still inflight. @@ -93,25 +30,29 @@ pub struct AccountState { nullified_notes: HashMap, } -impl AccountState { - /// Creates a new account state using the given value as the committed state. - pub fn from_committed_account(account: Account) -> Self { - Self { +impl NetworkAccountNoteState { + /// Creates a new account state from the supplied account and notes. 
+ pub fn new(account: Account, notes: Vec) -> Self { + let account_prefix = NetworkAccountPrefix::try_from(account.id()) + .expect("only network accounts are used for account state"); + + let mut state = Self { committed: Some(account), inflight: VecDeque::default(), available_notes: HashMap::default(), nullified_notes: HashMap::default(), - } - } + }; - /// Creates a new account state where the creating transaction is still inflight. - pub fn from_uncommitted_account(account: Account) -> Self { - Self { - inflight: VecDeque::from([account]), - committed: None, - available_notes: HashMap::default(), - nullified_notes: HashMap::default(), + for note in notes { + // Currently only support single target network notes in NTB. + assert!( + note.account_prefix() == account_prefix, + "Notes supplied into account state must match expected account prefix" + ); + state.add_note(note); } + + state } /// Returns an iterator over inflight notes that are not currently within their respective @@ -257,16 +198,16 @@ pub enum NetworkAccountEffect { } impl NetworkAccountEffect { - pub fn from_protocol(update: AccountUpdateDetails) -> Option { + pub fn from_protocol(update: &AccountUpdateDetails) -> Option { let update = match update { AccountUpdateDetails::Private => return None, AccountUpdateDetails::Delta(update) if update.is_full_state() => { NetworkAccountEffect::Created( - Account::try_from(&update) + Account::try_from(update) .expect("Account should be derivable by full state AccountDelta"), ) }, - AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update), + AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), }; update.account_id().is_network().then_some(update) @@ -285,41 +226,6 @@ impl NetworkAccountEffect { } } -// HELPERS -// ================================================================================================ - -/// Checks if the backoff block period has passed. 
-/// -/// The number of blocks passed since the last attempt must be greater than or equal to -/// e^(0.25 * `attempt_count`) rounded to the nearest integer. -/// -/// This evaluates to the following: -/// - After 1 attempt, the backoff period is 1 block. -/// - After 3 attempts, the backoff period is 2 blocks. -/// - After 10 attempts, the backoff period is 12 blocks. -/// - After 20 attempts, the backoff period is 148 blocks. -/// - etc... -#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] -fn has_backoff_passed( - chain_tip: BlockNumber, - last_attempt: Option, - attempts: usize, -) -> bool { - if attempts == 0 { - return true; - } - // Compute the number of blocks passed since the last attempt. - let blocks_passed = last_attempt - .and_then(|last| chain_tip.checked_sub(last.as_u32())) - .unwrap_or_default(); - - // Compute the exponential backoff threshold: Δ = e^(0.25 * n). - let backoff_threshold = (0.25 * attempts as f64).exp().round() as usize; - - // Check if the backoff period has passed. 
- blocks_passed.as_usize() > backoff_threshold -} - #[cfg(test)] mod tests { use miden_objects::block::BlockNumber; @@ -342,9 +248,11 @@ mod tests { #[case] attempt_count: usize, #[case] backoff_should_have_passed: bool, ) { + use crate::actor::has_backoff_passed; + assert_eq!( backoff_should_have_passed, - super::has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) + has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) ); } } diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index cd0f0c1aed..b8926e1ec3 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -41,7 +41,7 @@ impl BlockProducerClient { Self { client: block_producer } } - #[instrument(target = COMPONENT, name = "block_producer.client.submit_proven_transaction", skip_all, err)] + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.submit_proven_transaction", skip_all, err)] pub async fn submit_proven_transaction( &self, proven_tx: ProvenTransaction, @@ -56,7 +56,7 @@ impl BlockProducerClient { Ok(()) } - #[instrument(target = COMPONENT, name = "block_producer.client.subscribe_to_mempool", skip_all, err)] + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.subscribe_to_mempool", skip_all, err)] pub async fn subscribe_to_mempool_with_retry( &self, chain_tip: BlockNumber, diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs new file mode 100644 index 0000000000..c74f2dacd5 --- /dev/null +++ b/crates/ntx-builder/src/builder.rs @@ -0,0 +1,261 @@ +use std::num::NonZeroUsize; +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Context; +use futures::TryStreamExt; +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_utils::lru_cache::LruCache; +use miden_objects::Word; +use 
miden_objects::account::delta::AccountUpdateDetails; +use miden_objects::block::BlockHeader; +use miden_objects::crypto::merkle::PartialMmr; +use miden_objects::note::NoteScript; +use miden_objects::transaction::PartialBlockchain; +use tokio::sync::{Barrier, RwLock}; +use tokio::time; +use url::Url; + +use crate::MAX_IN_PROGRESS_TXS; +use crate::actor::{AccountActorContext, AccountOrigin}; +use crate::block_producer::BlockProducerClient; +use crate::coordinator::Coordinator; +use crate::store::StoreClient; + +// CONSTANTS +// ================================================================================================= + +/// The maximum number of blocks to keep in memory while tracking the chain tip. +const MAX_BLOCK_COUNT: usize = 4; + +// CHAIN STATE +// ================================================================================================ + +/// Contains information about the chain that is relevant to the [`NetworkTransactionBuilder`] and +/// all account actors managed by the [`Coordinator`] +#[derive(Debug, Clone)] +pub struct ChainState { + /// The current tip of the chain. + pub chain_tip_header: BlockHeader, + /// A partial representation of the latest state of the chain. + pub chain_mmr: PartialBlockchain, +} + +impl ChainState { + /// Constructs a new instance of [`ChainState`]. + fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { + let chain_mmr = PartialBlockchain::new(chain_mmr, []) + .expect("partial blockchain should build from partial mmr"); + Self { chain_tip_header, chain_mmr } + } + + /// Consumes the chain state and returns the chain tip header and the partial blockchain as a + /// tuple. + pub fn into_parts(self) -> (BlockHeader, PartialBlockchain) { + (self.chain_tip_header, self.chain_mmr) + } +} + +// NETWORK TRANSACTION BUILDER +// ================================================================================================ + +/// Network transaction builder component. 
+///
+/// The network transaction builder is in charge of building transactions that consume notes
+/// against network accounts. These notes are identified and communicated by the block producer.
+/// The service maintains a list of unconsumed notes and periodically executes and proves
+/// transactions that consume them (reaching out to the store to retrieve state as necessary).
+///
+/// The builder manages the tasks for every network account on the chain through the coordinator.
+pub struct NetworkTransactionBuilder {
+    /// Address of the store gRPC server.
+    store_url: Url,
+    /// Address of the block producer gRPC server.
+    block_producer_url: Url,
+    /// Address of the remote prover. If `None`, transactions will be proven locally, which is
+    /// undesirable due to the performance impact.
+    tx_prover_url: Option<Url>,
+    /// Interval for checking pending notes and executing network transactions.
+    ticker_interval: Duration,
+    /// A checkpoint used to sync start-up process with the block-producer.
+    ///
+    /// This informs the block-producer when we have subscribed to mempool events and that it is
+    /// safe to begin block-production.
+    bp_checkpoint: Arc<Barrier>,
+    /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls.
+    /// This cache is shared across all account actors.
+    script_cache: LruCache<Word, NoteScript>,
+    /// Coordinator for managing actor tasks.
+    coordinator: Coordinator,
+}
+
+impl NetworkTransactionBuilder {
+    /// Default cache size for note scripts.
+    ///
+    /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage
+    /// depends on the complexity of the scripts being cached.
+    const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap();
+
+    /// Creates a new instance of the network transaction builder.
+ pub fn new( + store_url: Url, + block_producer_url: Url, + tx_prover_url: Option, + ticker_interval: Duration, + bp_checkpoint: Arc, + ) -> Self { + let script_cache = LruCache::new(Self::DEFAULT_SCRIPT_CACHE_SIZE); + let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); + Self { + store_url, + block_producer_url, + tx_prover_url, + ticker_interval, + bp_checkpoint, + script_cache, + coordinator, + } + } + + /// Runs the network transaction builder until a fatal error occurs. + pub async fn run(mut self) -> anyhow::Result<()> { + let store = StoreClient::new(self.store_url.clone()); + let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); + + let (chain_tip_header, chain_mmr) = store + .get_latest_blockchain_data_with_retry() + .await? + .expect("store should contain a latest block"); + let mut mempool_events = block_producer + .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) + .await + .context("failed to subscribe to mempool events")?; + + // Unlock the block-producer's block production. The block-producer is prevented from + // producing blocks until we have subscribed to mempool events. + // + // This is a temporary work-around until the ntx-builder can resync on the fly. + self.bp_checkpoint.wait().await; + + let mut interval = tokio::time::interval(self.ticker_interval); + interval.set_missed_tick_behavior(time::MissedTickBehavior::Skip); + + // Create chain state that will be updated by the coordinator and read by actors. + let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + + let actor_context = AccountActorContext { + block_producer_url: self.block_producer_url.clone(), + tx_prover_url: self.tx_prover_url.clone(), + chain_state: chain_state.clone(), + store: store.clone(), + script_cache: self.script_cache.clone(), + }; + + // Create initial set of actors based on all known network accounts. 
+ let account_ids = store.get_network_account_ids().await?; + for account_id in account_ids { + if let Ok(account_prefix) = NetworkAccountPrefix::try_from(account_id) { + self.coordinator + .spawn_actor(AccountOrigin::store(account_prefix), &actor_context) + .await?; + } + } + + // Main loop which manages actors and passes mempool events to them. + loop { + tokio::select! { + // Handle actor result. + result = self.coordinator.next() => { + result?; + }, + // Handle mempool events. + event = mempool_events.try_next() => { + let event = event + .context("mempool event stream ended")? + .context("mempool event stream failed")?; + + self.handle_mempool_event( + event.into(), + &actor_context, + chain_state.clone(), + ).await?; + }, + } + } + } + + /// Handles mempool events by sending them to actors via the coordinator and/or spawning new + /// actors as required. + #[tracing::instrument( + name = "ntx.builder.handle_mempool_event", + skip(self, event, actor_context, chain_state) + )] + async fn handle_mempool_event( + &mut self, + event: Arc, + actor_context: &AccountActorContext, + chain_state: Arc>, + ) -> Result<(), anyhow::Error> { + match event.as_ref() { + MempoolEvent::TransactionAdded { account_delta, .. } => { + // Handle account deltas in case an account is being created. + if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { + // Handle account deltas for network accounts only. + if let Some(network_account) = AccountOrigin::transaction(delta) { + // Spawn new actors if a transaction creates a new network account + let is_creating_account = delta.is_full_state(); + if is_creating_account { + self.coordinator.spawn_actor(network_account, actor_context).await?; + } + } + } + self.coordinator.send_targeted(&event).await?; + Ok(()) + }, + // Update chain state and broadcast. 
+ MempoolEvent::BlockCommitted { header, txs } => { + self.update_chain_tip(header.clone(), chain_state).await; + self.coordinator.broadcast(event.clone()).await; + + // All transactions pertaining to predating events should now be available through + // the store. So we can now drain them. + for tx_id in txs { + self.coordinator.drain_predating_events(tx_id); + } + Ok(()) + }, + // Broadcast to all actors. + MempoolEvent::TransactionsReverted(txs) => { + self.coordinator.broadcast(event.clone()).await; + + // Reverted predating transactions need not be processed. + for tx_id in txs { + self.coordinator.drain_predating_events(tx_id); + } + Ok(()) + }, + } + } + + /// Updates the chain tip and MMR block count. + /// + /// Blocks in the MMR are pruned if the block count exceeds the maximum. + async fn update_chain_tip(&mut self, tip: BlockHeader, chain_state: Arc>) { + // Lock the chain state. + let mut chain_state = chain_state.write().await; + + // Update MMR which lags by one block. + let mmr_tip = chain_state.chain_tip_header.clone(); + chain_state.chain_mmr.add_block(&mmr_tip, true); + + // Set the new tip. + chain_state.chain_tip_header = tip; + + // Keep MMR pruned. 
+ let pruned_block_height = + (chain_state.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) + as u32; + chain_state.chain_mmr.prune_to(..pruned_block_height.into()); + } +} diff --git a/crates/ntx-builder/src/builder/mod.rs b/crates/ntx-builder/src/builder/mod.rs deleted file mode 100644 index 706e804c9a..0000000000 --- a/crates/ntx-builder/src/builder/mod.rs +++ /dev/null @@ -1,205 +0,0 @@ -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; - -use anyhow::Context; -use futures::TryStreamExt; -use miden_node_proto::domain::account::NetworkAccountPrefix; -use miden_node_utils::ErrorReport; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; -use tokio::sync::Barrier; -use tokio::time; -use url::Url; - -use crate::MAX_IN_PROGRESS_TXS; -use crate::block_producer::BlockProducerClient; -use crate::store::StoreClient; -use crate::transaction::{NtxContext, NtxError}; - -// NETWORK TRANSACTION BUILDER -// ================================================================================================ - -/// Network transaction builder component. -/// -/// The network transaction builder is in in charge of building transactions that consume notes -/// against network accounts. These notes are identified and communicated by the block producer. -/// The service maintains a list of unconsumed notes and periodically executes and proves -/// transactions that consume them (reaching out to the store to retrieve state as necessary). -pub struct NetworkTransactionBuilder { - /// Address of the store gRPC server. - store_url: Url, - /// Address of the block producer gRPC server. - block_producer_url: Url, - /// Address of the remote prover. If `None`, transactions will be proven locally, which is - /// undesirable due to the perofmrance impact. - tx_prover_url: Option, - /// Interval for checking pending notes and executing network transactions. 
- ticker_interval: Duration, - /// A checkpoint used to sync start-up process with the block-producer. - /// - /// This informs the block-producer when we have subscribed to mempool events and that it is - /// safe to begin block-production. - bp_checkpoint: Arc, -} - -impl NetworkTransactionBuilder { - /// Creates a new instance of the network transaction builder. - pub fn new( - store_url: Url, - block_producer_url: Url, - tx_prover_url: Option, - ticker_interval: Duration, - bp_checkpoint: Arc, - ) -> Self { - Self { - store_url, - block_producer_url, - tx_prover_url, - ticker_interval, - bp_checkpoint, - } - } - - pub async fn serve_new(self) -> anyhow::Result<()> { - let store = StoreClient::new(self.store_url); - let block_producer = BlockProducerClient::new(self.block_producer_url); - - let mut state = crate::state::State::load(store.clone()) - .await - .context("failed to load ntx state")?; - - let mut mempool_events = block_producer - .subscribe_to_mempool_with_retry(state.chain_tip()) - .await - .context("failed to subscribe to mempool events")?; - - // Unlock the block-producer's block production. The block-producer is prevented from - // producing blocks until we have subscribed to mempool events. - // - // This is a temporary work-around until the ntb can resync on the fly. - self.bp_checkpoint.wait().await; - - let prover = self.tx_prover_url.map(RemoteTransactionProver::new); - - let mut interval = tokio::time::interval(self.ticker_interval); - interval.set_missed_tick_behavior(time::MissedTickBehavior::Skip); - - // Tracks network transaction tasks until they are submitted to the mempool. - // - // We also map the task ID to the network account so we can mark it as failed if it doesn't - // get submitted. - let mut inflight = JoinSet::new(); - let mut inflight_idx = HashMap::new(); - - let context = NtxContext::new(block_producer.clone(), prover, store); - - loop { - tokio::select! 
{ - _next = interval.tick() => { - if inflight.len() > MAX_IN_PROGRESS_TXS { - tracing::info!("At maximum network tx capacity, skipping"); - continue; - } - - let Some(candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX) else { - tracing::debug!("No candidate network transaction available"); - continue; - }; - - let network_account_prefix = NetworkAccountPrefix::try_from(candidate.account.id()) - .expect("all accounts managed by NTB are network accounts"); - let indexed_candidate = (network_account_prefix, candidate.chain_tip_header.block_num()); - let task_id = inflight.spawn({ - let context = context.clone(); - context.execute_transaction(candidate) - }).id(); - - // SAFETY: This is definitely a network account. - inflight_idx.insert(task_id, indexed_candidate); - }, - event = mempool_events.try_next() => { - let event = event - .context("mempool event stream ended")? - .context("mempool event stream failed")?; - state.mempool_update(event).await.context("failed to update state")?; - }, - completed = inflight.join_next_with_id() => { - // Grab the task ID and associated network account reference. - let task_id = match &completed { - Ok((task_id, _)) => *task_id, - Err(join_handle) => join_handle.id(), - }; - // SAFETY: both inflights should have the same set. - let (candidate, block_num) = inflight_idx.remove(&task_id).unwrap(); - - match completed { - // Some notes failed. - Ok((_, Ok(failed))) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(candidate, notes.as_slice(), block_num); - }, - // Transaction execution failed. 
- Ok((_, Err(err))) => { - tracing::warn!(err=err.as_report(), "network transaction failed"); - match err { - NtxError::AllNotesFailed(failed) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(candidate, notes.as_slice(), block_num); - }, - NtxError::InputNotes(_) - | NtxError::NoteFilter(_) - | NtxError::Execution(_) - | NtxError::Proving(_) - | NtxError::Submission(_) - | NtxError::Panic(_) => {}, - } - state.candidate_failed(candidate); - }, - // Unexpected error occurred. - Err(err) => { - tracing::warn!(err=err.as_report(), "network transaction panicked"); - state.candidate_failed(candidate); - } - } - } - } - } - } -} - -/// A wrapper arounnd tokio's [`JoinSet`](tokio::task::JoinSet) which returns pending instead of -/// [`None`] if its empty. -/// -/// This makes it much more convenient to use in a `select!`. -struct JoinSet(tokio::task::JoinSet); - -impl JoinSet -where - T: 'static, -{ - fn new() -> Self { - Self(tokio::task::JoinSet::new()) - } - - fn spawn(&mut self, task: F) -> tokio::task::AbortHandle - where - F: Future, - F: Send + 'static, - T: Send, - { - self.0.spawn(task) - } - - async fn join_next_with_id(&mut self) -> Result<(tokio::task::Id, T), tokio::task::JoinError> { - if self.0.is_empty() { - std::future::pending().await - } else { - // Cannot be None as its not empty. 
- self.0.join_next_with_id().await.unwrap() - } - } - - fn len(&self) -> usize { - self.0.len() - } -} diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs new file mode 100644 index 0000000000..d8490aaf84 --- /dev/null +++ b/crates/ntx-builder/src/coordinator.rs @@ -0,0 +1,273 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use anyhow::Context; +use indexmap::IndexMap; +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_proto::domain::note::NetworkNote; +use miden_objects::transaction::TransactionId; +use tokio::sync::mpsc::error::SendError; +use tokio::sync::{Semaphore, mpsc}; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; + +use crate::actor::{AccountActor, AccountActorContext, AccountOrigin, ActorShutdownReason}; + +// ACTOR HANDLE +// ================================================================================================ + +/// Handle to account actors that are spawned by the coordinator. +#[derive(Clone)] +struct ActorHandle { + event_tx: mpsc::Sender>, + cancel_token: CancellationToken, +} + +impl ActorHandle { + fn new(event_tx: mpsc::Sender>, cancel_token: CancellationToken) -> Self { + Self { event_tx, cancel_token } + } +} + +// COORDINATOR +// ================================================================================================ + +/// Coordinator for managing [`AccountActor`] instances, tasks, and associated communication. +/// +/// The `Coordinator` is the central orchestrator of the network transaction builder system. +/// It manages the lifecycle of account actors. Each actor is responsible for handling transactions +/// for a specific network account prefix. The coordinator provides the following core +/// functionality: +/// +/// ## Actor Management +/// - Spawns new [`AccountActor`] instances for network accounts as needed. 
+/// - Maintains a registry of active actors with their communication channels. +/// - Gracefully handles actor shutdown and cleanup when actors complete or fail. +/// - Monitors actor tasks through a join set to detect completion or errors. +/// +/// ## Event Broadcasting +/// - Distributes mempool events to all account actors. +/// - Handles communication failures by canceling disconnected actors. +/// - Maintains reliable message delivery through dedicated channels per actor. +/// +/// ## Resource Management +/// - Controls transaction concurrency across all network accounts using a semaphore. +/// - Prevents resource exhaustion by limiting simultaneous transaction processing. +/// +/// The coordinator operates in an event-driven manner: +/// 1. Network accounts are registered and actors spawned as needed. +/// 2. Mempool events are broadcast to all active actors. +/// 3. Actor completion/failure events are monitored and handled. +/// 4. Failed or completed actors are cleaned up from the registry. +pub struct Coordinator { + /// Mapping of network account prefixes to their respective message channels and cancellation + /// tokens. + /// + /// This registry serves as the primary directory for communicating with active account actors. + /// When actors are spawned, they register their communication channel here. When events need + /// to be broadcast, this registry is used to locate the appropriate actors. The registry is + /// automatically cleaned up when actors complete their execution. + actor_registry: HashMap, + + /// Join set for managing actor tasks and monitoring their completion status. + /// + /// This join set allows the coordinator to wait for actor task completion and handle + /// different shutdown scenarios. When an actor task completes (either successfully or + /// due to an error), the corresponding entry is removed from the actor registry. 
+ actor_join_set: JoinSet, + + /// Semaphore for controlling the maximum number of concurrent transactions across all network + /// accounts. + /// + /// This shared semaphore prevents the system from becoming overwhelmed by limiting the total + /// number of transactions that can be processed simultaneously across all account actors. + /// Each actor must acquire a permit from this semaphore before processing a transaction, + /// ensuring fair resource allocation and system stability under load. + semaphore: Arc, + + /// Cache of events received from the mempool that predate corresponding network accounts. + /// Grouped by account prefix to allow targeted event delivery to actors upon creation. + predating_events: HashMap>>, +} + +impl Coordinator { + /// Maximum number of messages of the message channel for each actor. + const ACTOR_CHANNEL_SIZE: usize = 100; + + /// Creates a new coordinator with the specified maximum number of inflight transactions + /// and shared script cache. + pub fn new(max_inflight_transactions: usize) -> Self { + Self { + actor_registry: HashMap::new(), + actor_join_set: JoinSet::new(), + semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), + predating_events: HashMap::new(), + } + } + + /// Spawns a new actor to manage the state of the provided network account. + /// + /// This method creates a new [`AccountActor`] instance for the specified account origin + /// and adds it to the coordinator's management system. The actor will be responsible for + /// processing transactions and managing state for accounts matching the network prefix. + #[tracing::instrument(name = "ntx.builder.spawn_actor", skip(self, origin, actor_context))] + pub async fn spawn_actor( + &mut self, + origin: AccountOrigin, + actor_context: &AccountActorContext, + ) -> Result<(), SendError>> { + let account_prefix = origin.prefix(); + + // If an actor already exists for this account prefix, something has gone wrong. 
+ if let Some(handle) = self.actor_registry.remove(&account_prefix) { + tracing::error!("account actor already exists for prefix: {}", account_prefix); + handle.cancel_token.cancel(); + } + + let (event_tx, event_rx) = mpsc::channel(Self::ACTOR_CHANNEL_SIZE); + let cancel_token = tokio_util::sync::CancellationToken::new(); + let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); + let handle = ActorHandle::new(event_tx, cancel_token); + + // Run the actor. + let semaphore = self.semaphore.clone(); + self.actor_join_set.spawn(Box::pin(actor.run(semaphore))); + + // Send the new actor any events that contain notes that predate account creation. + if let Some(prefix_events) = self.predating_events.remove(&account_prefix) { + for event in prefix_events.values() { + Self::send(&handle, event.clone()).await?; + } + } + + self.actor_registry.insert(account_prefix, handle); + tracing::info!("created actor for account prefix: {}", account_prefix); + Ok(()) + } + + /// Broadcasts a mempool event to all active account actors. + /// + /// This method distributes the provided event to every actor currently registered + /// with the coordinator. Each actor will receive the event through its dedicated + /// message channel and can process it accordingly. + /// + /// If an actor fails to receive the event, it will be canceled. + pub async fn broadcast(&mut self, event: Arc) { + tracing::debug!( + actor_count = self.actor_registry.len(), + "broadcasting event to all actors" + ); + + let mut failed_actors = Vec::new(); + + // Send event to all actors. + for (account_prefix, handle) in &self.actor_registry { + if let Err(err) = Self::send(handle, event.clone()).await { + tracing::error!("failed to send event to actor {}: {}", account_prefix, err); + failed_actors.push(*account_prefix); + } + } + // Remove failed actors from registry and cancel them. 
+ for prefix in failed_actors { + let handle = + self.actor_registry.remove(&prefix).expect("actor found in send loop above"); + handle.cancel_token.cancel(); + } + } + + /// Waits for the next actor to complete and processes the shutdown reason. + /// + /// This method monitors the join set for actor task completion and handles + /// different shutdown scenarios appropriately. It's designed to be called + /// in a loop to continuously monitor and manage actor lifecycles. + /// + /// If no actors are currently running, this method will wait indefinitely until + /// new actors are spawned. This prevents busy-waiting when the coordinator is idle. + pub async fn next(&mut self) -> anyhow::Result<()> { + let actor_result = self.actor_join_set.join_next().await; + match actor_result { + Some(Ok(shutdown_reason)) => match shutdown_reason { + ActorShutdownReason::Cancelled(account_prefix) => { + // Do not remove the actor from the registry, as it may be re-spawned. + // The coordinator should always remove actors immediately after cancellation. + tracing::info!("account actor cancelled: {}", account_prefix); + Ok(()) + }, + ActorShutdownReason::AccountReverted(account_prefix) => { + tracing::info!("account reverted: {}", account_prefix); + self.actor_registry.remove(&account_prefix); + Ok(()) + }, + ActorShutdownReason::EventChannelClosed => { + anyhow::bail!("event channel closed"); + }, + ActorShutdownReason::SemaphoreFailed(err) => Err(err).context("semaphore failed"), + }, + Some(Err(err)) => { + tracing::error!(err = %err, "actor task failed"); + Ok(()) + }, + None => { + // There are no actors to wait for. Wait indefinitely until actors are spawned. + std::future::pending().await + }, + } + } + + /// Sends a mempool event to all network account actors that are found in the corresponding + /// transaction's notes. + /// + /// Caches the mempool event for each network account found in the transaction's notes that does + /// not currently have a corresponding actor. 
If an actor does not exist for the account, it is + /// assumed that the account has not been created on the chain yet. + /// + /// Cached events will be fed to the corresponding actor when the account creation transaction + /// is processed. + pub async fn send_targeted( + &mut self, + event: &Arc, + ) -> Result<(), SendError>> { + let mut target_actors = HashMap::new(); + if let MempoolEvent::TransactionAdded { id, network_notes, .. } = event.as_ref() { + // Determine target actors for each note. + for note in network_notes { + if let NetworkNote::SingleTarget(note) = note { + let prefix = note.account_prefix(); + if let Some(actor) = self.actor_registry.get(&prefix) { + // Register actor as target. + target_actors.insert(prefix, actor); + } else { + // Cache event for every note that doesn't have a corresponding actor. + self.predating_events.entry(prefix).or_default().insert(*id, event.clone()); + } + } + } + } + // Send event to target actors. + for actor in target_actors.values() { + Self::send(actor, event.clone()).await?; + } + Ok(()) + } + + /// Removes any cached events for a given transaction ID from all account prefix caches. + pub fn drain_predating_events(&mut self, tx_id: &TransactionId) { + // Remove the transaction from all prefix caches. + // This iterates over all predating events which is fine because the count is expected to be + // low. + self.predating_events.retain(|_, prefix_event| { + prefix_event.shift_remove(tx_id); + // Remove entries for account prefixes with no more cached events. + !prefix_event.is_empty() + }); + } + + /// Helper function to send an event to a single account actor. 
+ async fn send( + handle: &ActorHandle, + event: Arc, + ) -> Result<(), SendError>> { + handle.event_tx.send(event).await + } +} diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index c3efd1351b..b0d89f94c8 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -1,10 +1,10 @@ use std::num::NonZeroUsize; +mod actor; mod block_producer; mod builder; -mod state; +mod coordinator; mod store; -mod transaction; pub use builder::NetworkTransactionBuilder; diff --git a/crates/ntx-builder/src/state/mod.rs b/crates/ntx-builder/src/state/mod.rs deleted file mode 100644 index 363f58f837..0000000000 --- a/crates/ntx-builder/src/state/mod.rs +++ /dev/null @@ -1,512 +0,0 @@ -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}; -use std::num::NonZeroUsize; - -use account::{AccountState, InflightNetworkNote, NetworkAccountEffect}; -use anyhow::Context; -use miden_node_proto::domain::account::NetworkAccountPrefix; -use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; -use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::account::Account; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::{Note, Nullifier}; -use miden_objects::transaction::{PartialBlockchain, TransactionId}; -use tracing::instrument; - -use crate::COMPONENT; -use crate::store::{StoreClient, StoreError}; - -mod account; - -#[cfg(test)] -mod tests; - -// CONSTANTS -// ================================================================================================= - -/// The maximum number of blocks to keep in memory while tracking the chain tip. -const MAX_BLOCK_COUNT: usize = 4; - -/// A candidate network transaction. 
-/// -/// Contains the data pertaining to a specific network account which can be used to build a network -/// transaction. -#[derive(Clone)] -pub struct TransactionCandidate { - /// The current inflight state of the account. - pub account: Account, - - /// A set of notes addressed to this network account. - pub notes: Vec, - - /// The latest locally committed block header. - /// - /// This should be used as the reference block during transaction execution. - pub chain_tip_header: BlockHeader, - - /// The chain MMR, which lags behind the tip by one block. - pub chain_mmr: PartialBlockchain, -} - -/// Holds the state of the network transaction builder. -/// -/// It tracks inflight transactions, and their impact on network-related state. -pub struct State { - /// The latest committed block header. - chain_tip_header: BlockHeader, - - /// The chain MMR, which lags behind the tip by one block. - chain_mmr: PartialBlockchain, - - /// Tracks all network accounts with inflight state. - /// - /// This is network account deltas, network notes and their nullifiers. - accounts: HashMap, - - /// A rotating queue of all tracked network accounts. - /// - /// This is used to select the next transaction's account. - /// - /// Note that this _always_ includes _all_ network accounts. Filtering out accounts that aren't - /// viable is handled within the select method itself. - queue: VecDeque, - - /// Network accounts which have been selected but whose transaction has not yet completed. - /// - /// This locks these accounts so they cannot be selected. - in_progress: HashSet, - - /// Uncommitted transactions which have a some impact on the network state. - /// - /// This is tracked so we can commit or revert such transaction effects. Transactions _without_ - /// an impact are ignored. - inflight_txs: BTreeMap, - - /// A mapping of network note's to their account. - nullifier_idx: BTreeMap, - - /// gRPC client used to retrieve the network account state from the store. 
- store: StoreClient, -} - -impl State { - /// Maximum number of attempts to execute a network note. - const MAX_NOTE_ATTEMPTS: usize = 1; - - /// Load's all available network notes from the store, along with the required account states. - #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] - pub async fn load(store: StoreClient) -> Result { - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? - .expect("store should contain a latest block"); - - let chain_mmr = PartialBlockchain::new(chain_mmr, []) - .expect("PartialBlockchain should build from latest partial MMR"); - - let mut state = Self { - chain_tip_header, - chain_mmr, - store, - accounts: HashMap::default(), - queue: VecDeque::default(), - in_progress: HashSet::default(), - inflight_txs: BTreeMap::default(), - nullifier_idx: BTreeMap::default(), - }; - - let notes = state.store.get_unconsumed_network_notes().await?; - for note in notes { - // Currently only support single target network notes in NTB. - if let NetworkNote::SingleTarget(note) = note { - let prefix = note.account_prefix(); - // Ignore notes which don't target an existing account. - if let Some(account) = state.fetch_account(prefix).await? { - account.add_note(note); - } - } - } - state.inject_telemetry(); - - Ok(state) - } - - /// Creates a new State for testing purposes with minimal setup. - /// - /// This bypasses the need for a real store connection and provides a mock state - /// that can be used in unit tests. - #[cfg(test)] - pub(crate) fn new_for_testing( - chain_tip_header: BlockHeader, - chain_mmr: PartialBlockchain, - store: StoreClient, - ) -> Self { - Self { - chain_tip_header, - chain_mmr, - store, - accounts: HashMap::default(), - queue: VecDeque::default(), - in_progress: HashSet::default(), - inflight_txs: BTreeMap::default(), - nullifier_idx: BTreeMap::default(), - } - } - - /// Selects the next candidate network transaction. 
- /// - /// Note that this marks the candidate account as in-progress and that it cannot be selected - /// again until either: - /// - /// - it has been marked as failed if the transaction failed, or - /// - the transaction was submitted successfully, indicated by the associated mempool event - /// being submitted - #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] - pub fn select_candidate(&mut self, limit: NonZeroUsize) -> Option { - // Loop through the account queue until we find one that is selectable. - // - // Since the queue contains _all_ accounts, including unselectable accounts, we limit our - // search to once through the entire queue. - // - // There are smarter ways of doing this, but this should scale more than well enough for a - // long time. - for _ in 0..self.queue.len() { - // This is a rotating queue. - let candidate = self.queue.pop_front().unwrap(); - self.queue.push_back(candidate); - - // Skip accounts which are already in-progress. - if self.in_progress.contains(&candidate) { - continue; - } - - let account = self.accounts.get_mut(&candidate).expect("queue account must be tracked"); - - // Remove notes that have failed too many times. - account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); - - // Skip empty accounts, and prune them. - // This is how we keep the number of accounts bounded. - if account.is_empty() { - // We don't need to prune the inflight transactions because if the account is empty, - // then it would have no inflight txs. - self.accounts.remove(&candidate); - // We know this account is the backmost one since we just rotated it there. - self.queue.pop_back(); - continue; - } - - // Select notes from the account that can be consumed or are ready for a retry. - let notes = account - .available_notes(&self.chain_tip_header.block_num()) - .take(limit.get()) - .cloned() - .collect::>(); - - // Skip accounts with no available notes. 
- if notes.is_empty() { - continue; - } - - self.in_progress.insert(candidate); - return TransactionCandidate { - account: account.latest_account(), - notes, - chain_tip_header: self.chain_tip_header.clone(), - chain_mmr: self.chain_mmr.clone(), - } - .into(); - } - self.inject_telemetry(); - - None - } - - /// The latest block number the state knows of. - pub fn chain_tip(&self) -> BlockNumber { - self.chain_tip_header.block_num() - } - - /// Updates the chain tip and MMR block count. - /// - /// Blocks in the MMR are pruned if the block count exceeds the maximum. - fn update_chain_tip(&mut self, tip: BlockHeader) { - // Update MMR which lags by one block. - self.chain_mmr.add_block(&self.chain_tip_header, true); - - // Set the new tip. - self.chain_tip_header = tip; - - // Keep MMR pruned. - let pruned_block_height = - (self.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) as u32; - self.chain_mmr.prune_to(..pruned_block_height.into()); - } - - /// Marks notes of a previously selected candidate as failed. - /// - /// Does not remove the candidate from the in-progress pool. - #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] - pub fn notes_failed( - &mut self, - candidate: NetworkAccountPrefix, - notes: &[Note], - block_num: BlockNumber, - ) { - if let Some(account) = self.accounts.get_mut(&candidate) { - let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - account.fail_notes(nullifiers.as_slice(), block_num); - } else { - tracing::error!(account.prefix=%candidate, "failed network notes have no local account state"); - } - } - - /// Marks a previously selected candidate account as failed, allowing it to be available for - /// selection again. - /// - /// All notes in the candidate will be marked as failed. 
- #[instrument(target = COMPONENT, name = "ntx.state.candidate_failed", skip_all)] - pub fn candidate_failed(&mut self, candidate: NetworkAccountPrefix) { - self.in_progress.remove(&candidate); - - self.inject_telemetry(); - } - - /// Updates state with the mempool event. - #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] - pub async fn mempool_update(&mut self, update: MempoolEvent) -> anyhow::Result<()> { - let span = tracing::Span::current(); - span.set_attribute("mempool_event.kind", update.kind()); - - match update { - // Note: this event will get triggered by normal user transactions, as well as our - // network transactions. The mempool does not distinguish between the two. - MempoolEvent::TransactionAdded { - id, - nullifiers, - network_notes, - account_delta, - } => { - let network_notes = network_notes - .into_iter() - .filter_map(|note| match note { - NetworkNote::SingleTarget(note) => Some(note), - NetworkNote::MultiTarget(_) => None, - }) - .collect::>(); - self.add_transaction(id, nullifiers, network_notes, account_delta).await?; - }, - MempoolEvent::BlockCommitted { header, txs } => { - anyhow::ensure!( - header.prev_block_commitment() == self.chain_tip_header.commitment(), - "New block's parent commitment {} does not match local chain tip {}", - header.prev_block_commitment(), - self.chain_tip_header.commitment() - ); - self.update_chain_tip(header); - for tx in txs { - self.commit_transaction(tx); - } - }, - MempoolEvent::TransactionsReverted(txs) => { - for tx in txs { - self.revert_transaction(tx); - } - }, - } - self.inject_telemetry(); - - Ok(()) - } - - /// Handles a [`MempoolEvent::TransactionAdded`] event. - /// - /// Note that this will include our own network transactions as well as user submitted - /// transactions. - /// - /// This updates the state of network accounts affected by this transaction. Account state - /// may be loaded from the store if it isn't already known locally. 
This would be the case if - /// the network account has no inflight state changes. - async fn add_transaction( - &mut self, - id: TransactionId, - nullifiers: Vec, - network_notes: Vec, - account_delta: Option, - ) -> anyhow::Result<()> { - // Skip transactions we already know about. - // - // This can occur since both ntx builder and the mempool might inform us of the same - // transaction. Once when it was submitted to the mempool, and once by the mempool event. - if self.inflight_txs.contains_key(&id) { - return Ok(()); - } - - let mut tx_impact = TransactionImpact::default(); - if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { - let prefix = update.prefix(); - - match update { - NetworkAccountEffect::Created(account) => { - let account_state = AccountState::from_uncommitted_account(account); - self.accounts.insert(prefix, account_state); - self.queue.push_back(prefix); - }, - NetworkAccountEffect::Updated(account_delta) => { - self.fetch_account(prefix) - .await - .context("failed to load account")? - .context("account with delta not found")? - .add_delta(&account_delta); - }, - } - - // If this account was in-progress, then it should no longer be as this update is the - // result of our own network transaction. - self.in_progress.remove(&prefix); - tx_impact.account_delta = Some(prefix); - } - for note in network_notes { - let prefix = note.account_prefix(); - tx_impact.notes.insert(note.nullifier()); - - // Skip and ignore nullifier if note targets a non-existent network account - let Some(account) = self.fetch_account(prefix).await? else { - tracing::warn!("could not fetch account from network: {:?}", prefix); - continue; - }; - - account.add_note(note.clone()); - self.nullifier_idx.insert(note.nullifier(), prefix); - } - for nullifier in nullifiers { - // Ignore nullifiers that aren't network note nullifiers. 
- let Some(account) = self.nullifier_idx.get(&nullifier) else { - continue; - }; - tx_impact.nullifiers.insert(nullifier); - // We don't use the entry wrapper here because the account must already exist. - let _res = self - .accounts - .get_mut(account) - .expect("nullifier account must exist") - .add_nullifier(nullifier); - } - - if !tx_impact.is_empty() { - self.inflight_txs.insert(id, tx_impact); - } - - Ok(()) - } - - /// Handles [`MempoolEvent::BlockCommitted`] events. - fn commit_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(prefix) = impact.account_delta { - self.accounts.get_mut(&prefix).unwrap().commit_delta(); - } - - for nullifier in impact.nullifiers { - let prefix = self.nullifier_idx.remove(&nullifier).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(&prefix) { - account.commit_nullifier(nullifier); - } - } - } - - /// Handles [`MempoolEvent::TransactionsReverted`] events. - fn revert_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(prefix) = impact.account_delta { - // We need to remove the account if this transaction created the account. - if self.accounts.get_mut(&prefix).unwrap().revert_delta() { - self.accounts.remove(&prefix); - } - } - - for note in impact.notes { - let prefix = self.nullifier_idx.remove(¬e).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. 
- if let Some(account) = self.accounts.get_mut(&prefix) { - account.revert_note(note); - } - } - - for nullifier in impact.nullifiers { - let prefix = self.nullifier_idx.get(&nullifier).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(prefix) { - account.revert_nullifier(nullifier); - } - } - } - - /// Returns the current inflight account, loading it from the store if it isn't present locally. - /// - /// Returns `None` if the account is unknown. - async fn fetch_account( - &mut self, - prefix: NetworkAccountPrefix, - ) -> Result, StoreError> { - match self.accounts.entry(prefix) { - Entry::Occupied(occupied_entry) => Ok(Some(occupied_entry.into_mut())), - Entry::Vacant(vacant_entry) => { - let Some(account) = self.store.get_network_account(prefix).await? else { - return Ok(None); - }; - - self.queue.push_back(prefix); - let entry = vacant_entry.insert(AccountState::from_committed_account(account)); - - Ok(Some(entry)) - }, - } - } - - /// Adds stats to the current tracing span. - /// - /// Note that these are only visible in the OpenTelemetry context, as conventional tracing - /// does not track fields added dynamically. - fn inject_telemetry(&self) { - let span = tracing::Span::current(); - - span.set_attribute("ntx.state.accounts.total", self.accounts.len()); - span.set_attribute("ntx.state.accounts.in_progress", self.in_progress.len()); - span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); - span.set_attribute("ntx.state.notes.total", self.nullifier_idx.len()); - } -} - -/// The impact a transaction has on the state. -#[derive(Default)] -struct TransactionImpact { - /// The network account this transaction added an account delta to. - account_delta: Option, - - /// Network notes this transaction created. - notes: BTreeSet, - - /// Network notes this transaction consumed. 
- nullifiers: BTreeSet, -} - -impl TransactionImpact { - fn is_empty(&self) -> bool { - self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() - } -} diff --git a/crates/ntx-builder/src/state/tests.rs b/crates/ntx-builder/src/state/tests.rs deleted file mode 100644 index d41a322b27..0000000000 --- a/crates/ntx-builder/src/state/tests.rs +++ /dev/null @@ -1,84 +0,0 @@ -use std::collections::HashSet; - -use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_utils::fee::test_fee_params; -use miden_objects::Word; -use miden_objects::block::BlockHeader; -use miden_objects::note::Nullifier; -use miden_objects::transaction::{PartialBlockchain, TransactionId}; - -use crate::state::State; -use crate::store::StoreClient; - -/// Helper function to create a mock State for testing without needing a real store. -fn create_mock_state() -> State { - // Create a minimal genesis block header - let chain_tip_header = BlockHeader::new( - 1_u8.into(), // version - Word::default(), // prev_hash - 0_u32.into(), // block_num (genesis) - Word::default(), // chain_root - Word::default(), // account_root - Word::default(), // nullifier_root - Word::default(), // note_root - Word::default(), // tx_hash - Word::default(), // kernel_root - Word::default(), // proof_hash - test_fee_params(), // fee_parameters - 0_u32, // timestamp - ); - - // Create an empty partial blockchain - let chain_mmr = PartialBlockchain::default(); - // Create a mock store client (it won't be used in this test) - let store = StoreClient::new("http://localhost:9999".parse().unwrap()); - - State::new_for_testing(chain_tip_header, chain_mmr, store) -} - -/// Regression test for issue #1312 -/// -/// This test verifies that the `NtxBuilder`'s state handling correctly processes transactions -/// that contain nullifiers without corresponding network notes. 
This scenario can occur when: -/// - A transaction consumes a non-network note (e.g., a private note) -/// - The nullifier is included in the transaction but is not tracked by the `NtxBuilder` -/// -/// The test ensures... -/// 1. such transactions are accepted -/// 2. the state remains consistent after processing -/// 3. the nullifier is skipped, since it has no corresponding note -/// 4. subsequent operations continue to work correctly -#[tokio::test] -async fn issue_1312_nullifier_without_note() { - let mut state = create_mock_state(); - - let initial_chain_tip = state.chain_tip(); - - let tx_id = - TransactionId::new(Word::default(), Word::default(), Word::default(), Word::default()); - let nullifier = - Nullifier::new(Word::default(), Word::default(), Word::default(), Word::default()); - - // Add transaction with nullifier but no network notes. - let add_event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier], - network_notes: vec![], - account_delta: None, - }; - - state.mempool_update(add_event).await.unwrap(); - - assert_eq!(state.chain_tip(), initial_chain_tip); - - // Verify state integrity. - let candidate = state.select_candidate(std::num::NonZeroUsize::new(10).unwrap()); - assert!(candidate.is_none()); - - // Revert transaction. 
- let revert_event = - MempoolEvent::TransactionsReverted(std::iter::once(tx_id).collect::>()); - state.mempool_update(revert_event).await.unwrap(); - - assert_eq!(state.chain_tip(), initial_chain_tip); -} diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 2d53757407..9073ed3df6 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -7,7 +7,7 @@ use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_objects::Word; -use miden_objects::account::Account; +use miden_objects::account::{Account, AccountId}; use miden_objects::block::BlockHeader; use miden_objects::crypto::merkle::{Forest, MmrPeaks, PartialMmr}; use miden_objects::note::NoteScript; @@ -105,33 +105,6 @@ impl StoreClient { } } - /// Returns the list of unconsumed network notes. - #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] - pub async fn get_unconsumed_network_notes(&self) -> Result, StoreError> { - let mut all_notes = Vec::new(); - let mut page_token: Option = None; - - loop { - let req = proto::store::UnconsumedNetworkNotesRequest { page_token, page_size: 128 }; - let resp = self.inner.clone().get_unconsumed_network_notes(req).await?.into_inner(); - - let page: Vec = resp - .notes - .into_iter() - .map(NetworkNote::try_from) - .collect::, _>>()?; - - all_notes.extend(page); - - match resp.next_token { - Some(tok) => page_token = Some(tok), - None => break, - } - } - - Ok(all_notes) - } - #[instrument(target = COMPONENT, name = "store.client.get_network_account", skip_all, err)] pub async fn get_network_account( &self, @@ -161,6 +134,61 @@ impl StoreClient { Ok(account) } + /// Returns the list of unconsumed network notes for a specific network account up to a + /// specified block. 
+ #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] + pub async fn get_unconsumed_network_notes( + &self, + network_account_prefix: NetworkAccountPrefix, + block_num: u32, + ) -> Result, StoreError> { + // Upper bound of each note is ~10KB. Limit page size to ~10MB. + const PAGE_SIZE: u64 = 1024; + + let mut all_notes = Vec::new(); + let mut page_token: Option = None; + + let mut store_client = self.inner.clone(); + loop { + let req = proto::store::UnconsumedNetworkNotesRequest { + page_token, + page_size: PAGE_SIZE, + network_account_id_prefix: network_account_prefix.inner(), + block_num, + }; + let resp = store_client.get_unconsumed_network_notes(req).await?.into_inner(); + + all_notes.reserve(resp.notes.len()); + for note in resp.notes { + all_notes.push(NetworkNote::try_from(note)?); + } + + match resp.next_token { + Some(token) => page_token = Some(token), + None => break, + } + } + + Ok(all_notes) + } + + // TODO: add pagination. + #[instrument(target = COMPONENT, name = "store.client.get_network_account_ids", skip_all, err)] + pub async fn get_network_account_ids(&self) -> Result, StoreError> { + let response = self.inner.clone().get_network_account_ids(()).await?.into_inner(); + + let accounts: Result, ConversionError> = response + .account_ids + .into_iter() + .map(|account_id| { + AccountId::read_from_bytes(&account_id.id) + .map_err(|err| ConversionError::deserialization_error("account_id", err)) + }) + .collect(); + + Ok(accounts?) 
+ } + #[instrument(target = COMPONENT, name = "store.client.get_note_script_by_root", skip_all, err)] pub async fn get_note_script_by_root( &self, diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index 351384033b..33703e88a4 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -165,24 +165,11 @@ pub struct MaybeAccountDetails { #[prost(message, optional, tag = "1")] pub details: ::core::option::Option, } -/// Returns a list of unconsumed network notes using pagination. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesRequest { - /// An opaque token used to paginate through the notes. - /// - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, -} /// Returns a paginated list of unconsumed network notes for an account. /// /// Notes created or consumed after the specified block are excluded from the result. #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesForAccountRequest { +pub struct UnconsumedNetworkNotesRequest { /// This should be null on the first call, and set to the response token until the response token /// is null, at which point all data has been fetched. /// @@ -216,6 +203,13 @@ pub struct UnconsumedNetworkNotes { #[prost(message, repeated, tag = "2")] pub notes: ::prost::alloc::vec::Vec, } +/// Represents the result of getting the network account ids. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NetworkAccountIdList { + /// The list of network account ids. 
+ #[prost(message, repeated, tag = "1")] + pub account_ids: ::prost::alloc::vec::Vec, +} /// Current blockchain data based on the requested block number. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CurrentBlockchainData { @@ -2354,38 +2348,6 @@ pub mod ntx_builder_client { ); self.inner.unary(req, path, codec).await } - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. - pub async fn get_unconsumed_network_notes_for_account( - &mut self, - request: impl tonic::IntoRequest< - super::UnconsumedNetworkNotesForAccountRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetUnconsumedNetworkNotesForAccount", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "store.NtxBuilder", - "GetUnconsumedNetworkNotesForAccount", - ), - ); - self.inner.unary(req, path, codec).await - } /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this /// header for executing network transactions. If the block number is not provided, the latest /// header and peaks will be retrieved. @@ -2443,6 +2405,31 @@ pub mod ntx_builder_client { ); self.inner.unary(req, path, codec).await } + /// Returns a list of all network account ids. 
+ pub async fn get_network_account_ids( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNetworkAccountIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountIds")); + self.inner.unary(req, path, codec).await + } /// Returns the script for a note by its root. pub async fn get_note_script_by_root( &mut self, @@ -2500,14 +2487,6 @@ pub mod ntx_builder_server { tonic::Response, tonic::Status, >; - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. - async fn get_unconsumed_network_notes_for_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this /// header for executing network transactions. If the block number is not provided, the latest /// header and peaks will be retrieved. @@ -2526,6 +2505,14 @@ pub mod ntx_builder_server { tonic::Response, tonic::Status, >; + /// Returns a list of all network account ids. + async fn get_network_account_ids( + &self, + request: tonic::Request<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Returns the script for a note by its root. 
async fn get_note_script_by_root( &self, @@ -2713,17 +2700,15 @@ pub mod ntx_builder_server { }; Box::pin(fut) } - "/store.NtxBuilder/GetUnconsumedNetworkNotesForAccount" => { + "/store.NtxBuilder/GetCurrentBlockchainData" => { #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesForAccountSvc( - pub Arc, - ); + struct GetCurrentBlockchainDataSvc(pub Arc); impl< T: NtxBuilder, > tonic::server::UnaryService< - super::UnconsumedNetworkNotesForAccountRequest, - > for GetUnconsumedNetworkNotesForAccountSvc { - type Response = super::UnconsumedNetworkNotes; + super::super::blockchain::MaybeBlockNumber, + > for GetCurrentBlockchainDataSvc { + type Response = super::CurrentBlockchainData; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -2731,12 +2716,12 @@ pub mod ntx_builder_server { fn call( &mut self, request: tonic::Request< - super::UnconsumedNetworkNotesForAccountRequest, + super::super::blockchain::MaybeBlockNumber, >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_unconsumed_network_notes_for_account( + ::get_current_blockchain_data( &inner, request, ) @@ -2751,7 +2736,7 @@ pub mod ntx_builder_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetUnconsumedNetworkNotesForAccountSvc(inner); + let method = GetCurrentBlockchainDataSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -2767,28 +2752,27 @@ pub mod ntx_builder_server { }; Box::pin(fut) } - "/store.NtxBuilder/GetCurrentBlockchainData" => { + "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { #[allow(non_camel_case_types)] - struct GetCurrentBlockchainDataSvc(pub Arc); + struct GetNetworkAccountDetailsByPrefixSvc( + pub Arc, + ); impl< T: NtxBuilder, - > tonic::server::UnaryService< - super::super::blockchain::MaybeBlockNumber, - > for GetCurrentBlockchainDataSvc { - 
type Response = super::CurrentBlockchainData; + > tonic::server::UnaryService + for GetNetworkAccountDetailsByPrefixSvc { + type Response = super::MaybeAccountDetails; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::blockchain::MaybeBlockNumber, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_current_blockchain_data( + ::get_network_account_details_by_prefix( &inner, request, ) @@ -2803,7 +2787,7 @@ pub mod ntx_builder_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetCurrentBlockchainDataSvc(inner); + let method = GetNetworkAccountDetailsByPrefixSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -2819,30 +2803,20 @@ pub mod ntx_builder_server { }; Box::pin(fut) } - "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { + "/store.NtxBuilder/GetNetworkAccountIds" => { #[allow(non_camel_case_types)] - struct GetNetworkAccountDetailsByPrefixSvc( - pub Arc, - ); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountDetailsByPrefixSvc { - type Response = super::MaybeAccountDetails; + struct GetNetworkAccountIdsSvc(pub Arc); + impl tonic::server::UnaryService<()> + for GetNetworkAccountIdsSvc { + type Response = super::NetworkAccountIdList; type Future = BoxFuture< tonic::Response, tonic::Status, >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_network_account_details_by_prefix( - &inner, - request, - ) + ::get_network_account_ids(&inner, request) .await }; Box::pin(fut) @@ -2854,7 +2828,7 @@ pub mod ntx_builder_server { let max_encoding_message_size = 
self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetNetworkAccountDetailsByPrefixSvc(inner); + let method = GetNetworkAccountIdsSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 918b395502..7b48684edf 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -433,6 +433,13 @@ impl Db { .await } + /// Loads all network account IDs from the DB. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_network_account_ids(&self) -> Result> { + self.transact("Get all network account IDs", queries::select_all_network_account_ids) + .await + } + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_state_sync( &self, @@ -558,21 +565,9 @@ impl Db { Ok(()) } - /// Loads the network notes that have not been consumed yet, using pagination to limit the - /// number of notes returned. - pub(crate) async fn select_unconsumed_network_notes( - &self, - page: Page, - ) -> Result<(Vec, Page)> { - self.transact("unconsumed network notes", move |conn| { - models::queries::unconsumed_network_notes(conn, page) - }) - .await - } - /// Loads the network notes for an account that are unconsumed by a specified block number. /// Pagination is used to limit the number of notes returned. 
- pub(crate) async fn select_unconsumed_network_notes_for_account( + pub(crate) async fn select_unconsumed_network_notes( &self, network_account_id_prefix: NetworkAccountPrefix, block_num: BlockNumber, diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 9a178e21a1..fe51973596 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -407,6 +407,30 @@ pub(crate) fn select_all_accounts( Ok(account_infos) } +/// Returns all network account IDs. +/// +/// # Returns +/// +/// A vector with network account IDs, or an error. +pub(crate) fn select_all_network_account_ids( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + let account_ids_raw: Vec> = QueryDsl::select( + schema::accounts::table.filter(schema::accounts::network_account_id_prefix.is_not_null()), + schema::accounts::account_id, + ) + .load::>(conn)?; + + let account_ids = account_ids_raw + .into_iter() + .map(|id_bytes| { + AccountId::read_from_bytes(&id_bytes).map_err(DatabaseError::DeserializationError) + }) + .collect::, DatabaseError>>()?; + + Ok(account_ids) +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapValue { pub block_num: BlockNumber, diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index a5c2ffc2da..4f5e4b5727 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -386,115 +386,6 @@ pub(crate) fn select_note_script_by_root( .map_err(Into::into) } -/// Returns a paginated batch of network notes that have not yet been consumed. -/// -/// # Returns -/// -/// A set of unconsumed network notes with maximum length of `size` and the page to get -/// the next set. 
-/// -/// Attention: uses the _implicit_ column `rowid`, which requires to use a few raw SQL nugget -/// statements -/// -/// # Raw SQL -/// -/// ```sql -/// SELECT -/// notes.committed_at, -/// notes.batch_index, -/// notes.note_index, -/// notes.note_id, -/// notes.note_type, -/// notes.sender, -/// notes.tag, -/// notes.aux, -/// notes.execution_hint, -/// notes.assets, -/// notes.inputs, -/// notes.serial_num, -/// notes.inclusion_path, -/// note_scripts.script, -/// notes.rowid -/// FROM notes -/// LEFT JOIN note_scripts ON notes.script_root = note_scripts.script_root -/// WHERE -/// execution_mode = 0 AND consumed_at IS NULL AND notes.rowid >= ?1 -/// ORDER BY notes.rowid ASC -/// LIMIT ?2 -/// ``` -#[allow( - clippy::cast_sign_loss, - reason = "We need custom SQL statements which has given types that we need to convert" -)] -pub(crate) fn unconsumed_network_notes( - conn: &mut SqliteConnection, - mut page: Page, -) -> Result<(Vec, Page), DatabaseError> { - assert_eq!( - NoteExecutionMode::Network as u8, - 0, - "Hardcoded execution value must match query" - ); - - let rowid_sel = diesel::dsl::sql::("notes.rowid"); - let rowid_sel_ge = - diesel::dsl::sql::("notes.rowid >= ") - .bind::(page.token.unwrap_or_default() as i64); - - #[allow( - clippy::items_after_statements, - reason = "It's only relevant for a single call function" - )] - type RawLoadedTuple = ( - NoteRecordRawRow, - Option>, // script - i64, // rowid (from sql::("notes.rowid")) - ); - - #[allow( - clippy::items_after_statements, - reason = "It's only relevant for a single call function" - )] - fn split_into_raw_note_record_and_implicit_row_id( - tuple: RawLoadedTuple, - ) -> (NoteRecordWithScriptRawJoined, i64) { - let (note, script, row) = tuple; - let combined = NoteRecordWithScriptRawJoined::from((note, script)); - (combined, row) - } - - let raw = SelectDsl::select( - schema::notes::table.left_join( - schema::note_scripts::table - 
.on(schema::notes::script_root.eq(schema::note_scripts::script_root.nullable())), - ), - ( - NoteRecordRawRow::as_select(), - schema::note_scripts::script.nullable(), - rowid_sel.clone(), - ), - ) - .filter(schema::notes::execution_mode.eq(NoteExecutionMode::Network.to_raw_sql())) - .filter(schema::notes::consumed_at.is_null()) - .filter(rowid_sel_ge) - .order(rowid_sel.asc()) - .limit(page.size.get() as i64 + 1) - .load::(conn)?; - - let mut notes = Vec::with_capacity(page.size.into()); - for raw_item in raw { - let (raw_item, row_id) = split_into_raw_note_record_and_implicit_row_id(raw_item); - page.token = None; - if notes.len() == page.size.get() { - page.token = Some(row_id as u64); - break; - } - notes.push(TryInto::::try_into(raw_item)?); - } - - Ok((notes, page)) -} - /// Returns a paginated batch of network notes for an account that are unconsumed by a specified /// block number. /// diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 3c837f2509..84d0455fcf 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -450,139 +450,6 @@ fn make_account_and_note( #[test] #[miden_node_test_macro::enable_logging] fn sql_unconsumed_network_notes() { - // Number of notes to generate. - const N: u64 = 32; - - let mut conn = create_db(); - let conn = &mut conn; - - let block_num = BlockNumber::from(1); - // An arbitrary public account (network note tag requires public account). - create_block(conn, block_num); - - let account_notes = [ - make_account_and_note(conn, block_num, [0u8; 32], AccountStorageMode::Public), - make_account_and_note(conn, block_num, [1u8; 32], AccountStorageMode::Network), - ]; - let network_account_id = account_notes[1].0; - - // Create some notes, of which half are network notes. 
- let notes = (0..N) - .map(|i| { - let index = (i % 2) as usize; - let is_network = account_notes[index].0.storage_mode() == AccountStorageMode::Network; - let account_id = account_notes[index].0; - let new_note = &account_notes[index].1; - let note = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, i as usize).unwrap(), - note_id: num_to_word(i), - note_commitment: num_to_word(i), - metadata: NoteMetadata::new( - account_notes[index].0, - NoteType::Public, - NoteTag::from_account_id(account_id), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), - details: is_network.then_some(NoteDetails::from(new_note)), - inclusion_path: SparseMerklePath::default(), - }; - - (note, is_network.then_some(num_to_nullifier(i))) - }) - .collect::>(); - - // Copy out all network notes to assert against. These will be in chronological order already. - let network_notes = notes - .iter() - .filter_map(|(note, nullifier)| nullifier.is_some().then_some(note.clone())) - .collect::>(); - - // Insert the set of notes. - queries::insert_scripts(conn, notes.iter().map(|(note, _)| note)).unwrap(); - queries::insert_notes(conn, ¬es).unwrap(); - - // Fetch all network notes by setting a limit larger than the amount available. - let (result, _) = queries::unconsumed_network_notes( - conn, - Page { - token: None, - size: NonZeroUsize::new(N as usize * 10).unwrap(), - }, - ) - .unwrap(); - assert_eq!(result, network_notes); - let (result, _) = queries::select_unconsumed_network_notes_by_tag( - conn, - NoteTag::from_account_id(network_account_id).into(), - block_num, - Page { - token: None, - size: NonZeroUsize::new(N as usize * 10).unwrap(), - }, - ) - .unwrap(); - assert_eq!(result, network_notes); - - // Check pagination works as expected. 
- let limit = 5; - let mut page = Page { - token: None, - size: NonZeroUsize::new(limit).unwrap(), - }; - network_notes.chunks(limit).for_each(|expected| { - let (result, new_page) = queries::unconsumed_network_notes(conn, page).unwrap(); - page = new_page; - assert_eq!(result, expected); - }); - network_notes.chunks(limit).for_each(|expected| { - let (result, new_page) = queries::select_unconsumed_network_notes_by_tag( - conn, - NoteTag::from_account_id(network_account_id).into(), - block_num, - page, - ) - .unwrap(); - page = new_page; - assert_eq!(result, expected); - }); - assert!(page.token.is_none()); - - // Consume every third network note and ensure these are now excluded from the results. - let consumed = notes - .iter() - .filter_map(|(_, nullifier)| *nullifier) - .step_by(3) - .collect::>(); - queries::insert_nullifiers_for_block(conn, &consumed, block_num).unwrap(); - - let expected = network_notes - .iter() - .enumerate() - .filter(|(i, _)| i % 3 != 0) - .map(|(_, note)| note.clone()) - .collect::>(); - let page = Page { - token: None, - size: NonZeroUsize::new(N as usize * 10).unwrap(), - }; - let (result, _) = queries::unconsumed_network_notes(conn, page).unwrap(); - assert_eq!(result, expected); - let (result, _) = queries::select_unconsumed_network_notes_by_tag( - conn, - NoteTag::from_account_id(network_account_id).into(), - block_num, - page, - ) - .unwrap(); - assert_eq!(result, expected); -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn sql_unconsumed_network_notes_for_account() { let mut conn = create_db(); // Create account. 
diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 9834ed5bb6..91bc5a6480 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -115,46 +115,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { request: Request, ) -> Result, Status> { let request = request.into_inner(); - - let state = self.state.clone(); - - let size = - NonZero::try_from(request.page_size as usize).map_err(|err: TryFromIntError| { - invalid_argument(err.as_report_context("invalid page_size")) - })?; - let page = Page { token: request.page_token, size }; - // TODO: no need to get the whole NoteRecord here, a NetworkNote wrapper should be created - // instead - let (notes, next_page) = - state.get_unconsumed_network_notes(page).await.map_err(internal_error)?; - - let mut network_notes = Vec::with_capacity(notes.len()); - for note in notes { - // SAFETY: Network notes are filtered in the database, so they should have details; - // otherwise the state would be corrupted - let (assets, recipient) = note.details.unwrap().into_parts(); - let note = Note::new(assets, note.metadata, recipient); - network_notes.push(note.into()); - } - - Ok(Response::new(proto::store::UnconsumedNetworkNotes { - notes: network_notes, - next_token: next_page.token, - })) - } - - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_unconsumed_network_notes_for_account", - skip_all, - err - )] - async fn get_unconsumed_network_notes_for_account( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); let block_num = BlockNumber::from(request.block_num); let network_account_id_prefix = NetworkAccountPrefix::try_from(request.network_account_id_prefix).map_err(|err| { @@ -190,14 +150,35 @@ impl ntx_builder_server::NtxBuilder for StoreApi { })) } + // TODO: add pagination. 
#[instrument( parent = None, target = COMPONENT, - name = "store.ntx_builder_server.get_note_script_by_root", + name = "store.ntx_builder_server.get_network_account_ids", skip_all, ret(level = "debug"), err )] + async fn get_network_account_ids( + &self, + _request: Request<()>, + ) -> Result, Status> { + let account_ids = self.state.get_all_network_accounts().await.map_err(internal_error)?; + + let account_ids: Vec = + account_ids.into_iter().map(Into::into).collect(); + + Ok(Response::new(proto::store::NetworkAccountIdList { account_ids })) + } + + #[instrument( + parent = None, + target = COMPONENT, + name = "store.ntx_builder_server.get_note_script_by_root", + skip_all, + ret(level = "debug"), + err + )] async fn get_note_script_by_root( &self, request: Request, diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 8fb314f393..2dd281301b 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -918,6 +918,11 @@ impl State { self.db.select_network_account_by_prefix(id_prefix).await } + /// Returns account IDs for all public (on-chain) network accounts. + pub async fn get_all_network_accounts(&self) -> Result, DatabaseError> { + self.db.select_all_network_account_ids().await + } + /// Returns the respective account proof with optional details, such as asset and storage /// entries. /// @@ -1097,14 +1102,6 @@ impl State { self.db.get_account_vault_sync(account_id, block_range).await } - /// Returns the unprocessed network notes, along with the next pagination token. - pub async fn get_unconsumed_network_notes( - &self, - page: Page, - ) -> Result<(Vec, Page), DatabaseError> { - self.db.select_unconsumed_network_notes(page).await - } - /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. 
pub async fn get_unconsumed_network_notes_for_account( @@ -1114,7 +1111,7 @@ impl State { page: Page, ) -> Result<(Vec, Page), DatabaseError> { self.db - .select_unconsumed_network_notes_for_account(network_account_id_prefix, block_num, page) + .select_unconsumed_network_notes(network_account_id_prefix, block_num, page) .await } diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 27473b9267..05f515ccf1 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -175,10 +175,10 @@ message BatchInputsRequest { message BatchInputs { // The block header that the transaction batch should reference. blockchain.BlockHeader batch_reference_block_header = 1; - + // Proof of each _found_ unauthenticated note's inclusion in a block. repeated note.NoteInclusionInBlockProof note_proofs = 2; - + // The serialized chain MMR which includes proofs for all blocks referenced by the // above note inclusion proofs as well as proofs for inclusion of the blocks referenced // by the transactions in the batch. @@ -249,9 +249,6 @@ service NtxBuilder { // Returns a paginated list of unconsumed network notes. rpc GetUnconsumedNetworkNotes(UnconsumedNetworkNotesRequest) returns (UnconsumedNetworkNotes) {} - // Returns a paginated list of a network account's unconsumed notes up to a specified block number. - rpc GetUnconsumedNetworkNotesForAccount(UnconsumedNetworkNotesForAccountRequest) returns (UnconsumedNetworkNotes) {} - // Returns the block header at the chain tip, as well as the MMR peaks corresponding to this // header for executing network transactions. If the block number is not provided, the latest // header and peaks will be retrieved. @@ -260,6 +257,9 @@ service NtxBuilder { // Returns the latest state of a network account with the specified account prefix. rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {} + // Returns a list of all network account ids. 
+ rpc GetNetworkAccountIds(google.protobuf.Empty) returns (NetworkAccountIdList) {} + // Returns the script for a note by its root. rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} } @@ -282,22 +282,10 @@ message MaybeAccountDetails { // GET UNCONSUMED NETWORK NOTES // ================================================================================================ -// Returns a list of unconsumed network notes using pagination. -message UnconsumedNetworkNotesRequest { - // An opaque token used to paginate through the notes. - // - // This should be null on the first call, and set to the response token until the response token - // is null, at which point all data has been fetched. - optional uint64 page_token = 1; - - // Number of notes to retrieve per page. - uint64 page_size = 2; -} - // Returns a paginated list of unconsumed network notes for an account. // // Notes created or consumed after the specified block are excluded from the result. -message UnconsumedNetworkNotesForAccountRequest { +message UnconsumedNetworkNotesRequest { // This should be null on the first call, and set to the response token until the response token // is null, at which point all data has been fetched. // @@ -330,6 +318,16 @@ message UnconsumedNetworkNotes { repeated note.NetworkNote notes = 2; } +// GET NETWORK ACCOUNTS +// ================================================================================================ + +// Represents the result of getting the network account ids. +message NetworkAccountIdList { + // The list of network account ids. 
+  repeated account.AccountId account_ids = 1; +} + + // GET CURRENT BLOCKCHAIN DATA // ================================================================================================ 

From 8b1c11ad99fa2febe6911e9f7b03009e78269a39 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 12 Dec 2025 07:43:17 +0200 Subject: [PATCH 046/125] feat: reduce block interval to `3s` (#1438) --- CHANGELOG.md | 2 ++ crates/block-producer/src/lib.rs | 4 ++-- crates/store/src/accounts/mod.rs | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29e3ee30f7..e2bc1f4d04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,8 @@ - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). - Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). - [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). +- Reduced default block interval from `5s` to `3s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). +- Increased retained account tree history from 33 to 50 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). ### Fixes diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index e85fcb3a51..6f4f844fb3 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -51,10 +51,10 @@ const SERVER_MEMPOOL_EXPIRATION_SLACK: u32 = 2; const CACHED_MEMPOOL_STATS_UPDATE_INTERVAL: Duration = Duration::from_secs(5); /// How often a block is created. -pub const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(5); +pub const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(3); /// How often a batch is created. 
-pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(2); +pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(1); /// The default transaction capacity of the mempool. /// diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index 71131a6151..7569859977 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -118,7 +118,7 @@ pub struct AccountTreeWithHistory { impl AccountTreeWithHistory { /// Maximum number of historical blocks to maintain. - pub const MAX_HISTORY: usize = 33; + pub const MAX_HISTORY: usize = 50; // CONSTRUCTORS // -------------------------------------------------------------------------------------------- From 2a0d93e6f65d0e5bd86bbdd2cff1616f393cd507 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 12 Dec 2025 10:25:07 +0200 Subject: [PATCH 047/125] chore: merge main (#1449) This includes the mempool's bugfix for pass-through transactions. --- CHANGELOG.md | 6 + .../block-producer/src/batch_builder/mod.rs | 38 ++-- crates/block-producer/src/domain/batch.rs | 109 ++++++++++ crates/block-producer/src/domain/mod.rs | 3 +- crates/block-producer/src/mempool/mod.rs | 28 +-- crates/block-producer/src/mempool/nodes.rs | 194 +++++++++++++----- crates/block-producer/src/mempool/state.rs | 143 ++++++++++--- crates/block-producer/src/mempool/tests.rs | 122 +++++++++-- .../src/test_utils/proven_tx.rs | 1 + 9 files changed, 511 insertions(+), 133 deletions(-) create mode 100644 crates/block-producer/src/domain/batch.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index e2bc1f4d04..a9df3c1b39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,8 +37,14 @@ ## v0.12.6 +### Enhancements + - Added Faucet metadata to the `miden-network-monitor` binary ([#1373](https://github.com/0xMiden/miden-node/pull/1373)). 
+### Fixes + +- Block producer crash caused by pass through transactions ([#1396](https://github.com/0xMiden/miden-node/pull/1396)). + ## v0.12.5 (2025-11-27) - Actually update `miden-base` dependencies ([#1384](https://github.com/0xMiden/miden-node/pull/1384)). diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index 6e991dea4d..fb84fd28b6 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -17,6 +17,7 @@ use tokio::time; use tracing::{Instrument, Span, instrument}; use url::Url; +use crate::domain::batch::SelectedBatch; use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::BuildBatchError; use crate::mempool::SharedMempool; @@ -168,7 +169,7 @@ impl BatchJob { }; batch.inject_telemetry(); - let batch_id = batch.id; + let batch_id = batch.id(); self.get_batch_inputs(batch) .and_then(|(txs, inputs)| Self::propose_batch(txs, inputs) ) @@ -190,25 +191,21 @@ impl BatchJob { #[instrument(target = COMPONENT, name = "batch_builder.select_batch", skip_all)] async fn select_batch(&self) -> Option { - self.mempool - .lock() - .await - .select_batch() - .map(|(id, transactions)| SelectedBatch { id, transactions }) + self.mempool.lock().await.select_batch() } #[instrument(target = COMPONENT, name = "batch_builder.get_batch_inputs", skip_all, err)] async fn get_batch_inputs( &self, batch: SelectedBatch, - ) -> Result<(Vec>, BatchInputs), BuildBatchError> { + ) -> Result<(SelectedBatch, BatchInputs), BuildBatchError> { let block_references = batch - .transactions + .txs() .iter() .map(Deref::deref) .map(AuthenticatedTransaction::reference_block); let unauthenticated_notes = batch - .transactions + .txs() .iter() .map(Deref::deref) .flat_map(AuthenticatedTransaction::unauthenticated_note_commitments); @@ -217,18 +214,18 @@ impl BatchJob { .get_batch_inputs(block_references, unauthenticated_notes) .await 
.map_err(BuildBatchError::FetchBatchInputsFailed) - .map(|inputs| (batch.transactions, inputs)) + .map(|inputs| (batch, inputs)) } #[instrument(target = COMPONENT, name = "batch_builder.propose_batch", skip_all, err)] async fn propose_batch( - transactions: Vec>, + selected: SelectedBatch, inputs: BatchInputs, ) -> Result { - let transactions = transactions - .iter() - .map(Deref::deref) - .map(AuthenticatedTransaction::proven_transaction) + let transactions = selected + .into_transactions() + .into_iter() + .map(|tx| tx.proven_transaction()) .collect(); ProposedBatch::new( @@ -295,11 +292,6 @@ impl BatchJob { } } -struct SelectedBatch { - id: BatchId, - transactions: Vec>, -} - // BATCH PROVER // ================================================================================================ @@ -332,11 +324,11 @@ impl BatchProver { impl TelemetryInjectorExt for SelectedBatch { fn inject_telemetry(&self) { - Span::current().set_attribute("batch.id", self.id); - Span::current().set_attribute("transactions.count", self.transactions.len()); + Span::current().set_attribute("batch.id", self.id()); + Span::current().set_attribute("transactions.count", self.txs().len()); // Accumulate all telemetry based on transactions. 
let (tx_ids, input_notes_count, output_notes_count, unauth_notes_count) = - self.transactions.iter().fold( + self.txs().iter().fold( (vec![], 0, 0, 0), |( mut tx_ids, diff --git a/crates/block-producer/src/domain/batch.rs b/crates/block-producer/src/domain/batch.rs new file mode 100644 index 0000000000..4a36798c73 --- /dev/null +++ b/crates/block-producer/src/domain/batch.rs @@ -0,0 +1,109 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use miden_objects::Word; +use miden_objects::account::AccountId; +use miden_objects::batch::BatchId; +use miden_objects::transaction::TransactionId; + +use crate::domain::transaction::AuthenticatedTransaction; + +// SELECTED BATCH +// ================================================================================================ + +/// A sequence of transactions selected by the [`Mempool`] to be processed by the +/// [`BatchBuilder`] into a [`ProposedBatch`], and then finally into a [`ProvenBatch`]. +/// +/// [Mempool]: crate::mempool::Mempool +/// [BatchBuilder]: crate::batch_builder::BatchBuilder +/// [ProposedBatch]: miden_objects::batch::ProposedBatch +/// [ProvenBatch]: miden_objects::batch::ProvenBatch +#[derive(Clone, Debug, PartialEq)] +pub(crate) struct SelectedBatch { + txs: Vec>, + id: BatchId, + account_updates: HashMap, +} + +impl SelectedBatch { + pub(crate) fn builder() -> SelectedBatchBuilder { + SelectedBatchBuilder::default() + } + + pub(crate) fn id(&self) -> BatchId { + self.id + } + + pub(crate) fn txs(&self) -> &[Arc] { + &self.txs + } + + pub(crate) fn into_transactions(self) -> Vec> { + self.txs + } + + /// The aggregated list of account transitions this batch causes given as tuples of `(AccountId, + /// initial commitment, final commitment)`. + /// + /// Note that the updates are aggregated, i.e. only a single update per account is possible, and + /// transaction updates to an account of `a -> b -> c` will result in a single `a -> c`. 
+ pub(crate) fn account_updates(&self) -> impl Iterator { + self.account_updates.iter().map(|(account, (from, to))| (*account, *from, *to)) + } +} + +/// A builder to construct a [`SelectedBatch`]. +#[derive(Clone, Default)] +pub(crate) struct SelectedBatchBuilder { + pub(crate) txs: Vec>, + pub(crate) account_updates: HashMap, +} + +impl SelectedBatchBuilder { + /// Appends the given transaction to the current batch. + /// + /// # Panics + /// + /// Panics if the new transaction's account update is inconsistent with the current account + /// state within the batch i.e. if the transaction's initial account commitment does not + /// match the account update's final account commitment within the batch (if any). + pub(crate) fn push(&mut self, tx: Arc) { + let update = tx.account_update(); + self.account_updates + .entry(update.account_id()) + .and_modify(|(_, to)| { + assert!( + to == &update.initial_state_commitment(), + "Cannot select transaction {} as its initial commitment {} for account {} does \ +not match the current commitment {}", + tx.id(), + update.initial_state_commitment(), + update.account_id(), + to + ); + + *to = update.final_state_commitment(); + }) + .or_insert((update.initial_state_commitment(), update.final_state_commitment())); + + self.txs.push(tx); + } + + /// Returns `true` if the batch contains the given transaction already. + pub(crate) fn contains(&self, target: &TransactionId) -> bool { + self.txs.iter().any(|tx| &tx.id() == target) + } + + /// Returns `true` if it contains no transactions. + pub(crate) fn is_empty(&self) -> bool { + self.txs.is_empty() + } + + /// Finalizes the batch selection. 
+ pub(crate) fn build(self) -> SelectedBatch { + let Self { txs, account_updates } = self; + let id = BatchId::from_ids(txs.iter().map(|tx| (tx.id(), tx.account_id()))); + + SelectedBatch { txs, id, account_updates } + } +} diff --git a/crates/block-producer/src/domain/mod.rs b/crates/block-producer/src/domain/mod.rs index 37f08066e0..410b16c40f 100644 --- a/crates/block-producer/src/domain/mod.rs +++ b/crates/block-producer/src/domain/mod.rs @@ -1 +1,2 @@ -pub mod transaction; +pub(crate) mod batch; +pub(crate) mod transaction; diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 659b21f727..3d6a367b5e 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -53,6 +53,7 @@ use subscription::SubscriptionProvider; use tokio::sync::{Mutex, MutexGuard, mpsc}; use tracing::{instrument, warn}; +use crate::domain::batch::SelectedBatch; use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::{AddTransactionError, VerifyTxError}; use crate::mempool::budget::BudgetStatus; @@ -263,7 +264,7 @@ impl Mempool { /// /// Returns `None` if no transactions are available. #[instrument(target = COMPONENT, name = "mempool.select_batch", skip_all)] - pub fn select_batch(&mut self) -> Option<(BatchId, Vec>)> { + pub fn select_batch(&mut self) -> Option { // The selection algorithm is fairly neanderthal in nature. // // We iterate over all transaction nodes, each time selecting the first transaction which @@ -278,13 +279,13 @@ impl Mempool { // // The additional bookkeeping can be implemented once we have fee related strategies. KISS. 
- let mut selected = ProposedBatchNode::default(); + let mut selected = SelectedBatch::builder(); let mut budget = self.config.batch_budget; let mut candidates = self.nodes.txs.values(); 'next: while let Some(candidate) = candidates.next() { - if selected.contains(candidate.id()) { + if selected.contains(&candidate.id()) { continue 'next; } @@ -294,7 +295,9 @@ impl Mempool { match parent { // TODO(mirko): Once user batches are supported, they will also need to be // checked here. - NodeId::Transaction(parent) if !selected.contains(parent) => continue 'next, + NodeId::Transaction(parent) if !selected.contains(&parent) => { + continue 'next; + }, NodeId::Transaction(_) | NodeId::ProposedBatch(_) | NodeId::ProvenBatch(_) @@ -313,11 +316,12 @@ impl Mempool { if selected.is_empty() { return None; } + let selected = selected.build(); - let batch_id = selected.calculate_id(); - let batch_txs = selected.transactions().cloned().collect::>(); + let batch = ProposedBatchNode::new(selected.clone()); + let batch_id = batch.batch_id(); - for tx in &batch_txs { + for tx in batch.transactions() { let node = self.nodes.txs.remove(&tx.id()).expect("selected transaction node must exist"); self.state.remove(&node); @@ -327,13 +331,13 @@ impl Mempool { "Transaction selected for inclusion in batch" ); } - self.state.insert(NodeId::ProposedBatch(batch_id), &selected); - self.nodes.proposed_batches.insert(batch_id, selected); + self.state.insert(NodeId::ProposedBatch(batch_id), &batch); + self.nodes.proposed_batches.insert(batch_id, batch); // TODO(mirko): Selecting a batch can unblock user batches, which should be checked here. self.inject_telemetry(); - Some((batch_id, batch_txs)) + Some(selected) } /// Drops the proposed batch and all of its descendants. 
@@ -415,7 +419,8 @@ impl Mempool { self.nodes.proposed_block.as_ref().unwrap().0 ); - let mut selected = BlockNode::default(); + let block_number = self.chain_tip.child(); + let mut selected = BlockNode::new(block_number); let mut budget = self.config.block_budget; let mut candidates = self.nodes.proven_batches.values(); @@ -443,7 +448,6 @@ impl Mempool { selected.push(candidate.clone()); } - let block_number = self.chain_tip.child(); // Replace the batches with the block in state and nodes. for batch in selected.batches() { // SAFETY: Selected batches came from nodes, and are unique. diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index a551c77818..8a0081214f 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -8,6 +8,7 @@ use miden_objects::block::BlockNumber; use miden_objects::note::{NoteHeader, Nullifier}; use miden_objects::transaction::{InputNoteCommitment, TransactionHeader, TransactionId}; +use crate::domain::batch::SelectedBatch; use crate::domain::transaction::AuthenticatedTransaction; /// Uniquely identifies a node in the mempool. @@ -49,38 +50,27 @@ impl TransactionNode { /// Represents a batch which has been proposed by the mempool and which is undergoing proving. /// /// Once proven it transitions to a [`ProvenBatchNode`]. 
-#[derive(Clone, Debug, PartialEq, Default)] -pub(super) struct ProposedBatchNode(Vec>); +#[derive(Clone, Debug, PartialEq)] +pub(super) struct ProposedBatchNode(SelectedBatch); impl ProposedBatchNode { - pub(super) fn push(&mut self, tx: Arc) { - self.0.push(tx); - } - - pub(super) fn contains(&mut self, id: TransactionId) -> bool { - self.0.iter().any(|tx| tx.id() == id) - } - - pub(super) fn is_empty(&self) -> bool { - self.0.is_empty() - } - - pub(super) fn calculate_id(&self) -> BatchId { - BatchId::from_transactions( - self.0 - .iter() - .map(AsRef::as_ref) - .map(AuthenticatedTransaction::raw_proven_transaction), - ) + pub(super) fn new(batch: SelectedBatch) -> Self { + Self(batch) } pub(super) fn into_proven_batch_node(self, proof: Arc) -> ProvenBatchNode { - let Self(txs) = self; - ProvenBatchNode { txs, inner: proof } + ProvenBatchNode { + txs: self.0.into_transactions(), + inner: proof, + } } pub(super) fn expires_at(&self) -> BlockNumber { - self.0.iter().map(|tx| tx.expires_at()).min().unwrap_or_default() + self.0.txs().iter().map(|tx| tx.expires_at()).min().unwrap_or_default() + } + + pub(super) fn batch_id(&self) -> BatchId { + self.0.id() } } @@ -113,17 +103,48 @@ impl ProvenBatchNode { } /// Represents a block - both committed and in-progress. -#[derive(Clone, Debug, PartialEq, Default)] +#[derive(Clone, Debug, PartialEq)] pub(super) struct BlockNode { txs: Vec>, batches: Vec>, + number: BlockNumber, + /// Aggregated account updates of all batches. 
+ account_updates: HashMap, } impl BlockNode { + pub(super) fn new(number: BlockNumber) -> Self { + Self { + number, + txs: Vec::default(), + batches: Vec::default(), + account_updates: HashMap::default(), + } + } + pub(super) fn push(&mut self, batch: ProvenBatchNode) { - let ProvenBatchNode { txs, inner } = batch; + let ProvenBatchNode { txs, inner: batch } = batch; + for (account, update) in batch.account_updates() { + self.account_updates + .entry(*account) + .and_modify(|(_, to)| { + assert!( + to == &update.initial_state_commitment(), + "Cannot select batch {} as its initial commitment {} for account {} does \ + not match the current commitment {}", + batch.id(), + update.initial_state_commitment(), + update.account_id(), + to + ); + + *to = update.final_state_commitment(); + }) + .or_insert((update.initial_state_commitment(), update.final_state_commitment())); + } + self.txs.extend(txs); - self.batches.push(inner); + self.batches.push(batch); } pub(super) fn contains(&self, id: BatchId) -> bool { @@ -154,8 +175,12 @@ pub(super) trait Node { /// The account state commitment updates caused by this node. /// /// Output tuple represents each updates `(account ID, initial commitment, final commitment)`. + /// + /// Updates must be aggregates i.e. only a single account ID update allowed. 
fn account_updates(&self) -> Box + '_>; fn transactions(&self) -> Box> + '_>; + + fn id(&self) -> NodeId; } impl Node for TransactionNode { @@ -183,34 +208,35 @@ impl Node for TransactionNode { fn transactions(&self) -> Box> + '_> { Box::new(std::iter::once(&self.0)) } + + fn id(&self) -> NodeId { + NodeId::Transaction(self.id()) + } } impl Node for ProposedBatchNode { fn nullifiers(&self) -> Box + '_> { - Box::new(self.0.iter().flat_map(|tx| tx.nullifiers())) + Box::new(self.0.txs().iter().flat_map(|tx| tx.nullifiers())) } fn output_note_commitments(&self) -> Box + '_> { - Box::new(self.0.iter().flat_map(|tx| tx.output_note_commitments())) + Box::new(self.0.txs().iter().flat_map(|tx| tx.output_note_commitments())) } fn unauthenticated_note_commitments(&self) -> Box + '_> { - Box::new(self.0.iter().flat_map(|tx| tx.unauthenticated_note_commitments())) + Box::new(self.0.txs().iter().flat_map(|tx| tx.unauthenticated_note_commitments())) } fn account_updates(&self) -> Box + '_> { - Box::new(self.0.iter().flat_map(|tx| { - let update = tx.account_update(); - std::iter::once(( - update.account_id(), - update.initial_state_commitment(), - update.final_state_commitment(), - )) - })) + Box::new(self.0.account_updates()) } fn transactions(&self) -> Box> + '_> { - Box::new(self.0.iter()) + Box::new(self.0.txs().iter()) + } + + fn id(&self) -> NodeId { + NodeId::ProposedBatch(self.0.id()) } } @@ -252,6 +278,10 @@ impl Node for ProvenBatchNode { fn transactions(&self) -> Box> + '_> { Box::new(self.txs.iter()) } + + fn id(&self) -> NodeId { + NodeId::ProvenBatch(self.id()) + } } impl Node for BlockNode { @@ -277,20 +307,16 @@ impl Node for BlockNode { } fn account_updates(&self) -> Box + '_> { - Box::new(self.batches.iter().flat_map(|batch| batch.account_updates()).map( - |(_, update)| { - ( - update.account_id(), - update.initial_state_commitment(), - update.final_state_commitment(), - ) - }, - )) + Box::new(self.account_updates.iter().map(|(account, (from, to))| (*account, 
*from, *to))) } fn transactions(&self) -> Box> + '_> { Box::new(self.txs.iter()) } + + fn id(&self) -> NodeId { + NodeId::Block(self.number) + } } /// Contains the current nodes of the state DAG. @@ -327,8 +353,78 @@ impl Nodes { pub(super) fn uncommitted_tx_count(&self) -> usize { self.txs.len() - + self.proposed_batches.values().map(|b| b.0.len()).sum::() + + self.proposed_batches.values().map(|b| b.0.txs().len()).sum::() + self.proven_batches.values().map(|b| b.txs.len()).sum::() + self.proposed_block.as_ref().map(|b| b.1.txs.len()).unwrap_or_default() } } + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use miden_objects::batch::BatchAccountUpdate; + use miden_objects::transaction::{InputNotes, OrderedTransactionHeaders}; + + use super::*; + use crate::test_utils::MockProvenTxBuilder; + + #[test] + fn proposed_batch_aggregates_account_updates() { + let mut batch = SelectedBatch::builder(); + let txs = MockProvenTxBuilder::sequential(); + + let account = txs.first().unwrap().account_id(); + let from = txs.first().unwrap().account_update().initial_state_commitment(); + let to = txs.last().unwrap().account_update().final_state_commitment(); + let expected = std::iter::once((account, from, to)); + + for tx in txs { + batch.push(tx); + } + let batch = ProposedBatchNode::new(batch.build()); + + itertools::assert_equal(batch.account_updates(), expected); + } + + #[test] + fn block_aggregates_account_updates() { + // We map each tx into its own batch. + // + // This let's us trivially know what the expected aggregate block account update should be. 
+ let txs = MockProvenTxBuilder::sequential(); + let account = txs.first().unwrap().account_id(); + let from = txs.first().unwrap().account_update().initial_state_commitment(); + let to = txs.last().unwrap().account_update().final_state_commitment(); + let expected = std::iter::once((account, from, to)); + + let mut block = BlockNode::new(BlockNumber::default()); + + for tx in txs { + let mut batch = SelectedBatch::builder(); + batch.push(tx.clone()); + let batch = batch.build(); + let batch = ProposedBatchNode::new(batch); + + let account_update = BatchAccountUpdate::from_transaction(tx.raw_proven_transaction()); + + let tx_header = TransactionHeader::from(tx.raw_proven_transaction()); + let proven_batch = ProvenBatch::new( + batch.batch_id(), + Word::default(), + BlockNumber::default(), + BTreeMap::from([(account_update.account_id(), account_update)]), + InputNotes::default(), + Vec::default(), + BlockNumber::from(u32::MAX), + OrderedTransactionHeaders::new_unchecked(vec![tx_header]), + ) + .unwrap(); + + let batch = batch.into_proven_batch_node(Arc::new(proven_batch)); + block.push(batch); + } + + itertools::assert_equal(block.account_updates(), expected); + } +} diff --git a/crates/block-producer/src/mempool/state.rs b/crates/block-producer/src/mempool/state.rs index efa32614ba..b4db41657b 100644 --- a/crates/block-producer/src/mempool/state.rs +++ b/crates/block-producer/src/mempool/state.rs @@ -107,8 +107,10 @@ impl InflightState { } for (account, from, to) in node.account_updates() { - let Entry::Occupied(entry) = - self.accounts.entry(account).and_modify(|entry| entry.remove(from, to)) + let Entry::Occupied(entry) = self + .accounts + .entry(account) + .and_modify(|entry| entry.remove(node.id(), from, to)) else { panic!("Account {account} update ({from} -> {to}) was not present for removal"); }; @@ -145,8 +147,8 @@ impl InflightState { let account_parents = node .account_updates() - .filter_map(|(account, from, _to)| { - 
self.accounts.get(&account).map(|account| account.parent(&from)) + .filter_map(|(account, from, to)| { + self.accounts.get(&account).map(|account| account.parents(from, to)) }) .flatten(); @@ -169,8 +171,8 @@ impl InflightState { let account_children = node .account_updates() - .filter_map(|(account, _from, to)| { - self.accounts.get(&account).map(|account| account.child(&to)) + .filter_map(|(account, from, to)| { + self.accounts.get(&account).map(|account| account.children(from, to)) }) .flatten(); @@ -197,6 +199,27 @@ impl InflightState { struct AccountUpdates { from: HashMap, to: HashMap, + /// This holds updates from nodes where the initial commitment is the same as the final + /// commitment aka no actual change was made to the account. + /// + /// This sounds counter-intuitive, but is caused by so-called pass-through transactions which + /// use an account at some state `A` but only consume and emit notes without changing the + /// account state itself. + /// + /// These still need to be tracked as part of account updates since they require that an + /// account is in the given state. Since we want these node's to be processed before the + /// account state is changed, this implies that they must be considered children of the + /// non-pass-through node that created the state. Similarly, they must be considered + /// parents of any non-pass-through node that changes to another state as otherwise this + /// node might be processed before the pass-through nodes are. + /// + /// Pass-through nodes with the same state are considered siblings of each as they don't + /// actually depend on each other, and may be processed in any order. + /// + /// Note also that its possible for any node's updates to an account to solely consist of + /// pass-through transactions and therefore in turn is a pass-through node from the perspective + /// of that account. 
+ pass_through: HashMap>, } impl AccountUpdates { @@ -206,53 +229,105 @@ impl AccountUpdates { self.to .keys() .find(|commitment| !self.from.contains_key(commitment)) + .or(self.pass_through.keys().next()) .copied() .unwrap_or_default() } fn is_empty(&self) -> bool { - self.from.is_empty() && self.to.is_empty() + self.from.is_empty() && self.to.is_empty() && self.pass_through.is_empty() } - fn remove(&mut self, from: Word, to: Word) { - let from_removed = self - .from - .remove(&from) - .expect("should only be removing account updates from nodes that are present"); - let to_removed = self - .to - .remove(&to) - .expect("should only be removing account updates from nodes that are present"); - assert_eq!( - from_removed, to_removed, - "Account updates should be removed as a pair with the same node ID" - ); + fn remove(&mut self, id: NodeId, from: Word, to: Word) { + if from == to { + let entry = self.pass_through.entry(from).or_default(); + assert!( + entry.remove(&id), + "Account pass through commitment removal of {from} for {id:?} does not exist" + ); + if entry.is_empty() { + self.pass_through.remove(&from); + } + } else { + let from_removed = self + .from + .remove(&from) + .expect("should only be removing account updates from nodes that are present"); + let to_removed = self + .to + .remove(&to) + .expect("should only be removing account updates from nodes that are present"); + assert_eq!( + from_removed, to_removed, + "Account updates should be removed as a pair with the same node ID" + ); + assert_eq!(from_removed, id, "Account update removal should match the input node ID",); + } } fn insert(&mut self, id: NodeId, from: Word, to: Word) { - assert!( - self.from.insert(from, id).is_none(), - "Account already contained the commitment {from} when inserting {id:?}" - ); - assert!( - self.to.insert(to, id).is_none(), - "Account already contained the commitment {to} when inserting {id:?}" - ); + if from == to { + assert!( + 
self.pass_through.entry(from).or_default().insert(id), + "Account already contained the pass through commitment {from} for node {id:?}" + ); + } else { + assert!( + self.from.insert(from, id).is_none(), + "Account already contained the commitment {from} when inserting {id:?}" + ); + assert!( + self.to.insert(to, id).is_none(), + "Account already contained the commitment {to} when inserting {id:?}" + ); + } } - /// Returns the node ID that updated this account's commitment to the given value. + /// Returns the node IDs that updated this account's commitment to the given value. /// - /// In other words, this returns the ID of `node` where `node.to == from`. This infers the + /// Note that this might be multiple IDs due to pass through transactions. When the input + /// is itself a pass through transaction (`from == to`), then its sibling pass through + /// transactions are not considered parents as they are siblings. + /// + /// In other words, this returns the IDs of `node` where `node.to == from`. This infers the /// parent-child relationship where `parent.to == child.from`. - fn parent(&self, from: &Word) -> Option<&NodeId> { - self.to.get(from) + fn parents(&self, from: Word, to: Word) -> impl Iterator { + let direct_parent = self.to.get(&from).into_iter(); + + // If the node query isn't for a pass-through node, then it must also consider pass-through + // nodes at its `from` commitment as parents. + // + // This means the query node depends on the pass-through nodes since these must be processed + // before the account commitment may change. + let pass_through_parents = (from != to) + .then(|| self.pass_through.get(&from).map(HashSet::iter)) + .flatten() + .unwrap_or_default(); + + direct_parent.chain(pass_through_parents) } /// Returns the node ID that consumed the given commitment. /// + /// Note that this might be multiple IDs due to pass through transactions. 
When the input + /// is itself a pass through transaction (`from == to`), then its sibling pass through + /// transactions are not considered children as they are siblings. + /// /// In other words, this returns the ID of `node` where `node.from == to`. This infers the /// parent-child relationship where `parent.to == child.from`. - fn child(&self, to: &Word) -> Option<&NodeId> { - self.from.get(to) + fn children(&self, from: Word, to: Word) -> impl Iterator { + let direct_child = self.from.get(&to).into_iter(); + + // If the node query isn't for a pass-through node, then it must also consider pass-through + // nodes at its `to` commitment as children. + // + // This means the pass-through nodes depend on the query node since it changes the account + // commitment to the state required by the pass-through nodes. + let pass_through_children = (from != to) + .then(|| self.pass_through.get(&to).map(HashSet::iter)) + .flatten() + .unwrap_or_default(); + + direct_child.chain(pass_through_children) } } diff --git a/crates/block-producer/src/mempool/tests.rs b/crates/block-producer/src/mempool/tests.rs index e2b556cd78..0f41e96601 100644 --- a/crates/block-producer/src/mempool/tests.rs +++ b/crates/block-producer/src/mempool/tests.rs @@ -68,23 +68,23 @@ fn children_of_failed_batches_are_ignored() { let (mut uut, _) = Mempool::for_tests(); uut.add_transaction(txs[0].clone()).unwrap(); - let (parent_batch, batch_txs) = uut.select_batch().unwrap(); - assert_eq!(batch_txs, vec![txs[0].clone()]); + let parent_batch = uut.select_batch().unwrap(); + assert_eq!(parent_batch.txs(), vec![txs[0].clone()]); uut.add_transaction(txs[1].clone()).unwrap(); - let (child_batch_a, batch_txs) = uut.select_batch().unwrap(); - assert_eq!(batch_txs, vec![txs[1].clone()]); + let child_batch_a = uut.select_batch().unwrap(); + assert_eq!(child_batch_a.txs(), vec![txs[1].clone()]); uut.add_transaction(txs[2].clone()).unwrap(); - let (_, batch_txs) = uut.select_batch().unwrap(); - 
assert_eq!(batch_txs, vec![txs[2].clone()]); + let next_batch = uut.select_batch().unwrap(); + assert_eq!(next_batch.txs(), vec![txs[2].clone()]); // Child batch jobs are now dangling. - uut.rollback_batch(parent_batch); + uut.rollback_batch(parent_batch.id()); let reference = uut.clone(); // Success or failure of the child job should effectively do nothing. - uut.rollback_batch(child_batch_a); + uut.rollback_batch(child_batch_a.id()); assert_eq!(uut, reference); let proven_batch = @@ -102,13 +102,13 @@ fn failed_batch_transactions_are_requeued() { uut.select_batch().unwrap(); uut.add_transaction(txs[1].clone()).unwrap(); - let (failed_batch, _) = uut.select_batch().unwrap(); + let failed_batch = uut.select_batch().unwrap(); uut.add_transaction(txs[2].clone()).unwrap(); uut.select_batch().unwrap(); // Middle batch failed, so it and its child transaction should be re-entered into the queue. - uut.rollback_batch(failed_batch); + uut.rollback_batch(failed_batch.id()); reference.add_transaction(txs[0].clone()).unwrap(); reference.select_batch().unwrap(); @@ -226,10 +226,10 @@ fn subtree_reversion_removes_all_descendents() { uut.select_batch().unwrap(); uut.add_transaction(reverted_txs[1].clone()).unwrap(); - let (to_revert, _) = uut.select_batch().unwrap(); + let to_revert = uut.select_batch().unwrap(); uut.add_transaction(reverted_txs[2].clone()).unwrap(); - uut.revert_subtree(NodeId::ProposedBatch(to_revert)); + uut.revert_subtree(NodeId::ProposedBatch(to_revert.id())); // We expect the second batch and the latter reverted txns to be non-existent. 
reference.add_transaction(reverted_txs[0].clone()).unwrap(); @@ -255,11 +255,11 @@ fn transactions_from_reverted_batches_are_requeued() { uut.add_transaction(tx_set_b[1].clone()).unwrap(); uut.add_transaction(tx_set_a[1].clone()).unwrap(); - let (batch_id, _) = uut.select_batch().unwrap(); + let batch = uut.select_batch().unwrap(); uut.add_transaction(tx_set_b[2].clone()).unwrap(); uut.add_transaction(tx_set_a[2].clone()).unwrap(); - uut.rollback_batch(batch_id); + uut.rollback_batch(batch.id()); reference.add_transaction(tx_set_b[0].clone()).unwrap(); reference.add_transaction(tx_set_a[0].clone()).unwrap(); @@ -271,3 +271,97 @@ fn transactions_from_reverted_batches_are_requeued() { assert_eq!(uut, reference); } + +/// This test checks that pass through transactions can successfully be added to an empty mempool, +/// and that they work as expected. +#[test] +fn pass_through_txs_on_an_empty_account() { + let (mut uut, _) = Mempool::for_tests(); + + let tx_final = MockProvenTxBuilder::with_account_index(0).build(); + let tx_final = Arc::new(AuthenticatedTransaction::from_inner(tx_final)); + + let account_update = tx_final.account_update().clone(); + let tx_pass_through_base = MockProvenTxBuilder::with_account( + account_update.account_id(), + account_update.initial_state_commitment(), + account_update.initial_state_commitment(), + ); + + // Note: transactions _must_ have an input note or update an account to be considered valid. + // Since by definition pass through txs don't update an account, they must have a nullifier. 
+ let tx_pass_through_a = tx_pass_through_base.clone().nullifiers_range(0..2).build(); + let tx_pass_through_a = Arc::new(AuthenticatedTransaction::from_inner(tx_pass_through_a)); + + let tx_pass_through_b = tx_pass_through_base.nullifiers_range(3..5).build(); + let tx_pass_through_b = Arc::new(AuthenticatedTransaction::from_inner(tx_pass_through_b)); + + uut.add_transaction(tx_pass_through_a.clone()).unwrap(); + uut.add_transaction(tx_pass_through_b.clone()).unwrap(); + uut.add_transaction(tx_final.clone()).unwrap(); + + let batch = uut.select_batch().unwrap(); + + // Ensure the batch correctly aggregates the account update. + let expected = std::iter::once(( + account_update.account_id(), + account_update.initial_state_commitment(), + account_update.final_state_commitment(), + )); + itertools::assert_equal(batch.account_updates(), expected); + + // Ensure the batch contains a,b and final. Final should also be the last tx since its order + // is required. + assert!(batch.txs().contains(&tx_pass_through_a)); + assert!(batch.txs().contains(&tx_pass_through_b)); + assert_eq!(batch.txs().last().unwrap(), &tx_final); +} + +/// Tests that pass through transactions retain parent-child relations based on notes, even though +/// they act as "siblings" for account purposes. +#[test] +fn pass_through_txs_with_note_dependencies() { + let (mut uut, mut reference) = Mempool::for_tests(); + + // Used to get a valid account ID. + let tx_final = MockProvenTxBuilder::with_account_index(0).build(); + let account_update = tx_final.account_update(); + + let tx_pass_through_base = MockProvenTxBuilder::with_account( + account_update.account_id(), + account_update.initial_state_commitment(), + account_update.initial_state_commitment(), + ); + + // Note: transactions _must_ have an input note or update an account to be considered valid. + // Since by definition pass through txs don't update an account, they must have a nullifier. 
+ let tx_pass_through_a = tx_pass_through_base + .clone() + .nullifiers_range(0..2) + .private_notes_created_range(3..4) + .build(); + let tx_pass_through_a = Arc::new(AuthenticatedTransaction::from_inner(tx_pass_through_a)); + + // This includes a note (3) created by (a). + let tx_pass_through_b = tx_pass_through_base.unauthenticated_notes_range(3..4).build(); + let tx_pass_through_b = Arc::new(AuthenticatedTransaction::from_inner(tx_pass_through_b)); + + // Select batches such that (a) and (b) go into separate batches. + // + // We then rollback batch (a) and check that batch (b) is also reverted which tests that the + // relationship was correctly inferred by the mempool. + uut.add_transaction(tx_pass_through_a.clone()).unwrap(); + let batch_a = uut.select_batch().unwrap(); + assert_eq!(batch_a.txs(), std::slice::from_ref(&tx_pass_through_a)); + + uut.add_transaction(tx_pass_through_b.clone()).unwrap(); + let batch_b = uut.select_batch().unwrap(); + assert_eq!(batch_b.txs(), std::slice::from_ref(&tx_pass_through_b)); + + // Rollback (a) and check that (b) also reverted by comparing to the reference. 
+ uut.rollback_batch(batch_a.id()); + reference.add_transaction(tx_pass_through_a).unwrap(); + reference.add_transaction(tx_pass_through_b).unwrap(); + + assert_eq!(uut, reference); +} diff --git a/crates/block-producer/src/test_utils/proven_tx.rs b/crates/block-producer/src/test_utils/proven_tx.rs index f08a602f3d..1f53662fe9 100644 --- a/crates/block-producer/src/test_utils/proven_tx.rs +++ b/crates/block-producer/src/test_utils/proven_tx.rs @@ -20,6 +20,7 @@ use rand::Rng; use super::MockPrivateAccount; use crate::domain::transaction::AuthenticatedTransaction; +#[derive(Clone)] pub struct MockProvenTxBuilder { account_id: AccountId, initial_account_commitment: Word, From c4f91319228fd95dbc4a65299948edea53123024 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 12 Dec 2025 11:09:33 +0200 Subject: [PATCH 048/125] chore(store): retain source error for sql conversions (#1442) --- crates/store/src/db/models/conv.rs | 56 +++++++++++++++++++----------- 1 file changed, 35 insertions(+), 21 deletions(-) diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index a48a2ccfd4..7ef547ac61 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -32,36 +32,47 @@ on relevant platforms" )] -use std::any::type_name; - -use miden_node_proto::domain::account::{NetworkAccountError, NetworkAccountPrefix}; +use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_objects::Felt; use miden_objects::account::StorageSlotName; use miden_objects::block::BlockNumber; use miden_objects::note::{NoteExecutionMode, NoteTag}; #[derive(Debug, thiserror::Error)] -#[error("failed to convert a database value to it's in memory type {0}")] -pub struct DatabaseTypeConversionError(&'static str); +#[error("failed to convert from database type {from_type} into {into_type}")] +pub struct DatabaseTypeConversionError { + source: Box, + from_type: &'static str, + into_type: 
&'static str, +} /// Convert from and to it's database representation and back /// /// We do not assume sanity of DB types. pub(crate) trait SqlTypeConvert: Sized { type Raw: Sized; - type Error: std::error::Error + Send + Sync + 'static; + fn to_raw_sql(self) -> Self::Raw; - fn from_raw_sql(_raw: Self::Raw) -> Result; + fn from_raw_sql(_raw: Self::Raw) -> Result; + + fn map_err( + source: E, + ) -> DatabaseTypeConversionError { + DatabaseTypeConversionError { + source: Box::new(source), + from_type: std::any::type_name::(), + into_type: std::any::type_name::(), + } + } } impl SqlTypeConvert for BlockNumber { type Raw = i64; - type Error = DatabaseTypeConversionError; - fn from_raw_sql(raw: Self::Raw) -> Result { - u32::try_from(raw) - .map(BlockNumber::from) - .map_err(|_| DatabaseTypeConversionError(type_name::())) + + fn from_raw_sql(raw: Self::Raw) -> Result { + u32::try_from(raw).map(BlockNumber::from).map_err(Self::map_err) } + fn to_raw_sql(self) -> Self::Raw { i64::from(self.as_u32()) } @@ -69,10 +80,9 @@ impl SqlTypeConvert for BlockNumber { impl SqlTypeConvert for NetworkAccountPrefix { type Raw = i64; - type Error = DatabaseTypeConversionError; - fn from_raw_sql(raw: Self::Raw) -> Result { - NetworkAccountPrefix::try_from(raw as u32) - .map_err(|_e| DatabaseTypeConversionError(type_name::())) + + fn from_raw_sql(raw: Self::Raw) -> Result { + NetworkAccountPrefix::try_from(raw as u32).map_err(Self::map_err) } fn to_raw_sql(self) -> Self::Raw { i64::from(self.inner()) @@ -81,14 +91,19 @@ impl SqlTypeConvert for NetworkAccountPrefix { impl SqlTypeConvert for NoteExecutionMode { type Raw = i32; - type Error = DatabaseTypeConversionError; #[inline(always)] - fn from_raw_sql(raw: Self::Raw) -> Result { + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("valid values are 0 or 1 but found {0}")] + struct ValueError(i32); + Ok(match raw { 0 => Self::Network, 1 => Self::Local, - _ => return 
Err(DatabaseTypeConversionError(type_name::())), + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, }) } @@ -103,10 +118,9 @@ impl SqlTypeConvert for NoteExecutionMode { impl SqlTypeConvert for NoteTag { type Raw = i32; - type Error = DatabaseTypeConversionError; #[inline(always)] - fn from_raw_sql(raw: Self::Raw) -> Result { + fn from_raw_sql(raw: Self::Raw) -> Result { #[allow(clippy::cast_sign_loss)] Ok(NoteTag::from(raw as u32)) } From 08ce4886ecc2f64676dbde7f870ebd02a46be655 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Fri, 12 Dec 2025 15:17:30 -0300 Subject: [PATCH 049/125] fix: use proper trait impl (#1451) --- crates/store/src/db/models/conv.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 7ef547ac61..0a4cd97c95 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -133,11 +133,9 @@ impl SqlTypeConvert for NoteTag { impl SqlTypeConvert for StorageSlotName { type Raw = String; - type Error = DatabaseTypeConversionError; - fn from_raw_sql(raw: Self::Raw) -> Result { - StorageSlotName::new(raw) - .map_err(|_| DatabaseTypeConversionError(type_name::())) + fn from_raw_sql(raw: Self::Raw) -> Result { + StorageSlotName::new(raw).map_err(Self::map_err) } fn to_raw_sql(self) -> Self::Raw { From aacb82c3b068b54e7d00492a490d6a5d82a4dfcf Mon Sep 17 00:00:00 2001 From: sergerad Date: Mon, 15 Dec 2025 09:55:53 +1300 Subject: [PATCH 050/125] Use RPC to submit proven tx in ntx-builder --- bin/node/src/commands/bundled.rs | 1 + crates/ntx-builder/src/actor/execute.rs | 40 +++++++++++------------- crates/ntx-builder/src/actor/mod.rs | 14 ++++----- crates/ntx-builder/src/block_producer.rs | 16 ---------- crates/ntx-builder/src/builder.rs | 6 +++- crates/ntx-builder/src/lib.rs | 1 + 6 files changed, 33 insertions(+), 45 deletions(-) diff --git 
a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 09bdcf0257..b928222da7 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -275,6 +275,7 @@ impl BundledCommand { NetworkTransactionBuilder::new( store_ntx_builder_url, block_producer_url, + rpc_url, ntx_builder.tx_prover_url, ntx_builder.ticker_interval, checkpoint, diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index ff306b84df..2449f3880f 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -46,7 +46,7 @@ use tracing::{Instrument, instrument}; use crate::COMPONENT; use crate::actor::account_state::TransactionCandidate; -use crate::block_producer::BlockProducerClient; +use crate::rpc::RpcClient; use crate::store::StoreClient; #[derive(Debug, thiserror::Error)] @@ -75,7 +75,8 @@ type NtxResult = Result; /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - block_producer: BlockProducerClient, + /// Client for submitting transactions to the network. + rpc_client: RpcClient, /// The prover to delegate proofs to. /// @@ -93,17 +94,12 @@ pub struct NtxContext { impl NtxContext { /// Creates a new [`NtxContext`] instance. 
pub fn new( - block_producer: BlockProducerClient, + rpc_client: RpcClient, prover: Option, store: StoreClient, script_cache: LruCache, ) -> Self { - Self { - block_producer, - prover, - store, - script_cache, - } + Self { rpc_client, prover, store, script_cache } } /// Executes a transaction end-to-end: filtering, executing, proving, and submitted to the block @@ -147,7 +143,7 @@ impl NtxContext { .set_attribute("reference_block.number", chain_tip_header.block_num()); async move { - async move { + Box::pin(async move { let data_store = NtxDataStore::new( account, chain_tip_header, @@ -157,13 +153,15 @@ impl NtxContext { ); let notes = notes.into_iter().map(Note::from).collect::>(); - let (successful, failed) = self.filter_notes(&data_store, notes).await?; - let executed = Box::pin(self.execute(&data_store, successful)).await?; - let proven = Box::pin(self.prove(executed.into())).await?; - let tx_id = proven.id(); - self.submit(proven).await?; - Ok((tx_id, failed)) - } + let (successful_notes, failed_notes) = + self.filter_notes(&data_store, notes).await?; + let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; + let tx_inputs: TransactionInputs = executed_tx.into(); + let proven_tx = Box::pin(self.prove(tx_inputs.clone())).await?; + let tx_id = proven_tx.id(); + self.submit(proven_tx, tx_inputs).await?; + Ok((tx_id, failed_notes)) + }) .in_current_span() .await .inspect_err(|err| tracing::Span::current().set_error(err)) @@ -256,11 +254,11 @@ impl NtxContext { .map_err(NtxError::Proving) } - /// Submits the transaction to the block producer. + /// Submits the transaction to the RPC server with transaction inputs. 
#[instrument(target = COMPONENT, name = "ntx.execute_transaction.submit", skip_all, err)] - async fn submit(&self, tx: ProvenTransaction) -> NtxResult<()> { - self.block_producer - .submit_proven_transaction(tx) + async fn submit(&self, tx: ProvenTransaction, tx_inputs: TransactionInputs) -> NtxResult<()> { + self.rpc_client + .submit_proven_transaction(tx, tx_inputs) .await .map_err(NtxError::Submission) } diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index 947890ede1..d5a1ab794f 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -22,8 +22,8 @@ use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use url::Url; -use crate::block_producer::BlockProducerClient; use crate::builder::ChainState; +use crate::rpc::RpcClient; use crate::store::StoreClient; // ACTOR SHUTDOWN REASON @@ -52,8 +52,8 @@ pub enum ActorShutdownReason { pub struct AccountActorContext { /// Client for interacting with the store in order to load account state. pub store: StoreClient, - /// Address of the block producer gRPC server. - pub block_producer_url: Url, + /// Address of the RPC gRPC server. + pub rpc_url: Url, /// Address of the remote prover. If `None`, transactions will be proven locally, which is // undesirable due to the performance impact. 
pub tx_prover_url: Option, @@ -153,7 +153,7 @@ pub struct AccountActor { mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, - block_producer: BlockProducerClient, + rpc_client: RpcClient, prover: Option, chain_state: Arc>, script_cache: LruCache, @@ -168,7 +168,7 @@ impl AccountActor { event_rx: mpsc::Receiver>, cancel_token: CancellationToken, ) -> Self { - let block_producer = BlockProducerClient::new(actor_context.block_producer_url.clone()); + let rpc_client = RpcClient::new(actor_context.rpc_url.clone()); let prover = actor_context.tx_prover_url.clone().map(RemoteTransactionProver::new); Self { origin, @@ -176,7 +176,7 @@ impl AccountActor { mode: ActorMode::NoViableNotes, event_rx, cancel_token, - block_producer, + rpc_client, prover, chain_state: actor_context.chain_state.clone(), script_cache: actor_context.script_cache.clone(), @@ -275,7 +275,7 @@ impl AccountActor { // Execute the selected transaction. let context = execute::NtxContext::new( - self.block_producer.clone(), + self.rpc_client.clone(), self.prover.clone(), self.store.clone(), self.script_cache.clone(), diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index b8926e1ec3..2c85d5b51a 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -6,8 +6,6 @@ use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::generated::{self as proto}; use miden_node_utils::FlattenResult; use miden_objects::block::BlockNumber; -use miden_objects::transaction::ProvenTransaction; -use miden_tx::utils::Serializable; use tokio_stream::StreamExt; use tonic::Status; use tracing::{info, instrument}; @@ -41,20 +39,6 @@ impl BlockProducerClient { Self { client: block_producer } } - #[instrument(target = COMPONENT, name = "ntx.block_producer.client.submit_proven_transaction", skip_all, err)] - pub async fn submit_proven_transaction( - &self, - proven_tx: ProvenTransaction, - ) -> 
Result<(), Status> { - let request = proto::transaction::ProvenTransaction { - transaction: proven_tx.to_bytes(), - transaction_inputs: None, - }; - - self.client.clone().submit_proven_transaction(request).await?; - - Ok(()) - } #[instrument(target = COMPONENT, name = "ntx.block_producer.client.subscribe_to_mempool", skip_all, err)] pub async fn subscribe_to_mempool_with_retry( diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index c74f2dacd5..be80f8a183 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -73,6 +73,8 @@ pub struct NetworkTransactionBuilder { store_url: Url, /// Address of the block producer gRPC server. block_producer_url: Url, + /// Address of the RPC gRPC server. + rpc_url: Url, /// Address of the remote prover. If `None`, transactions will be proven locally, which is /// undesirable due to the performance impact. tx_prover_url: Option, @@ -101,6 +103,7 @@ impl NetworkTransactionBuilder { pub fn new( store_url: Url, block_producer_url: Url, + rpc_url: Url, tx_prover_url: Option, ticker_interval: Duration, bp_checkpoint: Arc, @@ -110,6 +113,7 @@ impl NetworkTransactionBuilder { Self { store_url, block_producer_url, + rpc_url, tx_prover_url, ticker_interval, bp_checkpoint, @@ -145,7 +149,7 @@ impl NetworkTransactionBuilder { let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); let actor_context = AccountActorContext { - block_producer_url: self.block_producer_url.clone(), + rpc_url: self.rpc_url.clone(), tx_prover_url: self.tx_prover_url.clone(), chain_state: chain_state.clone(), store: store.clone(), diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index b0d89f94c8..5b71ff0605 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -4,6 +4,7 @@ mod actor; mod block_producer; mod builder; mod coordinator; +mod rpc; mod store; pub use builder::NetworkTransactionBuilder; From 
ab880c510fb5ba04894a0146fc1593e9b76eef9b Mon Sep 17 00:00:00 2001 From: sergerad Date: Mon, 15 Dec 2025 09:58:40 +1300 Subject: [PATCH 051/125] Revert "Use RPC to submit proven tx in ntx-builder" This reverts commit aacb82c3b068b54e7d00492a490d6a5d82a4dfcf. --- bin/node/src/commands/bundled.rs | 1 - crates/ntx-builder/src/actor/execute.rs | 40 +++++++++++++----------- crates/ntx-builder/src/actor/mod.rs | 14 ++++----- crates/ntx-builder/src/block_producer.rs | 16 ++++++++++ crates/ntx-builder/src/builder.rs | 6 +--- crates/ntx-builder/src/lib.rs | 1 - 6 files changed, 45 insertions(+), 33 deletions(-) diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index b928222da7..09bdcf0257 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -275,7 +275,6 @@ impl BundledCommand { NetworkTransactionBuilder::new( store_ntx_builder_url, block_producer_url, - rpc_url, ntx_builder.tx_prover_url, ntx_builder.ticker_interval, checkpoint, diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 2449f3880f..ff306b84df 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -46,7 +46,7 @@ use tracing::{Instrument, instrument}; use crate::COMPONENT; use crate::actor::account_state::TransactionCandidate; -use crate::rpc::RpcClient; +use crate::block_producer::BlockProducerClient; use crate::store::StoreClient; #[derive(Debug, thiserror::Error)] @@ -75,8 +75,7 @@ type NtxResult = Result; /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - /// Client for submitting transactions to the network. - rpc_client: RpcClient, + block_producer: BlockProducerClient, /// The prover to delegate proofs to. /// @@ -94,12 +93,17 @@ pub struct NtxContext { impl NtxContext { /// Creates a new [`NtxContext`] instance. 
pub fn new( - rpc_client: RpcClient, + block_producer: BlockProducerClient, prover: Option, store: StoreClient, script_cache: LruCache, ) -> Self { - Self { rpc_client, prover, store, script_cache } + Self { + block_producer, + prover, + store, + script_cache, + } } /// Executes a transaction end-to-end: filtering, executing, proving, and submitted to the block @@ -143,7 +147,7 @@ impl NtxContext { .set_attribute("reference_block.number", chain_tip_header.block_num()); async move { - Box::pin(async move { + async move { let data_store = NtxDataStore::new( account, chain_tip_header, @@ -153,15 +157,13 @@ impl NtxContext { ); let notes = notes.into_iter().map(Note::from).collect::>(); - let (successful_notes, failed_notes) = - self.filter_notes(&data_store, notes).await?; - let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; - let tx_inputs: TransactionInputs = executed_tx.into(); - let proven_tx = Box::pin(self.prove(tx_inputs.clone())).await?; - let tx_id = proven_tx.id(); - self.submit(proven_tx, tx_inputs).await?; - Ok((tx_id, failed_notes)) - }) + let (successful, failed) = self.filter_notes(&data_store, notes).await?; + let executed = Box::pin(self.execute(&data_store, successful)).await?; + let proven = Box::pin(self.prove(executed.into())).await?; + let tx_id = proven.id(); + self.submit(proven).await?; + Ok((tx_id, failed)) + } .in_current_span() .await .inspect_err(|err| tracing::Span::current().set_error(err)) @@ -254,11 +256,11 @@ impl NtxContext { .map_err(NtxError::Proving) } - /// Submits the transaction to the RPC server with transaction inputs. + /// Submits the transaction to the block producer. 
#[instrument(target = COMPONENT, name = "ntx.execute_transaction.submit", skip_all, err)] - async fn submit(&self, tx: ProvenTransaction, tx_inputs: TransactionInputs) -> NtxResult<()> { - self.rpc_client - .submit_proven_transaction(tx, tx_inputs) + async fn submit(&self, tx: ProvenTransaction) -> NtxResult<()> { + self.block_producer + .submit_proven_transaction(tx) .await .map_err(NtxError::Submission) } diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index d5a1ab794f..947890ede1 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -22,8 +22,8 @@ use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use url::Url; +use crate::block_producer::BlockProducerClient; use crate::builder::ChainState; -use crate::rpc::RpcClient; use crate::store::StoreClient; // ACTOR SHUTDOWN REASON @@ -52,8 +52,8 @@ pub enum ActorShutdownReason { pub struct AccountActorContext { /// Client for interacting with the store in order to load account state. pub store: StoreClient, - /// Address of the RPC gRPC server. - pub rpc_url: Url, + /// Address of the block producer gRPC server. + pub block_producer_url: Url, /// Address of the remote prover. If `None`, transactions will be proven locally, which is // undesirable due to the performance impact. 
pub tx_prover_url: Option, @@ -153,7 +153,7 @@ pub struct AccountActor { mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, - rpc_client: RpcClient, + block_producer: BlockProducerClient, prover: Option, chain_state: Arc>, script_cache: LruCache, @@ -168,7 +168,7 @@ impl AccountActor { event_rx: mpsc::Receiver>, cancel_token: CancellationToken, ) -> Self { - let rpc_client = RpcClient::new(actor_context.rpc_url.clone()); + let block_producer = BlockProducerClient::new(actor_context.block_producer_url.clone()); let prover = actor_context.tx_prover_url.clone().map(RemoteTransactionProver::new); Self { origin, @@ -176,7 +176,7 @@ impl AccountActor { mode: ActorMode::NoViableNotes, event_rx, cancel_token, - rpc_client, + block_producer, prover, chain_state: actor_context.chain_state.clone(), script_cache: actor_context.script_cache.clone(), @@ -275,7 +275,7 @@ impl AccountActor { // Execute the selected transaction. let context = execute::NtxContext::new( - self.rpc_client.clone(), + self.block_producer.clone(), self.prover.clone(), self.store.clone(), self.script_cache.clone(), diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index 2c85d5b51a..b8926e1ec3 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -6,6 +6,8 @@ use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::generated::{self as proto}; use miden_node_utils::FlattenResult; use miden_objects::block::BlockNumber; +use miden_objects::transaction::ProvenTransaction; +use miden_tx::utils::Serializable; use tokio_stream::StreamExt; use tonic::Status; use tracing::{info, instrument}; @@ -39,6 +41,20 @@ impl BlockProducerClient { Self { client: block_producer } } + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.submit_proven_transaction", skip_all, err)] + pub async fn submit_proven_transaction( + &self, + proven_tx: ProvenTransaction, + ) -> 
Result<(), Status> { + let request = proto::transaction::ProvenTransaction { + transaction: proven_tx.to_bytes(), + transaction_inputs: None, + }; + + self.client.clone().submit_proven_transaction(request).await?; + + Ok(()) + } #[instrument(target = COMPONENT, name = "ntx.block_producer.client.subscribe_to_mempool", skip_all, err)] pub async fn subscribe_to_mempool_with_retry( diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index be80f8a183..c74f2dacd5 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -73,8 +73,6 @@ pub struct NetworkTransactionBuilder { store_url: Url, /// Address of the block producer gRPC server. block_producer_url: Url, - /// Address of the RPC gRPC server. - rpc_url: Url, /// Address of the remote prover. If `None`, transactions will be proven locally, which is /// undesirable due to the performance impact. tx_prover_url: Option, @@ -103,7 +101,6 @@ impl NetworkTransactionBuilder { pub fn new( store_url: Url, block_producer_url: Url, - rpc_url: Url, tx_prover_url: Option, ticker_interval: Duration, bp_checkpoint: Arc, @@ -113,7 +110,6 @@ impl NetworkTransactionBuilder { Self { store_url, block_producer_url, - rpc_url, tx_prover_url, ticker_interval, bp_checkpoint, @@ -149,7 +145,7 @@ impl NetworkTransactionBuilder { let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); let actor_context = AccountActorContext { - rpc_url: self.rpc_url.clone(), + block_producer_url: self.block_producer_url.clone(), tx_prover_url: self.tx_prover_url.clone(), chain_state: chain_state.clone(), store: store.clone(), diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index 5b71ff0605..b0d89f94c8 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -4,7 +4,6 @@ mod actor; mod block_producer; mod builder; mod coordinator; -mod rpc; mod store; pub use builder::NetworkTransactionBuilder; From 
53660bebee56cfddb59a19c506fcb321c1b4669e Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Tue, 16 Dec 2025 08:50:37 +1300 Subject: [PATCH 052/125] feat: Validator block signatures (#1426) --- CHANGELOG.md | 1 + Cargo.lock | 32 ++++---- bin/node/.env | 1 + bin/node/Cargo.toml | 1 + bin/node/Dockerfile | 7 +- bin/node/src/commands/bundled.rs | 55 +++++++++++-- bin/node/src/commands/mod.rs | 5 ++ bin/node/src/commands/store.rs | 49 +++++++---- bin/node/src/commands/validator.rs | 18 +++- bin/stress-test/src/seeding/mod.rs | 7 +- .../block-producer/src/block_builder/mod.rs | 53 ++++-------- crates/block-producer/src/errors.rs | 2 + crates/block-producer/src/validator/mod.rs | 82 ++----------------- crates/proto/src/domain/block.rs | 53 +++++++++++- crates/proto/src/generated/blockchain.rs | 20 ++++- crates/proto/src/generated/validator.rs | 22 ++--- crates/rpc/src/tests.rs | 5 +- crates/store/src/db/tests.rs | 26 ++++-- crates/store/src/genesis/config/errors.rs | 5 ++ crates/store/src/genesis/config/mod.rs | 23 ++++-- crates/store/src/genesis/config/tests.rs | 5 +- crates/store/src/genesis/mod.rs | 32 +++----- crates/store/src/server/mod.rs | 6 +- crates/validator/src/server/mod.rs | 36 ++++---- proto/proto/internal/validator.proto | 17 +--- proto/proto/types/blockchain.proto | 25 +++++- 26 files changed, 334 insertions(+), 254 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9df3c1b39..34017c3e0b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). - Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). - [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). 
+- [BREAKING] Added block signing capabilities to Validator component and updated gensis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)). - Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). - Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). diff --git a/Cargo.lock b/Cargo.lock index 79315c56c5..9ee51162d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1347,7 +1347,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2124,7 +2124,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2499,7 +2499,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" dependencies = [ "miden-objects", "thiserror 2.0.17", @@ -2595,7 +2595,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" dependencies = [ "fs-err", "miden-assembly", @@ -2698,6 +2698,7 @@ dependencies = [ "clap 4.5.53", "figment", "fs-err", + "hex", "humantime", "miden-node-block-producer", "miden-node-ntx-builder", @@ -2961,7 +2962,7 @@ dependencies = [ [[package]] name = "miden-objects" 
version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" dependencies = [ "bech32", "getrandom 0.3.4", @@ -2976,6 +2977,7 @@ dependencies = [ "miden-utils-sync", "miden-verifier", "rand 0.9.2", + "rand_chacha 0.9.0", "rand_xoshiro", "semver 1.0.27", "serde", @@ -3007,7 +3009,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" dependencies = [ "proc-macro2", "quote", @@ -3113,7 +3115,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3131,7 +3133,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" dependencies = [ "miden-lib", "miden-objects", @@ -3144,7 +3146,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#e682ee095c9baaf51952adc53019aa270208f7ce" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" dependencies = [ "miden-objects", "miden-tx", @@ -3370,7 +3372,7 @@ version = "0.50.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4701,7 +4703,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -4714,7 +4716,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5347,7 +5349,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5356,7 +5358,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -6365,7 +6367,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] diff --git a/bin/node/.env b/bin/node/.env index 29001ba361..01e699aff2 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -10,6 +10,7 @@ MIDEN_NODE_STORE_RPC_URL= MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= +MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 38db5e109d..7ebdf8d4a7 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -21,6 +21,7 @@ tracing-forest = ["miden-node-block-producer/tracing-forest"] anyhow = { workspace = true } clap = { features = ["env", "string"], workspace = true } fs-err = { workspace = true } +hex = { workspace = true } humantime = { workspace = true } 
miden-node-block-producer = { workspace = true } miden-node-ntx-builder = { workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index a6b5cae559..3becd3ded9 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -13,8 +13,6 @@ COPY ./crates ./crates COPY ./proto ./proto RUN cargo install --path bin/node --locked -RUN rm -rf data accounts && mkdir data accounts -RUN miden-node bundled bootstrap --data-directory ./data --accounts-directory ./accounts FROM debian:bullseye-slim @@ -26,8 +24,6 @@ RUN apt-get update && \ sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /app/accounts accounts -COPY --from=builder /app/data data COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ @@ -47,6 +43,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 -# Start the Miden node # Miden node does not spawn sub-processes, so it can be used as the PID1 -CMD miden-node bundled start --rpc.url http://0.0.0.0:57291 --data-directory ./data +CMD miden-node diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 09bdcf0257..9a57db7898 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -10,6 +10,9 @@ use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; +use miden_objects::block::BlockSigner; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_objects::utils::Deserializable; use tokio::net::TcpListener; use tokio::sync::Barrier; use tokio::task::JoinSet; @@ -21,6 +24,8 @@ use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, + ENV_VALIDATOR_INSECURE_SECRET_KEY, + INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, duration_to_human_readable_string, }; @@ -43,7 +48,17 @@ pub enum BundledCommand { accounts_directory: PathBuf, /// Constructs the genesis 
block from the given toml file. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "FILE")] - genesis_config_file: Option, + genesis_config_file: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, /// Runs all three node components in the same process. @@ -82,6 +97,14 @@ pub enum BundledCommand { value_name = "DURATION" )] grpc_timeout: Duration, + + /// Insecure, hex-encoded validator secret key for development and testing purposes. + #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY" + )] + validator_insecure_secret_key: Option, }, } @@ -92,12 +115,14 @@ impl BundledCommand { data_directory, accounts_directory, genesis_config_file, + validator_insecure_secret_key, } => { // Currently the bundled bootstrap is identical to the store's bootstrap. 
crate::commands::store::StoreCommand::Bootstrap { data_directory, accounts_directory, genesis_config_file, + validator_insecure_secret_key, } .handle() .await @@ -110,9 +135,20 @@ impl BundledCommand { ntx_builder, enable_otel: _, grpc_timeout, + validator_insecure_secret_key, } => { - Self::start(rpc_url, data_directory, ntx_builder, block_producer, grpc_timeout) - .await + let secret_key_hex = + validator_insecure_secret_key.unwrap_or(INSECURE_VALIDATOR_KEY_HEX.into()); + let signer = SecretKey::read_from_bytes(hex::decode(secret_key_hex)?.as_ref())?; + Self::start( + rpc_url, + data_directory, + ntx_builder, + block_producer, + grpc_timeout, + signer, + ) + .await }, } } @@ -124,6 +160,7 @@ impl BundledCommand { ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, grpc_timeout: Duration, + signer: impl BlockSigner + Send + Sync + 'static, ) -> anyhow::Result<()> { // Start listening on all gRPC urls so that inter-component connections can be created // before each component is fully started up. @@ -227,10 +264,14 @@ impl BundledCommand { let validator_id = join_set .spawn({ async move { - Validator { address: validator_address, grpc_timeout } - .serve() - .await - .context("failed while serving validator component") + Validator { + address: validator_address, + grpc_timeout, + signer, + } + .serve() + .await + .context("failed while serving validator component") } }) .id(); diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index ef7386bb6b..ecfee995fa 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -15,6 +15,10 @@ pub mod rpc; pub mod store; pub mod validator; +/// A predefined, insecure validator key for development purposes. 
+const INSECURE_VALIDATOR_KEY_HEX: &str = + "0101010101010101010101010101010101010101010101010101010101010101"; + const ENV_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_BLOCK_PRODUCER_URL"; const ENV_VALIDATOR_URL: &str = "MIDEN_NODE_VALIDATOR_URL"; const ENV_BATCH_PROVER_URL: &str = "MIDEN_NODE_BATCH_PROVER_URL"; @@ -31,6 +35,7 @@ const ENV_GENESIS_CONFIG_FILE: &str = "MIDEN_GENESIS_CONFIG_FILE"; const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; +const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = "MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index c4c39d92be..4ba41e9ebf 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -5,6 +5,8 @@ use anyhow::Context; use miden_node_store::Store; use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; use miden_node_utils::grpc::UrlExt; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_objects::utils::Deserializable; use url::Url; use super::{ @@ -17,6 +19,8 @@ use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, + ENV_VALIDATOR_INSECURE_SECRET_KEY, + INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; @@ -38,7 +42,17 @@ pub enum StoreCommand { accounts_directory: PathBuf, /// Use the given configuration file to construct the genesis state from. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "GENESIS_CONFIG")] - genesis_config_file: Option, + genesis_config_file: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. 
+ #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, /// Starts the store component. @@ -90,9 +104,13 @@ impl StoreCommand { data_directory, accounts_directory, genesis_config_file, - } => { - Self::bootstrap(&data_directory, &accounts_directory, genesis_config_file.as_ref()) - }, + validator_insecure_secret_key, + } => Self::bootstrap( + &data_directory, + &accounts_directory, + &genesis_config_file, + validator_insecure_secret_key, + ), StoreCommand::Start { rpc_url, ntx_builder_url, @@ -164,19 +182,18 @@ impl StoreCommand { fn bootstrap( data_directory: &Path, accounts_directory: &Path, - maybe_genesis_config: Option<&PathBuf>, + genesis_config: &PathBuf, + validator_insecure_secret_key: String, ) -> anyhow::Result<()> { - let config = maybe_genesis_config - .map(|genesis_config| { - let toml_str = fs_err::read_to_string(genesis_config)?; - let config = GenesisConfig::read_toml(toml_str.as_str()) - .context(format!("Read from file: {}", genesis_config.display()))?; - Ok::<_, anyhow::Error>(config) - }) - .transpose()? - .unwrap_or_default(); - - let (genesis_state, secrets) = config.into_state()?; + // Decode the validator key. + let signer = SecretKey::read_from_bytes(&hex::decode(validator_insecure_secret_key)?)?; + + // Read the toml. + let toml_str = fs_err::read_to_string(genesis_config)?; + let config = GenesisConfig::read_toml(toml_str.as_str()) + .context(format!("Read from file: {}", genesis_config.display()))?; + + let (genesis_state, secrets) = config.into_state(signer)?; // Create directories if they do not already exist. 
for directory in &[accounts_directory, data_directory] { diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index 2483317817..a96d1bfc03 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -3,12 +3,16 @@ use std::time::Duration; use anyhow::Context; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_objects::utils::Deserializable; use url::Url; use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, + ENV_VALIDATOR_INSECURE_SECRET_KEY, ENV_VALIDATOR_URL, + INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; @@ -35,17 +39,27 @@ pub enum ValidatorCommand { value_name = "DURATION" )] grpc_timeout: Duration, + + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg(long = "insecure.secret-key", env = ENV_VALIDATOR_INSECURE_SECRET_KEY, value_name = "INSECURE_SECRET_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] + insecure_secret_key: String, }, } impl ValidatorCommand { pub async fn handle(self) -> anyhow::Result<()> { - let Self::Start { url, grpc_timeout, .. } = self; + let Self::Start { + url, grpc_timeout, insecure_secret_key, .. 
+ } = self; let address = url.to_socket().context("Failed to extract socket address from validator URL")?; - Validator { address, grpc_timeout } + let signer = SecretKey::read_from_bytes(hex::decode(insecure_secret_key)?.as_ref())?; + + Validator { address, grpc_timeout, signer } .serve() .await .context("failed while serving validator component") diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index f1cd50617d..19f4b892d3 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -37,6 +37,7 @@ use miden_objects::block::{ ProposedBlock, ProvenBlock, }; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; use miden_objects::crypto::dsa::rpo_falcon512::{PublicKey, SecretKey}; use miden_objects::crypto::rand::RpoRandomCoin; use miden_objects::note::{Note, NoteHeader, NoteId, NoteInclusionProof}; @@ -90,7 +91,8 @@ pub async fn seed_store( // generate the faucet account and the genesis state let faucet = create_faucet(); let fee_params = FeeParameters::new(faucet.id(), 0).unwrap(); - let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1); + let signer = EcdsaSecretKey::new(); + let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1, signer); Store::bootstrap(genesis_state.clone(), &data_directory).expect("store should bootstrap"); // start the store @@ -252,7 +254,8 @@ async fn apply_block( let block_proof = LocalBlockProver::new(0) .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) .unwrap(); - let proven_block = ProvenBlock::new_unchecked(header, body, block_proof); + let signature = EcdsaSecretKey::new().sign(header.commitment()); + let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); let block_size: usize = proven_block.to_bytes().len(); let start = Instant::now(); diff --git a/crates/block-producer/src/block_builder/mod.rs 
b/crates/block-producer/src/block_builder/mod.rs index 50d3d29462..8d5c9b806f 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -17,6 +17,7 @@ use miden_objects::block::{ ProposedBlock, ProvenBlock, }; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::note::NoteHeader; use miden_objects::transaction::{OrderedTransactionHeaders, TransactionHeader}; use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; @@ -28,7 +29,7 @@ use url::Url; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; use crate::store::StoreClient; -use crate::validator::{BlockProducerValidatorClient, BodyDiff, HeaderDiff, ValidatorError}; +use crate::validator::BlockProducerValidatorClient; use crate::{COMPONENT, TelemetryInjectorExt}; // BLOCK BUILDER @@ -130,8 +131,7 @@ impl BlockBuilder { ProposedBlock::inject_telemetry(proposed_block); }) .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) - .and_then(|(proposed_block, inputs, header, body)| self.prove_block(proposed_block, inputs, header, body)) - .and_then(|(proposed_block, header, body, block_proof)| self.construct_proven_block(proposed_block, header, body, block_proof)) + .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) .inspect_ok(ProvenBlock::inject_telemetry) // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot // handle errors after it considers the process complete (which makes sense). 
@@ -230,44 +230,31 @@ impl BlockBuilder { &self, proposed_block: ProposedBlock, block_inputs: BlockInputs, - ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, BlockBody), BuildBlockError> { + ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> + { // Concurrently build the block and validate it via the validator. let build_result = tokio::task::spawn_blocking({ let proposed_block = proposed_block.clone(); move || build_block(proposed_block) }); - let (header, body) = self + let signature = self .validator .sign_block(proposed_block.clone()) .await .map_err(|err| BuildBlockError::ValidateBlockFailed(err.into()))?; - let (expected_header, expected_body) = build_result + let (header, body) = build_result .await .map_err(|err| BuildBlockError::other(format!("task join error: {err}")))? .map_err(BuildBlockError::ProposeBlockFailed)?; - // Check that the header and body returned from the validator is consistent with the - // proposed block. - // TODO(sergerad): Update Eq implementation once signatures are part of the header. - if header != expected_header { - let diff = HeaderDiff { - validator_header: header, - expected_header, - } - .into(); - return Err(BuildBlockError::ValidateBlockFailed( - ValidatorError::HeaderMismatch(diff).into(), - )); - } - if body != expected_body { - let diff = BodyDiff { validator_body: body, expected_body }.into(); - return Err(BuildBlockError::ValidateBlockFailed( - ValidatorError::BodyMismatch(diff).into(), - )); + // Verify the signature against the built block to ensure that + // the validator has provided a valid signature for the relevant block. + if !signature.verify(header.commitment(), header.validator_key()) { + return Err(BuildBlockError::InvalidSignature); } let (ordered_batches, ..) 
= proposed_block.into_parts(); - Ok((ordered_batches, block_inputs, header, body)) + Ok((ordered_batches, block_inputs, header, signature, body)) } #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] @@ -276,8 +263,9 @@ impl BlockBuilder { ordered_batches: OrderedBatches, block_inputs: BlockInputs, header: BlockHeader, + signature: Signature, body: BlockBody, - ) -> Result<(OrderedBatches, BlockHeader, BlockBody, BlockProof), BuildBlockError> { + ) -> Result { // Prove block using header and body from validator. let block_proof = self .block_prover @@ -285,19 +273,8 @@ impl BlockBuilder { .await?; self.simulate_proving().await; - Ok((ordered_batches, header, body, block_proof)) - } - - #[instrument(target = COMPONENT, name = "block_builder.construct_proven_block", skip_all, err)] - async fn construct_proven_block( - &self, - ordered_batches: OrderedBatches, - header: BlockHeader, - body: BlockBody, - block_proof: BlockProof, - ) -> Result { // SAFETY: The header and body are assumed valid and consistent with the proof. 
- let proven_block = ProvenBlock::new_unchecked(header, body, block_proof); + let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { return Err(BuildBlockError::SecurityLevelTooLow( proven_block.proof_security_level(), diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 74064b0387..d53a5ead4e 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -213,6 +213,8 @@ pub enum BuildBlockError { ProposeBlockFailed(#[source] ProposedBlockError), #[error("failed to validate block")] ValidateBlockFailed(#[source] Box), + #[error("block signature is invalid")] + InvalidSignature, #[error("failed to prove block")] ProveBlockFailed(#[source] BlockProverError), /// We sometimes randomly inject errors into the batch building process to test our failure diff --git a/crates/block-producer/src/validator/mod.rs b/crates/block-producer/src/validator/mod.rs index 37162870b5..0ee0e19718 100644 --- a/crates/block-producer/src/validator/mod.rs +++ b/crates/block-producer/src/validator/mod.rs @@ -1,10 +1,8 @@ -use std::fmt::{Display, Formatter}; - use miden_node_proto::clients::{Builder, ValidatorClient}; -use miden_node_proto::errors::{ConversionError, MissingFieldHelper}; use miden_node_proto::generated as proto; -use miden_objects::block::{BlockBody, BlockHeader, ProposedBlock}; -use miden_objects::utils::{Deserializable, Serializable}; +use miden_objects::block::ProposedBlock; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; use thiserror::Error; use tracing::{info, instrument}; use url::Url; @@ -18,55 +16,8 @@ use crate::COMPONENT; pub enum ValidatorError { #[error("gRPC transport error: {0}")] Transport(#[from] tonic::Status), - #[error("response content error: {0}")] - ResponseContent(#[from] ConversionError), - 
#[error("failed to convert header: {0}")] - HeaderConversion(String), - #[error("failed to deserialize body: {0}")] - BodyDeserialization(String), - #[error("validator header does not match the request: {0}")] - HeaderMismatch(Box), - #[error("validator body does not match the request: {0}")] - BodyMismatch(Box), -} - -// VALIDATION DIFF TYPES -// ================================================================================================ - -/// Represents a difference between validator and expected block headers -#[derive(Debug, Clone)] -pub struct HeaderDiff { - pub validator_header: BlockHeader, - pub expected_header: BlockHeader, -} - -impl Display for HeaderDiff { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Expected Header:")?; - writeln!(f, "{:?}", self.expected_header)?; - writeln!(f, "============================")?; - writeln!(f, "Validator Header:")?; - writeln!(f, "{:?}", self.validator_header)?; - Ok(()) - } -} - -/// Represents a difference between validator and expected block bodies -#[derive(Debug, Clone)] -pub struct BodyDiff { - pub validator_body: BlockBody, - pub expected_body: BlockBody, -} - -impl Display for BodyDiff { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Expected Body:")?; - writeln!(f, "{:?}", self.expected_body)?; - writeln!(f, "============================")?; - writeln!(f, "Validator Body:")?; - writeln!(f, "{:?}", self.validator_body)?; - Ok(()) - } + #[error("signature deserialization failed: {0}")] + Deserialization(#[from] DeserializationError), } // VALIDATOR CLIENT @@ -100,31 +51,16 @@ impl BlockProducerValidatorClient { pub async fn sign_block( &self, proposed_block: ProposedBlock, - ) -> Result<(BlockHeader, BlockBody), ValidatorError> { + ) -> Result { // Send request and receive response. 
let message = proto::blockchain::ProposedBlock { proposed_block: proposed_block.to_bytes(), }; let request = tonic::Request::new(message); let response = self.client.clone().sign_block(request).await?; - let signed_block = response.into_inner(); - - // Extract header from response. - let header_proto = signed_block - .header - .ok_or(miden_node_proto::generated::blockchain::BlockHeader::missing_field("header")) - .map_err(ValidatorError::ResponseContent)?; - let header = BlockHeader::try_from(header_proto) - .map_err(|err| ValidatorError::HeaderConversion(err.to_string()))?; - - // Extract body from response. - let body_proto = signed_block - .body - .ok_or(miden_node_proto::generated::blockchain::BlockBody::missing_field("body")) - .map_err(ValidatorError::ResponseContent)?; - let body = BlockBody::read_from_bytes(&body_proto.block_body) - .map_err(|err| ValidatorError::BodyDeserialization(err.to_string()))?; - Ok((header, body)) + // Deserialize the signature. + let signature = response.into_inner(); + Signature::read_from_bytes(&signature.signature).map_err(ValidatorError::Deserialization) } } diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs index 0cb96fa082..a41d7c7ce6 100644 --- a/crates/proto/src/domain/block.rs +++ b/crates/proto/src/domain/block.rs @@ -9,6 +9,7 @@ use miden_objects::block::{ FeeParameters, NullifierWitness, }; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; use miden_objects::note::{NoteId, NoteInclusionProof}; use miden_objects::transaction::PartialBlockchain; use miden_objects::utils::{Deserializable, Serializable}; @@ -47,7 +48,7 @@ impl From<&BlockHeader> for proto::blockchain::BlockHeader { note_root: Some(header.note_root().into()), tx_commitment: Some(header.tx_commitment().into()), tx_kernel_commitment: Some(header.tx_kernel_commitment().into()), - proof_commitment: Some(header.proof_commitment().into()), + validator_key: Some(header.validator_key().into()), timestamp: 
header.timestamp(), fee_parameters: Some(header.fee_parameters().into()), } @@ -108,8 +109,8 @@ impl TryFrom for BlockHeader { )))? .try_into()?, value - .proof_commitment - .ok_or(proto::blockchain::BlockHeader::missing_field(stringify!(proof_commitment)))? + .validator_key + .ok_or(proto::blockchain::BlockHeader::missing_field(stringify!(validator_key)))? .try_into()?, FeeParameters::try_from(value.fee_parameters.ok_or( proto::blockchain::FeeParameters::missing_field(stringify!(fee_parameters)), @@ -202,6 +203,52 @@ impl TryFrom for BlockInputs { } } +// PUBLIC KEY +// ================================================================================================ + +impl TryFrom for PublicKey { + type Error = ConversionError; + fn try_from(public_key: proto::blockchain::ValidatorPublicKey) -> Result { + PublicKey::read_from_bytes(&public_key.validator_key) + .map_err(|source| ConversionError::deserialization_error("PublicKey", source)) + } +} + +impl From for proto::blockchain::ValidatorPublicKey { + fn from(value: PublicKey) -> Self { + Self::from(&value) + } +} + +impl From<&PublicKey> for proto::blockchain::ValidatorPublicKey { + fn from(value: &PublicKey) -> Self { + Self { validator_key: value.to_bytes() } + } +} + +// SIGNATURE +// ================================================================================================ + +impl TryFrom for Signature { + type Error = ConversionError; + fn try_from(signature: proto::blockchain::BlockSignature) -> Result { + Signature::read_from_bytes(&signature.signature) + .map_err(|source| ConversionError::deserialization_error("Signature", source)) + } +} + +impl From for proto::blockchain::BlockSignature { + fn from(value: Signature) -> Self { + Self::from(&value) + } +} + +impl From<&Signature> for proto::blockchain::BlockSignature { + fn from(value: &Signature) -> Self { + Self { signature: value.to_bytes() } + } +} + // FEE PARAMETERS // 
================================================================================================ diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 41afe7ec73..927eadb05f 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -64,9 +64,9 @@ pub struct BlockHeader { /// A commitment to a set of IDs of transactions which affected accounts in this block. #[prost(message, optional, tag = "8")] pub tx_commitment: ::core::option::Option, - /// A commitment to a STARK proof attesting to the correct state transition. + /// The validator's ECDSA public key. #[prost(message, optional, tag = "9")] - pub proof_commitment: ::core::option::Option, + pub validator_key: ::core::option::Option, /// A commitment to all transaction kernels supported by this block. #[prost(message, optional, tag = "10")] pub tx_kernel_commitment: ::core::option::Option, @@ -77,6 +77,22 @@ pub struct BlockHeader { #[prost(fixed32, tag = "12")] pub timestamp: u32, } +/// Validator ECDSA public key. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ValidatorPublicKey { + /// Signature encoded using \[winter_utils::Serializable\] implementation for + /// \[crypto::dsa::ecdsa_k256_keccak::PublicKey\]. + #[prost(bytes = "vec", tag = "1")] + pub validator_key: ::prost::alloc::vec::Vec, +} +/// Block ECDSA Signature. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockSignature { + /// Signature encoded using \[winter_utils::Serializable\] implementation for + /// \[crypto::dsa::ecdsa_k256_keccak::Signature\]. + #[prost(bytes = "vec", tag = "1")] + pub signature: ::prost::alloc::vec::Vec, +} /// Definition of the fee parameters. 
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct FeeParameters { diff --git a/crates/proto/src/generated/validator.rs b/crates/proto/src/generated/validator.rs index 53f7aeb590..39869d9fc3 100644 --- a/crates/proto/src/generated/validator.rs +++ b/crates/proto/src/generated/validator.rs @@ -9,16 +9,6 @@ pub struct ValidatorStatus { #[prost(string, tag = "2")] pub status: ::prost::alloc::string::String, } -/// Response message for SignBlock RPC. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SignedBlock { - /// The block header. - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// The block body. - #[prost(message, optional, tag = "2")] - pub body: ::core::option::Option, -} /// Generated client implementations. pub mod api_client { #![allow( @@ -161,7 +151,10 @@ pub mod api_client { pub async fn sign_block( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await @@ -205,7 +198,10 @@ pub mod api_server { async fn sign_block( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// Validator API for the Validator component. 
#[derive(Debug)] @@ -379,7 +375,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::blockchain::ProposedBlock, > for SignBlockSvc { - type Response = super::SignedBlock; + type Response = super::super::blockchain::BlockSignature; type Future = BoxFuture< tonic::Response, tonic::Status, diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 38578d1bd3..8aac2cb2f7 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -20,6 +20,7 @@ use miden_objects::account::{ AccountStorageMode, AccountType, }; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_objects::testing::noop_auth_component::NoopAuthComponent; use miden_objects::transaction::ProvenTransactionBuilder; use miden_objects::utils::Serializable; @@ -423,7 +424,9 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { // Start the store. let data_directory = tempfile::tempdir().expect("tempdir should be created"); - let (genesis_state, _) = GenesisConfig::default().into_state().unwrap(); + let config = GenesisConfig::default(); + let signer = SecretKey::new(); + let (genesis_state, _) = config.into_state(signer).unwrap(); Store::bootstrap(genesis_state.clone(), data_directory.path()).expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); let rpc_listener = TcpListener::bind(store_addr).await.expect("store should bind a port"); diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 84d0455fcf..cdd270266c 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -34,6 +34,7 @@ use miden_objects::block::{ BlockNoteTree, BlockNumber, }; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_objects::crypto::merkle::SparseMerklePath; use miden_objects::crypto::rand::RpoRandomCoin; use miden_objects::note::{ @@ -54,6 +55,7 @@ use miden_objects::testing::account_id::{ ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, 
ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; +use miden_objects::testing::random_signer::RandomBlockSigner; use miden_objects::transaction::{ InputNoteCommitment, InputNotes, @@ -89,7 +91,7 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) { num_to_word(7), num_to_word(8), num_to_word(9), - num_to_word(10), + SecretKey::new().public_key(), test_fee_params(), 11_u8.into(), ); @@ -855,7 +857,7 @@ fn db_block_header() { num_to_word(7), num_to_word(8), num_to_word(9), - num_to_word(10), + SecretKey::new().public_key(), test_fee_params(), 11_u8.into(), ); @@ -887,7 +889,7 @@ fn db_block_header() { num_to_word(17), num_to_word(18), num_to_word(19), - num_to_word(20), + SecretKey::new().public_key(), test_fee_params(), 21_u8.into(), ); @@ -1451,7 +1453,8 @@ fn genesis_with_account_assets() { .build_existing() .unwrap(); - let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0); + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); let genesis_block = genesis_state.into_block().unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); @@ -1498,7 +1501,8 @@ fn genesis_with_account_storage_map() { .build_existing() .unwrap(); - let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0); + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); let genesis_block = genesis_state.into_block().unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); @@ -1543,7 +1547,8 @@ fn genesis_with_account_assets_and_storage() { .build_existing() .unwrap(); - let genesis_state = GenesisState::new(vec![account], test_fee_params(), 1, 0); + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); let genesis_block = genesis_state.into_block().unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); @@ -1612,8 
+1617,13 @@ fn genesis_with_multiple_accounts() { .build_existing() .unwrap(); - let genesis_state = - GenesisState::new(vec![account1, account2, account3], test_fee_params(), 1, 0); + let genesis_state = GenesisState::new( + vec![account1, account2, account3], + test_fee_params(), + 1, + 0, + SecretKey::random(), + ); let genesis_block = genesis_state.into_block().unwrap(); crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index 313d390cd5..a494c24ef0 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -1,5 +1,6 @@ use miden_lib::account::faucets::FungibleFaucetError; use miden_lib::account::wallets::BasicWalletError; +use miden_lib::utils::DeserializationError; use miden_objects::account::AccountId; use miden_objects::{AccountError, AssetError, FeeError, TokenSymbolError}; @@ -54,4 +55,8 @@ pub enum GenesisConfigError { NativeAssetFaucetIsNotPublic(TokenSymbolStr), #[error("faucet account of {0} is not public")] NativeAssetFaucitIsNotAFungibleFaucet(TokenSymbolStr), + #[error("invalid secret key")] + InvalidSecretKey(#[from] DeserializationError), + #[error("provided signer config is not supported")] + UnsupportedSignerConfig, } diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index d5b16add00..4ad6c2557f 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -26,7 +26,7 @@ use miden_objects::account::{ }; use miden_objects::asset::{FungibleAsset, TokenSymbol}; use miden_objects::block::FeeParameters; -use miden_objects::crypto::dsa::rpo_falcon512::SecretKey; +use miden_objects::crypto::dsa::rpo_falcon512::SecretKey as RpoSecretKey; use miden_objects::{Felt, FieldElement, ONE, TokenSymbolError, ZERO}; use rand::distr::weighted::Weight; use rand::{Rng, SeedableRng}; @@ -94,7 +94,10 @@ impl GenesisConfig { 
/// /// Also returns the set of secrets for the generated accounts. #[allow(clippy::too_many_lines)] - pub fn into_state(self) -> Result<(GenesisState, AccountSecrets), GenesisConfigError> { + pub fn into_state( + self, + signer: S, + ) -> Result<(GenesisState, AccountSecrets), GenesisConfigError> { let GenesisConfig { version, timestamp, @@ -102,6 +105,7 @@ impl GenesisConfig { fee_parameters, fungible_faucet: fungible_faucet_configs, wallet: wallet_configs, + .. } = self; let symbol = native_faucet.symbol.clone(); @@ -154,7 +158,7 @@ impl GenesisConfig { tracing::debug!("Adding wallet account {index} with {assets:?}"); let mut rng = ChaCha20Rng::from_seed(rand::random()); - let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); + let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); let auth = AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() }; let init_seed: [u8; 32] = rng.random(); @@ -263,6 +267,7 @@ impl GenesisConfig { accounts: all_accounts, version, timestamp, + block_signer: signer, }, AccountSecrets { secrets }, )) @@ -332,7 +337,7 @@ pub struct FungibleFaucetConfig { impl FungibleFaucetConfig { /// Create a fungible faucet from a config entry - fn build_account(self) -> Result<(Account, SecretKey), GenesisConfigError> { + fn build_account(self) -> Result<(Account, RpoSecretKey), GenesisConfigError> { let FungibleFaucetConfig { symbol, decimals, @@ -340,7 +345,7 @@ impl FungibleFaucetConfig { storage_mode, } = self; let mut rng = ChaCha20Rng::from_seed(rand::random()); - let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); + let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); let auth = AuthRpoFalcon512::new(secret_key.public_key().into()); let init_seed: [u8; 32] = rng.random(); @@ -426,7 +431,7 @@ pub struct AccountFileWithName { #[derive(Debug, Clone)] pub struct AccountSecrets { // name, account, private key, account seed - pub secrets: 
Vec<(String, AccountId, SecretKey)>, + pub secrets: Vec<(String, AccountId, RpoSecretKey)>, } impl AccountSecrets { @@ -434,10 +439,10 @@ impl AccountSecrets { /// /// If no name is present, a new one is generated based on the current time /// and the index in - pub fn as_account_files( + pub fn as_account_files( &self, - genesis_state: &GenesisState, - ) -> impl Iterator> + use<'_> { + genesis_state: &GenesisState, + ) -> impl Iterator> + use<'_, S> { let account_lut = IndexMap::::from_iter( genesis_state.accounts.iter().map(|account| (account.id(), account.clone())), ); diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index e347f11603..2658806360 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -1,5 +1,6 @@ use assert_matches::assert_matches; use miden_objects::ONE; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey; use super::*; @@ -10,7 +11,7 @@ type TestResult = Result<(), Box>; fn parsing_yields_expected_default_values() -> TestResult { let s = include_str!("./samples/01-simple.toml"); let gcfg = GenesisConfig::read_toml(s)?; - let (state, _secrets) = gcfg.into_state()?; + let (state, _secrets) = gcfg.into_state(SecretKey::new())?; let _ = state; // faucets always precede wallet accounts let native_faucet = state.accounts[0].clone(); @@ -59,7 +60,7 @@ fn parsing_yields_expected_default_values() -> TestResult { #[miden_node_test_macro::enable_logging] fn genesis_accounts_have_nonce_one() -> TestResult { let gcfg = GenesisConfig::default(); - let (state, secrets) = gcfg.into_state().unwrap(); + let (state, secrets) = gcfg.into_state(SecretKey::new()).unwrap(); let mut iter = secrets.as_account_files(&state); let AccountFileWithName { account_file: status_quo, .. 
} = iter.next().unwrap().unwrap(); assert!(iter.next().is_none()); diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs index 2dfc3ccb7d..d3ef1706b4 100644 --- a/crates/store/src/genesis/mod.rs +++ b/crates/store/src/genesis/mod.rs @@ -10,13 +10,13 @@ use miden_objects::block::{ BlockNoteTree, BlockNumber, BlockProof, + BlockSigner, FeeParameters, ProvenBlock, }; use miden_objects::crypto::merkle::{Forest, LargeSmt, MemoryStorage, MmrPeaks, Smt}; use miden_objects::note::Nullifier; use miden_objects::transaction::OrderedTransactionHeaders; -use miden_objects::utils::serde::{ByteReader, Deserializable, DeserializationError}; use crate::errors::GenesisError; @@ -27,11 +27,12 @@ pub mod config; /// Represents the state at genesis, which will be used to derive the genesis block. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct GenesisState { +pub struct GenesisState { pub accounts: Vec, pub fee_parameters: FeeParameters, pub version: u32, pub timestamp: u32, + pub block_signer: S, } /// A type-safety wrapper ensuring that genesis block data can only be created from @@ -48,21 +49,25 @@ impl GenesisBlock { } } -impl GenesisState { +impl GenesisState { pub fn new( accounts: Vec, fee_parameters: FeeParameters, version: u32, timestamp: u32, + signer: S, ) -> Self { Self { accounts, fee_parameters, version, timestamp, + block_signer: signer, } } +} +impl GenesisState { /// Returns the block header and the account SMT pub fn into_block(self) -> Result { let accounts: Vec = self @@ -115,7 +120,7 @@ impl GenesisState { empty_block_note_tree.root(), Word::empty(), TransactionKernel.to_commitment(), - Word::empty(), + self.block_signer.public_key(), self.fee_parameters, self.timestamp, ); @@ -129,25 +134,10 @@ impl GenesisState { let block_proof = BlockProof::new_dummy(); + let signature = self.block_signer.sign(&header); // SAFETY: Header and accounts should be valid by construction. 
// No notes or nullifiers are created at genesis, which is consistent with the above empty // block note tree root and empty nullifier tree root. - Ok(GenesisBlock(ProvenBlock::new_unchecked(header, body, block_proof))) - } -} - -// SERIALIZATION -// ================================================================================================ - -impl Deserializable for GenesisState { - fn read_from(source: &mut R) -> Result { - let num_accounts = source.read_usize()?; - let accounts = source.read_many::(num_accounts)?; - - let version = source.read_u32()?; - let timestamp = source.read_u32()?; - let fee_parameters = source.read::()?; - - Ok(Self::new(accounts, fee_parameters, version, timestamp)) + Ok(GenesisBlock(ProvenBlock::new_unchecked(header, body, signature, block_proof))) } } diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 32e9515546..036727a885 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -12,6 +12,7 @@ use miden_node_proto_build::{ }; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; +use miden_objects::block::BlockSigner; use tokio::net::TcpListener; use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; @@ -50,7 +51,10 @@ impl Store { skip_all, err, )] - pub fn bootstrap(genesis: GenesisState, data_directory: &Path) -> anyhow::Result<()> { + pub fn bootstrap( + genesis: GenesisState, + data_directory: &Path, + ) -> anyhow::Result<()> { let genesis = genesis .into_block() .context("failed to convert genesis configuration into the genesis block")?; diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 638b1b1828..cd4efe483e 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -8,7 +8,7 @@ use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; use 
miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_objects::block::ProposedBlock; +use miden_objects::block::{BlockSigner, ProposedBlock}; use miden_objects::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; @@ -23,16 +23,19 @@ use crate::COMPONENT; /// The handle into running the gRPC validator server. /// /// Facilitates the running of the gRPC server which implements the validator API. -pub struct Validator { +pub struct Validator { /// The address of the validator component. pub address: SocketAddr, /// Server-side timeout for an individual gRPC request. /// /// If the handler takes longer than this duration, the server cancels the call. pub grpc_timeout: Duration, + + /// The signer used to sign blocks. + pub signer: S, } -impl Validator { +impl Validator { /// Serves the validator RPC API. /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is @@ -63,7 +66,7 @@ impl Validator { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .timeout(self.grpc_timeout) - .add_service(api_server::ApiServer::new(ValidatorServer {})) + .add_service(api_server::ApiServer::new(ValidatorServer { signer: self.signer })) .add_service(reflection_service) .add_service(reflection_service_alpha) .serve_with_incoming(TcpListenerStream::new(listener)) @@ -78,10 +81,12 @@ impl Validator { /// The underlying implementation of the gRPC validator server. /// /// Implements the gRPC API for the validator. -struct ValidatorServer {} +struct ValidatorServer { + signer: S, +} #[tonic::async_trait] -impl api_server::Api for ValidatorServer { +impl api_server::Api for ValidatorServer { /// Returns the status of the validator. 
async fn status( &self, @@ -106,7 +111,7 @@ impl api_server::Api for ValidatorServer { async fn sign_block( &self, request: tonic::Request, - ) -> Result, tonic::Status> { + ) -> Result, tonic::Status> { let proposed_block_bytes = request.into_inner().proposed_block; // Deserialize the proposed block. @@ -117,20 +122,13 @@ impl api_server::Api for ValidatorServer { )) })?; - // Build header and body - let (header, body) = build_block(proposed_block) + // Build and sign header. + let (header, _body) = build_block(proposed_block) .map_err(|err| tonic::Status::internal(format!("Failed to build block: {err}")))?; + let signature = self.signer.sign(&header); - // Convert to protobuf format - let header_proto = proto::blockchain::BlockHeader::from(&header); - let body_proto = proto::blockchain::BlockBody { block_body: body.to_bytes() }; - - // Both header and body are required fields and must always be populated - let response = proto::validator::SignedBlock { - header: Some(header_proto), - body: Some(body_proto), - }; - + // Send the signature. + let response = proto::blockchain::BlockSignature { signature: signature.to_bytes() }; Ok(tonic::Response::new(response)) } } diff --git a/proto/proto/internal/validator.proto b/proto/proto/internal/validator.proto index b6645ef9f8..e3bb02a61c 100644 --- a/proto/proto/internal/validator.proto +++ b/proto/proto/internal/validator.proto @@ -4,6 +4,7 @@ package validator; import "types/transaction.proto"; import "types/blockchain.proto"; +import "types/primitives.proto"; import "google/protobuf/empty.proto"; // VALIDATOR API @@ -18,10 +19,10 @@ service Api { rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (google.protobuf.Empty) {} // Validates a proposed block and returns the block header and body. 
- rpc SignBlock(blockchain.ProposedBlock) returns (SignedBlock) {} + rpc SignBlock(blockchain.ProposedBlock) returns (blockchain.BlockSignature) {} } -// STATUS +// VALIDATOR STATUS // ================================================================================================ // Represents the status of the validator. @@ -32,15 +33,3 @@ message ValidatorStatus { // The validator's status. string status = 2; } - -// VALIDATE BLOCK RESPONSE -// ================================================================================================ - -// Response message for SignBlock RPC. -message SignedBlock { - // The block header. - blockchain.BlockHeader header = 1; - - // The block body. - blockchain.BlockBody body = 2; -} diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto index b22dad84c6..619ccf1cff 100644 --- a/proto/proto/types/blockchain.proto +++ b/proto/proto/types/blockchain.proto @@ -66,8 +66,8 @@ message BlockHeader { // A commitment to a set of IDs of transactions which affected accounts in this block. primitives.Digest tx_commitment = 8; - // A commitment to a STARK proof attesting to the correct state transition. - primitives.Digest proof_commitment = 9; + // The validator's ECDSA public key. + ValidatorPublicKey validator_key = 9; // A commitment to all transaction kernels supported by this block. primitives.Digest tx_kernel_commitment = 10; @@ -79,6 +79,27 @@ message BlockHeader { fixed32 timestamp = 12; } +// PUBLIC KEY +// ================================================================================================ + +// Validator ECDSA public key. +message ValidatorPublicKey { + // Signature encoded using [winter_utils::Serializable] implementation for + // [crypto::dsa::ecdsa_k256_keccak::PublicKey]. + bytes validator_key = 1; +} + +// BLOCK SIGNATURE +// ================================================================================================ + +// Block ECDSA Signature. 
+message BlockSignature { + // Signature encoded using [winter_utils::Serializable] implementation for + // [crypto::dsa::ecdsa_k256_keccak::Signature]. + bytes signature = 1; +} + + // FEE PARAMETERS // ================================================================================================ From dd1446a3d8ff4fb676a2b84e2095678d88cc8469 Mon Sep 17 00:00:00 2001 From: Francisco Krause Arnim <56402156+fkrause98@users.noreply.github.com> Date: Mon, 15 Dec 2025 18:38:36 -0300 Subject: [PATCH 053/125] fix: proper private account_id check (#1455) --- crates/store/src/state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 2dd281301b..0605a0310d 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -934,7 +934,7 @@ impl State { ) -> Result { let AccountProofRequest { block_num, account_id, details } = account_request; - if details.is_some() && !account_id.is_public() { + if details.is_some() && !account_id.has_public_state() { return Err(DatabaseError::AccountNotPublic(account_id)); } From e30bdcbacadcc388e2fb65fc00627ac13c1330a0 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Wed, 17 Dec 2025 15:43:46 +1300 Subject: [PATCH 054/125] feat: Integrate RPC with Validator for transaction validation (#1457) --- CHANGELOG.md | 1 + Cargo.lock | 16 +-- bin/network-monitor/src/counter.rs | 4 +- bin/network-monitor/src/deploy/counter.rs | 13 +-- bin/node/src/commands/bundled.rs | 3 + bin/node/src/commands/rpc.rs | 8 +- bin/stress-test/src/seeding/mod.rs | 8 +- crates/proto/src/domain/account.rs | 20 ++-- crates/proto/src/domain/block.rs | 9 +- crates/rpc/src/server/api.rs | 45 ++++---- crates/rpc/src/server/mod.rs | 10 +- crates/rpc/src/tests.rs | 3 + crates/store/src/accounts/mod.rs | 4 +- crates/store/src/db/tests.rs | 105 ++++++++++-------- crates/store/src/genesis/config/mod.rs | 2 +- 
crates/store/src/genesis/config/tests.rs | 5 +- crates/store/src/state.rs | 14 +-- crates/validator/Cargo.toml | 2 + crates/validator/src/lib.rs | 1 + crates/validator/src/server/mod.rs | 27 ++++- .../src/tx_validation/data_store.rs} | 38 +------ crates/validator/src/tx_validation/mod.rs | 60 ++++++++++ 22 files changed, 236 insertions(+), 162 deletions(-) rename crates/{rpc/src/server/validator.rs => validator/src/tx_validation/data_store.rs} (79%) create mode 100644 crates/validator/src/tx_validation/mod.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 34017c3e0b..6be32f2bf3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). - The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). - Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). +- Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). 
### Changes diff --git a/Cargo.lock b/Cargo.lock index 9ee51162d5..38063686b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2499,7 +2499,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#198e25ee552638caefa356ff5b248a654b119083" dependencies = [ "miden-objects", "thiserror 2.0.17", @@ -2595,7 +2595,7 @@ dependencies = [ [[package]] name = "miden-lib" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#198e25ee552638caefa356ff5b248a654b119083" dependencies = [ "fs-err", "miden-assembly", @@ -2951,6 +2951,8 @@ dependencies = [ "miden-node-proto-build", "miden-node-utils", "miden-objects", + "miden-tx", + "thiserror 2.0.17", "tokio", "tokio-stream", "tonic", @@ -2962,7 +2964,7 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#198e25ee552638caefa356ff5b248a654b119083" dependencies = [ "bech32", "getrandom 0.3.4", @@ -3009,7 +3011,7 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#198e25ee552638caefa356ff5b248a654b119083" dependencies = [ "proc-macro2", "quote", @@ -3115,7 +3117,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" +source = 
"git+https://github.com/0xMiden/miden-base.git?branch=next#198e25ee552638caefa356ff5b248a654b119083" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3133,7 +3135,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#198e25ee552638caefa356ff5b248a654b119083" dependencies = [ "miden-lib", "miden-objects", @@ -3146,7 +3148,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#4b08b9c916bb3a1ecc7e509d51564e9860c1bbc8" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#198e25ee552638caefa356ff5b248a654b119083" dependencies = [ "miden-objects", "miden-tx", diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index b44e17dc99..af647a7957 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -10,7 +10,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; use anyhow::{Context, Result}; use miden_lib::AuthScheme; use miden_lib::account::interface::AccountInterface; -use miden_lib::utils::ScriptBuilder; +use miden_lib::utils::CodeBuilder; use miden_node_proto::clients::RpcClient; use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; @@ -528,7 +528,7 @@ async fn create_and_submit_network_note( fn create_increment_script() -> Result<(NoteScript, Library)> { let library = get_counter_library()?; - let script_builder = ScriptBuilder::new(true) + let script_builder = CodeBuilder::new(true) .with_dynamically_linked_library(&library) .context("Failed to create script builder with library")?; diff --git a/bin/network-monitor/src/deploy/counter.rs b/bin/network-monitor/src/deploy/counter.rs index c7720fa0df..75381fba23 100644 --- 
a/bin/network-monitor/src/deploy/counter.rs +++ b/bin/network-monitor/src/deploy/counter.rs @@ -4,7 +4,7 @@ use std::path::Path; use anyhow::Result; use miden_lib::testing::account_component::IncrNonceAuthComponent; -use miden_lib::transaction::TransactionKernel; +use miden_lib::utils::CodeBuilder; use miden_objects::account::{ Account, AccountBuilder, @@ -50,12 +50,11 @@ pub fn create_counter_account(owner_account_id: AccountId) -> Result { let counter_slot = StorageSlot::with_value(COUNTER_SLOT_NAME.clone(), Word::empty()); - let account_code = AccountComponent::compile( - script, - TransactionKernel::assembler(), - vec![counter_slot, owner_id_slot], - )? - .with_supports_all_types(); + let component_code = + CodeBuilder::default().compile_component_code("counter::program", script)?; + + let account_code = AccountComponent::new(component_code, vec![counter_slot, owner_id_slot])? + .with_supports_all_types(); let incr_nonce_auth: AccountComponent = IncrNonceAuthComponent.into(); diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 9a57db7898..bf66f6c041 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -283,10 +283,13 @@ impl BundledCommand { .context("Failed to parse URL")?; let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) .context("Failed to parse URL")?; + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; Rpc { listener: grpc_rpc, store_url, block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout, } .serve() diff --git a/bin/node/src/commands/rpc.rs b/bin/node/src/commands/rpc.rs index ed05546b3f..643734f378 100644 --- a/bin/node/src/commands/rpc.rs +++ b/bin/node/src/commands/rpc.rs @@ -5,7 +5,7 @@ use miden_node_rpc::Rpc; use miden_node_utils::grpc::UrlExt; use url::Url; -use super::{ENV_BLOCK_PRODUCER_URL, ENV_RPC_URL, ENV_STORE_RPC_URL}; +use super::{ENV_BLOCK_PRODUCER_URL, 
ENV_RPC_URL, ENV_STORE_RPC_URL, ENV_VALIDATOR_URL}; use crate::commands::{DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, duration_to_human_readable_string}; #[derive(clap::Subcommand)] @@ -25,6 +25,10 @@ pub enum RpcCommand { #[arg(long = "block-producer.url", env = ENV_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Option, + /// The validator's gRPC url. + #[arg(long = "validator.url", env = ENV_VALIDATOR_URL, value_name = "URL")] + validator_url: Url, + /// Enables the exporting of traces for OpenTelemetry. /// /// This can be further configured using environment variables as defined in the official @@ -51,6 +55,7 @@ impl RpcCommand { url, store_url, block_producer_url, + validator_url, enable_otel: _, grpc_timeout, } = self; @@ -64,6 +69,7 @@ impl RpcCommand { listener, store_url, block_producer_url, + validator_url, grpc_timeout, } .serve() diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index 19f4b892d3..924710a098 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -443,13 +443,11 @@ fn create_emit_note_tx( ) -> ProvenTransaction { let initial_account_hash = faucet.commitment(); - let slot = faucet.storage().get_item(BasicFungibleFaucet::metadata_slot_name()).unwrap(); + let metadata_slot_name = AccountStorage::faucet_sysdata_slot(); + let slot = faucet.storage().get_item(metadata_slot_name).unwrap(); faucet .storage_mut() - .set_item( - AccountStorage::faucet_metadata_slot(), - [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into(), - ) + .set_item(metadata_slot_name, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) .unwrap(); faucet.increment_nonce(ONE).unwrap(); diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index f442e01157..2541d020ab 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -8,11 +8,13 @@ use miden_objects::account::{ AccountId, AccountStorageHeader, StorageMap, + 
StorageSlotHeader, StorageSlotName, StorageSlotType, }; use miden_objects::asset::{Asset, AssetVault}; -use miden_objects::block::{AccountWitness, BlockNumber}; +use miden_objects::block::BlockNumber; +use miden_objects::block::account_tree::AccountWitness; use miden_objects::crypto::merkle::SparseMerklePath; use miden_objects::note::{NoteExecutionMode, NoteTag}; use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; @@ -167,18 +169,18 @@ impl TryFrom for AccountStorageHeader { fn try_from(value: proto::account::AccountStorageHeader) -> Result { let proto::account::AccountStorageHeader { slots } = value; - let items = slots + let slot_headers = slots .into_iter() .map(|slot| { let slot_name = StorageSlotName::new(slot.slot_name)?; let slot_type = storage_slot_type_from_raw(slot.slot_type)?; let commitment = slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; - Ok((slot_name, slot_type, commitment)) + Ok(StorageSlotHeader::new(slot_name, slot_type, commitment)) }) .collect::, ConversionError>>()?; - Ok(AccountStorageHeader::new(items)?) + Ok(AccountStorageHeader::new(slot_headers)?) 
} } @@ -333,12 +335,10 @@ impl From for proto::account::AccountStorageHeader { fn from(value: AccountStorageHeader) -> Self { let slots = value .slots() - .map(|(slot_name, slot_type, slot_value)| { - proto::account::account_storage_header::StorageSlot { - slot_name: slot_name.to_string(), - slot_type: storage_slot_type_to_raw(*slot_type), - commitment: Some(proto::primitives::Digest::from(*slot_value)), - } + .map(|slot_header| proto::account::account_storage_header::StorageSlot { + slot_name: slot_header.name().to_string(), + slot_type: storage_slot_type_to_raw(slot_header.slot_type()), + commitment: Some(proto::primitives::Digest::from(slot_header.value())), }) .collect(); diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs index a41d7c7ce6..76bc4c73ba 100644 --- a/crates/proto/src/domain/block.rs +++ b/crates/proto/src/domain/block.rs @@ -2,13 +2,8 @@ use std::collections::BTreeMap; use std::ops::RangeInclusive; use miden_objects::account::AccountId; -use miden_objects::block::{ - BlockHeader, - BlockInputs, - BlockNumber, - FeeParameters, - NullifierWitness, -}; +use miden_objects::block::nullifier_tree::NullifierWitness; +use miden_objects::block::{BlockHeader, BlockInputs, BlockNumber, FeeParameters}; use miden_objects::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; use miden_objects::note::{NoteId, NoteInclusionProof}; use miden_objects::transaction::PartialBlockchain; diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 4c370d777b..59ffffb0c3 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; -use miden_node_proto::clients::{BlockProducerClient, Builder, StoreRpcClient}; +use miden_node_proto::clients::{BlockProducerClient, Builder, StoreRpcClient, ValidatorClient}; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::MempoolStats; use 
miden_node_proto::generated::rpc::api_server::{self, Api}; @@ -20,12 +20,7 @@ use miden_objects::account::AccountId; use miden_objects::batch::ProvenBatch; use miden_objects::block::{BlockHeader, BlockNumber}; use miden_objects::note::{Note, NoteRecipient, NoteScript}; -use miden_objects::transaction::{ - OutputNote, - ProvenTransaction, - ProvenTransactionBuilder, - TransactionInputs, -}; +use miden_objects::transaction::{OutputNote, ProvenTransaction, ProvenTransactionBuilder}; use miden_objects::utils::serde::{Deserializable, Serializable}; use miden_objects::{MIN_PROOF_SECURITY_LEVEL, Word}; use miden_tx::TransactionVerifier; @@ -34,7 +29,6 @@ use tracing::{debug, info, instrument, warn}; use url::Url; use crate::COMPONENT; -use crate::server::validator; // RPC SERVICE // ================================================================================================ @@ -42,11 +36,12 @@ use crate::server::validator; pub struct RpcService { store: StoreRpcClient, block_producer: Option, + validator: ValidatorClient, genesis_commitment: Option, } impl RpcService { - pub(super) fn new(store_url: Url, block_producer_url: Option) -> Self { + pub(super) fn new(store_url: Url, block_producer_url: Option, validator_url: Url) -> Self { let store = { info!(target: COMPONENT, store_endpoint = %store_url, "Initializing store client"); Builder::new(store_url) @@ -73,9 +68,25 @@ impl RpcService { .connect_lazy::() }); + let validator = { + info!( + target: COMPONENT, + validator_endpoint = %validator_url, + "Initializing validator client", + ); + Builder::new(validator_url) + .without_tls() + .without_timeout() + .without_metadata_version() + .without_metadata_genesis() + .with_otel_context_injection() + .connect_lazy::() + }; + Self { store, block_producer, + validator, genesis_commitment: None, } } @@ -379,18 +390,14 @@ impl api_server::Api for RpcService { })?; // If transaction inputs are provided, re-execute the transaction to validate it. 
- if let Some(tx_inputs_bytes) = &request.transaction_inputs { - // Deserialize the transaction inputs. - let tx_inputs = TransactionInputs::read_from_bytes(tx_inputs_bytes).map_err(|err| { - Status::invalid_argument(err.as_report_context("Invalid transaction inputs")) - })?; - // Re-execute the transaction. - match validator::re_execute_transaction(tx_inputs).await { - Ok(_executed_tx) => { + if request.transaction_inputs.is_some() { + // Re-execute the transaction via the Validator. + match self.validator.clone().submit_proven_transaction(request.clone()).await { + Ok(_) => { debug!( target = COMPONENT, tx_id = %tx.id().to_hex(), - "Transaction re-execution successful" + "Transaction validation successful" ); }, Err(e) => { @@ -398,7 +405,7 @@ impl api_server::Api for RpcService { target = COMPONENT, tx_id = %tx.id().to_hex(), error = %e, - "Transaction re-execution failed, but continuing with submission" + "Transaction validation failed, but continuing with submission" ); }, } diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index 71ef163c29..2299072073 100644 --- a/crates/rpc/src/server/mod.rs +++ b/crates/rpc/src/server/mod.rs @@ -21,17 +21,17 @@ use crate::server::health::HealthCheckLayer; mod accept; mod api; mod health; -mod validator; /// The RPC server component. /// /// On startup, binds to the provided listener and starts serving the RPC API. -/// It connects lazily to the store and block producer components as needed. +/// It connects lazily to the store, validator and block producer components as needed. /// Requests will fail if the components are not available. pub struct Rpc { pub listener: TcpListener, pub store_url: Url, pub block_producer_url: Option, + pub validator_url: Url, /// Server-side timeout for an individual gRPC request. /// /// If the handler takes longer than this duration, the server cancels the call. @@ -44,7 +44,11 @@ impl Rpc { /// Note: Executes in place (i.e. 
not spawned) and will run indefinitely until /// a fatal error is encountered. pub async fn serve(self) -> anyhow::Result<()> { - let mut api = api::RpcService::new(self.store_url.clone(), self.block_producer_url.clone()); + let mut api = api::RpcService::new( + self.store_url.clone(), + self.block_producer_url.clone(), + self.validator_url, + ); let genesis = api .get_genesis_header_with_retry() diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 8aac2cb2f7..05e9ee2639 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -394,10 +394,13 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) let store_url = Url::parse(&format!("http://{store_addr}")).unwrap(); // SAFETY: The block_producer_addr is always valid as it is created from a `SocketAddr`. let block_producer_url = Url::parse(&format!("http://{block_producer_addr}")).unwrap(); + // SAFETY: Using dummy validator URL for test - not actually contacted in this test + let validator_url = Url::parse("http://127.0.0.1:0").unwrap(); Rpc { listener: rpc_listener, store_url, block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout: Duration::from_secs(30), } .serve() diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index 7569859977..34f46e7adb 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -3,8 +3,8 @@ use std::collections::{BTreeMap, HashMap}; use miden_objects::account::{AccountId, AccountIdPrefix}; -use miden_objects::block::account_tree::{AccountMutationSet, AccountTree}; -use miden_objects::block::{AccountWitness, BlockNumber}; +use miden_objects::block::BlockNumber; +use miden_objects::block::account_tree::{AccountMutationSet, AccountTree, AccountWitness}; use miden_objects::crypto::merkle::{ EmptySubtreeRoots, LargeSmt, diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index cdd270266c..e203e217c5 100644 --- 
a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -7,7 +7,7 @@ use std::sync::{Arc, Mutex}; use diesel::{Connection, SqliteConnection}; use miden_lib::account::auth::AuthRpoFalcon512; use miden_lib::note::create_p2id_note; -use miden_lib::transaction::TransactionKernel; +use miden_lib::utils::CodeBuilder; use miden_node_proto::domain::account::AccountSummary; use miden_node_utils::fee::{test_fee, test_fee_params}; use miden_objects::account::auth::PublicKeyCommitment; @@ -1409,19 +1409,18 @@ fn mock_account_code_and_storage( StorageSlot::with_value(StorageSlotName::mock(5), num_to_word(5)), ]; - let component = AccountComponent::compile( - component_code, - TransactionKernel::assembler(), - component_storage, - ) - .unwrap() - .with_supported_type(account_type); + let account_component_code = CodeBuilder::default() + .compile_component_code("counter_contract::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); AccountBuilder::new(init_seed.unwrap_or([0; 32])) .account_type(account_type) .storage_mode(storage_mode) .with_assets(assets) - .with_component(component) + .with_component(account_component) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap() @@ -1435,11 +1434,14 @@ fn mock_account_code_and_storage( #[miden_node_test_macro::enable_logging] fn genesis_with_account_assets() { use crate::genesis::GenesisState; + let component_code = "export.foo push.1 end"; - let component = - AccountComponent::compile("export.foo push.1 end", TransactionKernel::assembler(), vec![]) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + 
.with_supports_all_types(); let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); let fungible_asset = FungibleAsset::new(faucet_id, 1000).unwrap(); @@ -1447,7 +1449,7 @@ fn genesis_with_account_assets() { let account = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) - .with_component(component) + .with_component(account_component) .with_assets([fungible_asset.into()]) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() @@ -1485,18 +1487,19 @@ fn genesis_with_account_storage_map() { StorageSlot::with_empty_value(StorageSlotName::mock(1)), ]; - let component = AccountComponent::compile( - "export.foo push.1 end", - TransactionKernel::assembler(), - component_storage, - ) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component_code = "export.foo push.1 end"; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); let account = AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) - .with_component(component) + .with_component(account_component) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1530,18 +1533,19 @@ fn genesis_with_account_assets_and_storage() { StorageSlot::with_map(StorageSlotName::mock(2), storage_map), ]; - let component = AccountComponent::compile( - "export.foo push.1 end", - TransactionKernel::assembler(), - component_storage, - ) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let component_code = "export.foo push.1 end"; + + let account_component_code = CodeBuilder::default() + 
.compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); let account = AccountBuilder::new([3u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) - .with_component(component) + .with_component(account_component) .with_assets([fungible_asset.into()]) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() @@ -1563,15 +1567,17 @@ fn genesis_with_multiple_accounts() { use crate::genesis::GenesisState; - let component1 = - AccountComponent::compile("export.foo push.1 end", TransactionKernel::assembler(), vec![]) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", "export.foo push.1 end") + .unwrap(); + let account_component1 = AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + .with_supports_all_types(); let account1 = AccountBuilder::new([1u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) - .with_component(component1) + .with_component(account_component1) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1579,15 +1585,17 @@ fn genesis_with_multiple_accounts() { let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); let fungible_asset = FungibleAsset::new(faucet_id, 2000).unwrap(); - let component2 = - AccountComponent::compile("export.bar push.2 end", TransactionKernel::assembler(), vec![]) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let account_component_code = CodeBuilder::default() + .compile_component_code("bar::interface", "export.bar push.2 end") + .unwrap(); + let account_component2 = 
AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + .with_supports_all_types(); let account2 = AccountBuilder::new([2u8; 32]) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) - .with_component(component2) + .with_component(account_component2) .with_assets([fungible_asset.into()]) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() @@ -1601,18 +1609,17 @@ fn genesis_with_multiple_accounts() { let component_storage = vec![StorageSlot::with_map(StorageSlotName::mock(0), storage_map)]; - let component3 = AccountComponent::compile( - "export.baz push.3 end", - TransactionKernel::assembler(), - component_storage, - ) - .unwrap() - .with_supported_type(AccountType::RegularAccountImmutableCode); + let account_component_code = CodeBuilder::default() + .compile_component_code("baz::interface", "export.baz push.3 end") + .unwrap(); + let account_component3 = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); let account3 = AccountBuilder::new([3u8; 32]) - .account_type(AccountType::RegularAccountImmutableCode) + .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) - .with_component(component3) + .with_component(account_component3) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index 4ad6c2557f..e27a891cd0 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -219,7 +219,7 @@ impl GenesisConfig { if total_issuance != 0 { // slot 0 storage_delta.set_item( - AccountStorage::faucet_metadata_slot().clone(), + AccountStorage::faucet_sysdata_slot().clone(), [ZERO, ZERO, ZERO, Felt::new(total_issuance)].into(), ); tracing::debug!( diff --git 
a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index 2658806360..7fde9ee3fd 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -45,10 +45,7 @@ fn parsing_yields_expected_default_values() -> TestResult { // check total issuance of the faucet assert_eq!( - native_faucet - .storage() - .get_item(AccountStorage::faucet_metadata_slot()) - .unwrap()[3], + native_faucet.storage().get_item(AccountStorage::faucet_sysdata_slot()).unwrap()[3], Felt::new(999_777), "Issuance mismatch" ); diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 0605a0310d..c9225d1476 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -24,17 +24,9 @@ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; use miden_objects::account::{AccountHeader, AccountId, StorageSlot, StorageSlotContent}; -use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; -use miden_objects::block::nullifier_tree::NullifierTree; -use miden_objects::block::{ - AccountWitness, - BlockHeader, - BlockInputs, - BlockNumber, - Blockchain, - NullifierWitness, - ProvenBlock, -}; +use miden_objects::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; +use miden_objects::block::nullifier_tree::{NullifierTree, NullifierWitness}; +use miden_objects::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; use miden_objects::crypto::merkle::{ Forest, LargeSmt, diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 8f50ef4934..0afc266de5 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -23,6 +23,8 @@ miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } miden-objects = { workspace = true } +miden-tx = 
{ workspace = true } +thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } tonic = { default-features = true, features = ["transport"], workspace = true } diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index d467b33fb5..065098d8ab 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -1,4 +1,5 @@ mod server; +mod tx_validation; pub use server::Validator; diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index cd4efe483e..5e950be673 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -6,16 +6,20 @@ use miden_lib::block::build_block; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; +use miden_node_utils::ErrorReport; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::grpc::grpc_trace_fn; use miden_objects::block::{BlockSigner, ProposedBlock}; +use miden_objects::transaction::{ProvenTransaction, TransactionInputs}; use miden_objects::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; +use tonic::Status; use tower_http::catch_panic::CatchPanicLayer; use tower_http::trace::TraceLayer; use crate::COMPONENT; +use crate::tx_validation::validate_transaction; // VALIDATOR // ================================================================================ @@ -101,9 +105,28 @@ impl api_server::Api for ValidatorServer /// Receives a proven transaction, then validates and stores it. async fn submit_proven_transaction( &self, - _request: tonic::Request, + request: tonic::Request, ) -> Result, tonic::Status> { - // TODO(sergerad): Implement transaction validation logic. + let request = request.into_inner(); + // Deserialize the transaction. 
+ let proven_tx = + ProvenTransaction::read_from_bytes(&request.transaction).map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid proven transaction")) + })?; + + // Deserialize the transaction inputs. + let Some(tx_inputs) = request.transaction_inputs else { + return Err(Status::invalid_argument("Missing transaction inputs")); + }; + let tx_inputs = TransactionInputs::read_from_bytes(&tx_inputs).map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid transaction inputs")) + })?; + + // Validate the transaction. + validate_transaction(proven_tx, tx_inputs).await.map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid transaction")) + })?; + Ok(tonic::Response::new(())) } diff --git a/crates/rpc/src/server/validator.rs b/crates/validator/src/tx_validation/data_store.rs similarity index 79% rename from crates/rpc/src/server/validator.rs rename to crates/validator/src/tx_validation/data_store.rs index 5a0b077e8d..89685aa2a3 100644 --- a/crates/rpc/src/server/validator.rs +++ b/crates/validator/src/tx_validation/data_store.rs @@ -7,47 +7,21 @@ use miden_objects::account::{AccountId, PartialAccount, StorageMapWitness}; use miden_objects::asset::{AssetVaultKey, AssetWitness}; use miden_objects::block::{BlockHeader, BlockNumber}; use miden_objects::note::NoteScript; -use miden_objects::transaction::{ - AccountInputs, - ExecutedTransaction, - PartialBlockchain, - TransactionInputs, -}; +use miden_objects::transaction::{AccountInputs, PartialBlockchain, TransactionInputs}; use miden_objects::vm::FutureMaybeSend; -use miden_tx::auth::UnreachableAuth; -use miden_tx::{ - DataStore, - DataStoreError, - MastForestStore, - TransactionExecutor, - TransactionExecutorError, - TransactionMastStore, -}; +use miden_tx::{DataStore, DataStoreError, MastForestStore, TransactionMastStore}; -/// Executes a transaction using the provided transaction inputs. 
-pub async fn re_execute_transaction(
-    tx_inputs: TransactionInputs,
-) -> Result {
-    // Create a DataStore from the transaction inputs.
-    let data_store = TransactionInputsDataStore::new(tx_inputs.clone());
-
-    // Execute the transaction.
-    let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts();
-    let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> =
-        TransactionExecutor::new(&data_store);
-    executor
-        .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args)
-        .await
-}
+// TRANSACTION INPUTS DATA STORE
+// ================================================================================================

 /// A [`DataStore`] implementation that wraps [`TransactionInputs`]
-struct TransactionInputsDataStore {
+pub struct TransactionInputsDataStore {
     tx_inputs: TransactionInputs,
     mast_store: TransactionMastStore,
 }

 impl TransactionInputsDataStore {
-    fn new(tx_inputs: TransactionInputs) -> Self {
+    pub fn new(tx_inputs: TransactionInputs) -> Self {
         let mast_store = TransactionMastStore::new();
         mast_store.load_account_code(tx_inputs.account().code());
         Self { tx_inputs, mast_store }
diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs
new file mode 100644
index 0000000000..188fe2f6b8
--- /dev/null
+++ b/crates/validator/src/tx_validation/mod.rs
@@ -0,0 +1,60 @@
+mod data_store;
+
+pub use data_store::TransactionInputsDataStore;
+use miden_objects::MIN_PROOF_SECURITY_LEVEL;
+use miden_objects::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs};
+use miden_tx::auth::UnreachableAuth;
+use miden_tx::{TransactionExecutor, TransactionExecutorError, TransactionVerifier};
+
+// TRANSACTION VALIDATION ERROR
+// ================================================================================================
+
+#[derive(thiserror::Error, Debug)]
+pub enum TransactionValidationError {
+    #[error("failed to re-execute the transaction")]
+    
ExecutionError(#[from] TransactionExecutorError), + #[error("re-executed transaction did not match the provided proven transaction")] + Mismatch { + proven_tx_header: Box, + executed_tx_header: Box, + }, + #[error("transaction proof verification failed")] + ProofVerificationFailed(#[from] miden_tx::TransactionVerifierError), +} + +// TRANSACTION VALIDATION +// ================================================================================================ + +/// Validates a transaction by verifying its proof, executing it and comparing its header with the +/// provided proven transaction. +pub async fn validate_transaction( + proven_tx: ProvenTransaction, + tx_inputs: TransactionInputs, +) -> Result<(), TransactionValidationError> { + // First, verify the transaction proof + let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); + tx_verifier.verify(&proven_tx)?; + + // Create a DataStore from the transaction inputs. + let data_store = TransactionInputsDataStore::new(tx_inputs.clone()); + + // Execute the transaction. + let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts(); + let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = + TransactionExecutor::new(&data_store); + let executed_tx = executor + .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args) + .await?; + + // Validate that the executed transaction matches the submitted transaction. 
+ let executed_tx_header: TransactionHeader = (&executed_tx).into(); + let proven_tx_header: TransactionHeader = (&proven_tx).into(); + if executed_tx_header == proven_tx_header { + Ok(()) + } else { + Err(TransactionValidationError::Mismatch { + proven_tx_header: proven_tx_header.into(), + executed_tx_header: executed_tx_header.into(), + }) + } +} From 4e27e155dae31f63f31761cd47b837f44db5487c Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Wed, 17 Dec 2025 12:35:59 -0300 Subject: [PATCH 055/125] chore: improve tracing in network monitor (#1366) --- CHANGELOG.md | 1 + bin/network-monitor/src/commands/start.rs | 20 ++++++- bin/network-monitor/src/counter.rs | 30 +++++++++- bin/network-monitor/src/faucet.rs | 28 +++++++++- bin/network-monitor/src/monitor/tasks.rs | 55 ++++++++++++++++-- bin/network-monitor/src/remote_prover.rs | 35 +++++++++++- bin/network-monitor/src/status.rs | 68 +++++++++++++++++------ 7 files changed, 205 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6be32f2bf3..f2289458b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). - The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). - Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). +- Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)). - Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). 
### Changes diff --git a/bin/network-monitor/src/commands/start.rs b/bin/network-monitor/src/commands/start.rs index 3f1cbca6b2..461d3793f2 100644 --- a/bin/network-monitor/src/commands/start.rs +++ b/bin/network-monitor/src/commands/start.rs @@ -4,7 +4,7 @@ use anyhow::Result; use miden_node_utils::logging::OpenTelemetry; -use tracing::{info, instrument, warn}; +use tracing::{debug, info, instrument, warn}; use crate::COMPONENT; use crate::config::MonitorConfig; @@ -15,7 +15,16 @@ use crate::monitor::tasks::Tasks; /// /// This function initializes all monitoring tasks including RPC status checking, /// remote prover testing, faucet testing, and the web frontend. -#[instrument(target = COMPONENT, name = "start-monitor", skip_all, fields(port = %config.port))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.start_monitor", + skip_all, + level = "info", + fields(port = %config.port), + ret(level = "debug"), + err +)] pub async fn start_monitor(config: MonitorConfig) -> Result<()> { // Load configuration from command-line arguments and environment variables info!("Loaded configuration: {:?}", config); @@ -29,17 +38,21 @@ pub async fn start_monitor(config: MonitorConfig) -> Result<()> { let mut tasks = Tasks::new(); // Initialize the RPC Status endpoint checker task. + debug!(target: COMPONENT, "Initializing RPC status checker"); let rpc_rx = tasks.spawn_rpc_checker(&config).await?; // Initialize the prover checkers & tests tasks, only if URLs were provided. let prover_rxs = if config.remote_prover_urls.is_empty() { + debug!(target: COMPONENT, "No remote prover URLs configured, skipping prover tasks"); Vec::new() } else { + debug!(target: COMPONENT, "Initializing prover checkers and tests"); tasks.spawn_prover_tasks(&config).await? }; // Initialize the faucet testing task. 
let faucet_rx = if config.faucet_url.is_some() { + debug!(target: COMPONENT, "Initializing faucet testing task"); Some(tasks.spawn_faucet(&config)) } else { warn!("Faucet URL not configured, skipping faucet testing"); @@ -48,13 +61,16 @@ pub async fn start_monitor(config: MonitorConfig) -> Result<()> { // Initialize the counter increment and tracking tasks only if enabled. let (ntx_increment_rx, ntx_tracking_rx) = if config.disable_ntx_service { + debug!(target: COMPONENT, "NTX service disabled, skipping counter increment task"); (None, None) } else { + debug!(target: COMPONENT, "Initializing counter increment task"); let (increment_rx, tracking_rx) = tasks.spawn_ntx_service(&config).await?; (Some(increment_rx), Some(tracking_rx)) }; // Initialize HTTP server. + debug!(target: COMPONENT, "Initializing HTTP server"); let server_state = ServerState { rpc: rpc_rx, provers: prover_rxs, diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index af647a7957..c6b4b5c003 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -195,7 +195,15 @@ async fn setup_increment_task( /// # Returns /// /// This function runs indefinitely, only returning on error. -#[instrument(target = COMPONENT, name = "run-increment-task", skip_all, ret(level = "debug"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.run_increment_task", + skip_all, + level = "info", + ret(level = "debug"), + err +)] pub async fn run_increment_task( config: MonitorConfig, tx: watch::Sender, @@ -332,7 +340,15 @@ fn send_status(tx: &watch::Sender, status: ServiceStatus) -> Resu /// # Returns /// /// This function runs indefinitely, only returning on error. 
-#[instrument(target = COMPONENT, name = "run-counter-tracking-task", skip_all, ret(level = "debug"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.run_counter_tracking_task", + skip_all, + level = "info", + ret(level = "debug"), + err +)] pub async fn run_counter_tracking_task( config: MonitorConfig, tx: watch::Sender, @@ -456,7 +472,15 @@ fn load_counter_account(file_path: &Path) -> Result { /// Create and submit a network note that targets the counter account. #[allow(clippy::too_many_arguments)] -#[instrument(target = COMPONENT, name = "create-and-submit-network-note", skip_all, ret)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.create_and_submit_network_note", + skip_all, + level = "info", + ret(level = "debug"), + err +)] async fn create_and_submit_network_note( wallet_account: &Account, counter_account: &Account, diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 5cc0944b21..6f9d5b82b1 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -88,7 +88,14 @@ pub struct GetMetadataResponse { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. -#[instrument(target = COMPONENT, name = "faucet-test-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.run_faucet_test_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_faucet_test_task( faucet_url: Url, status_sender: watch::Sender, @@ -167,6 +174,15 @@ pub async fn run_faucet_test_task( /// # Returns /// /// The response from the faucet if successful, or an error if the test fails. 
+#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.perform_faucet_test", + skip_all, + level = "info", + ret(level = "debug"), + err +)] async fn perform_faucet_test( client: &Client, faucet_url: &Url, @@ -248,7 +264,15 @@ async fn perform_faucet_test( /// /// The nonce that solves the challenge, or an error if no solution is found within reasonable /// bounds. -#[instrument(target = COMPONENT, name = "solve-pow-challenge", skip_all, ret(level = "debug"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.solve_pow_challenge", + skip_all, + level = "info", + ret(level = "debug"), + err +)] fn solve_pow_challenge(challenge: &str, target: u64) -> anyhow::Result { let challenge_bytes = hex::decode(challenge).context("Failed to decode challenge from hex")?; diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index 6c8063a860..327a213b53 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -48,11 +48,21 @@ impl Tasks { } /// Spawn the RPC status checker task. - #[instrument(target = COMPONENT, name = "tasks.spawn-rpc-checker", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_rpc_checker", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_rpc_checker( &mut self, config: &MonitorConfig, ) -> Result> { + debug!(target: COMPONENT, rpc_url = %config.rpc_url, "Spawning RPC status checker task"); + // Create initial status for RPC service let mut rpc = ClientBuilder::new(config.rpc_url.clone()) .with_tls() @@ -79,15 +89,25 @@ impl Tasks { .id(); self.names.insert(id, "rpc-checker".to_string()); + debug!(target: COMPONENT, "RPC status checker task spawned successfully"); Ok(rpc_rx) } /// Spawn prover status and test tasks for all configured provers. 
- #[instrument(target = COMPONENT, name = "tasks.spawn-prover-tasks", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_prover_tasks", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_prover_tasks( &mut self, config: &MonitorConfig, ) -> Result, watch::Receiver)>> { + debug!(target: COMPONENT, prover_count = config.remote_prover_urls.len(), "Spawning prover tasks"); let mut prover_rxs = Vec::new(); for (i, prover_url) in config.remote_prover_urls.iter().enumerate() { @@ -195,11 +215,19 @@ impl Tasks { prover_rxs.push((prover_status_rx, prover_test_rx)); } + debug!(target: COMPONENT, spawned_provers = prover_rxs.len(), "All prover tasks spawned successfully"); Ok(prover_rxs) } /// Spawn the faucet testing task. - #[instrument(target = COMPONENT, name = "tasks.spawn-faucet", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_faucet", + skip_all, + level = "info", + ret(level = "debug") + )] pub fn spawn_faucet(&mut self, config: &MonitorConfig) -> Receiver { let current_time = current_unix_timestamp_secs(); @@ -236,8 +264,16 @@ impl Tasks { faucet_rx } - /// Spawn the network transaction service checker tasks (increment and tracking). - #[instrument(target = COMPONENT, name = "tasks.spawn-ntx-service", skip_all)] + /// Spawn the network transaction service checker task. + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_ntx_service", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_ntx_service( &mut self, config: &MonitorConfig, @@ -312,7 +348,14 @@ impl Tasks { } /// Spawn the HTTP frontend server. 
- #[instrument(target = COMPONENT, name = "tasks.spawn-frontend", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_http_server", + skip_all, + level = "info", + ret(level = "debug") + )] pub fn spawn_http_server(&mut self, server_state: ServerState, config: &MonitorConfig) { let config = config.clone(); let id = self.handles.spawn(async move { serve(server_state, config).await }).id(); diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index a9cee796ec..7ba5043078 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -87,7 +87,14 @@ pub struct ProverTestDetails { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. -#[instrument(target = COMPONENT, name = "remote-prover-test-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.run_remote_prover_test_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_remote_prover_test_task( prover_url: Url, name: &str, @@ -154,7 +161,14 @@ pub async fn run_remote_prover_test_task( /// # Returns /// /// A `ServiceStatus` containing the results of the proof test. -#[instrument(target = COMPONENT, name = "test-remote-prover", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.test_remote_prover", + skip_all, + level = "info", + ret(level = "debug") +)] async fn test_remote_prover( client: &mut miden_node_proto::clients::RemoteProverClient, name: &str, @@ -257,6 +271,15 @@ fn tonic_status_to_json(status: &tonic::Status) -> String { /// This function creates a mock transaction using `MockChainBuilder` similar to what's done /// in the remote prover tests. The transaction is generated once and can be reused for /// multiple proof test calls. 
+#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.generate_mock_transaction", + skip_all, + level = "info", + ret(level = "debug"), + err +)] pub async fn generate_mock_transaction() -> anyhow::Result { let mut mock_chain_builder = MockChainBuilder::new(); @@ -304,6 +327,14 @@ pub async fn generate_mock_transaction() -> anyhow::Result { // GENERATE TEST REQUEST PAYLOAD // ================================================================================================ +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.generate_prover_test_payload", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn generate_prover_test_payload() -> proto::remote_prover::ProofRequest { proto::remote_prover::ProofRequest { proof_type: proto::remote_prover::ProofType::Transaction.into(), diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index 1c4b6e326b..14ab47859d 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -15,7 +15,7 @@ use miden_node_proto::generated::rpc::{BlockProducerStatus, RpcStatus, StoreStat use serde::{Deserialize, Serialize}; use tokio::sync::watch; use tokio::time::MissedTickBehavior; -use tracing::{info, instrument}; +use tracing::{debug, info, instrument}; use url::Url; use crate::faucet::FaucetTestDetails; @@ -280,7 +280,14 @@ impl From for RpcStatusDetails { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. 
-#[instrument(target = COMPONENT, name = "rpc-status-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.run_rpc_status_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_rpc_status_task( rpc_url: Url, status_sender: watch::Sender, @@ -326,7 +333,14 @@ pub async fn run_rpc_status_task( /// # Returns /// /// A `ServiceStatus` containing the status of the RPC service. -#[instrument(target = COMPONENT, name = "check-status.rpc", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.check_rpc_status", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn check_rpc_status( rpc: &mut miden_node_proto::clients::RpcClient, current_time: u64, @@ -343,12 +357,15 @@ pub(crate) async fn check_rpc_status( details: ServiceDetails::RpcStatus(status.into()), } }, - Err(e) => ServiceStatus { - name: "RPC".to_string(), - status: Status::Unhealthy, - last_checked: current_time, - error: Some(e.to_string()), - details: ServiceDetails::Error, + Err(e) => { + debug!(target: COMPONENT, error = %e, "RPC status check failed"); + ServiceStatus { + name: "RPC".to_string(), + status: Status::Unhealthy, + last_checked: current_time, + error: Some(e.to_string()), + details: ServiceDetails::Error, + } }, } } @@ -372,7 +389,14 @@ pub(crate) async fn check_rpc_status( /// /// `Ok(())` if the monitoring task runs and completes successfully, or an error if there are /// connection issues or failures while checking the remote prover status. 
-#[instrument(target = COMPONENT, name = "remote-prover-status-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.run_remote_prover_status_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_remote_prover_status_task( prover_url: Url, name: String, @@ -428,7 +452,14 @@ pub async fn run_remote_prover_status_task( /// # Returns /// /// A `ServiceStatus` containing the status of the remote prover service. -#[instrument(target = COMPONENT, name = "check-status.remote-prover", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.check_remote_prover_status", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn check_remote_prover_status( remote_prover: &mut miden_node_proto::clients::RemoteProverProxyStatusClient, display_name: String, @@ -459,12 +490,15 @@ pub(crate) async fn check_remote_prover_status( details: ServiceDetails::RemoteProverStatus(remote_prover_details), } }, - Err(e) => ServiceStatus { - name: display_name, - status: Status::Unhealthy, - last_checked: current_time, - error: Some(e.to_string()), - details: ServiceDetails::Error, + Err(e) => { + debug!(target: COMPONENT, prover_name = %display_name, error = %e, "Remote prover status check failed"); + ServiceStatus { + name: display_name, + status: Status::Unhealthy, + last_checked: current_time, + error: Some(e.to_string()), + details: ServiceDetails::Error, + } }, } } From a03124eea43b91da7c2580d8352f18efc06569d9 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Wed, 17 Dec 2025 12:51:14 -0300 Subject: [PATCH 056/125] chore: unify and revisit query limits (#1443) * chore: unify and revisit query limits * review: remove TooMany* error variants * review: update limiter doc & rename MAX_RESPONSE_PAYLOAD_BYTES * review: fix changelog entry * review: create GENERAL_REQUEST_LIMIT 
const --- CHANGELOG.md | 1 + crates/store/src/constants.rs | 16 ------- .../store/src/db/models/queries/accounts.rs | 11 +++-- .../store/src/db/models/queries/nullifiers.rs | 4 +- .../src/db/models/queries/transactions.rs | 14 ++++-- crates/store/src/errors.rs | 8 ---- crates/store/src/lib.rs | 1 - crates/store/src/server/rpc_api.rs | 47 ++++++++++--------- crates/utils/src/limiter.rs | 37 +++++++++------ 9 files changed, 67 insertions(+), 72 deletions(-) delete mode 100644 crates/store/src/constants.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index f2289458b3..85f7f989ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). - Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). - [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). +- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). - [BREAKING] Added block signing capabilities to Validator component and updated gensis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)). - Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). - Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). diff --git a/crates/store/src/constants.rs b/crates/store/src/constants.rs deleted file mode 100644 index ba2dc620fd..0000000000 --- a/crates/store/src/constants.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Constants used for pagination and size limits across the store. - -/// Maximum number of account IDs that can be requested in a single query. 
-pub const MAX_ACCOUNT_IDS: usize = 100; - -/// Maximum number of nullifiers that can be requested in a single query. -pub const MAX_NULLIFIERS: usize = 100; - -/// Maximum number of note tags that can be requested in a single query. -pub const MAX_NOTE_TAGS: usize = 100; - -/// Maximum number of note IDs that can be requested in a single query. -pub const MAX_NOTE_IDS: usize = 100; - -/// Maximum payload size for all paginated endpoints (4 MB). -pub const MAX_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index fe51973596..32bd4dbd92 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -20,7 +20,11 @@ use diesel::{ use miden_lib::utils::{Deserializable, Serializable}; use miden_node_proto as proto; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; -use miden_node_utils::limiter::{QueryParamAccountIdLimit, QueryParamLimiter}; +use miden_node_utils::limiter::{ + MAX_RESPONSE_PAYLOAD_BYTES, + QueryParamAccountIdLimit, + QueryParamLimiter, +}; use miden_objects::account::delta::AccountUpdateDetails; use miden_objects::account::{ Account, @@ -36,7 +40,6 @@ use miden_objects::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_objects::block::{BlockAccountUpdate, BlockNumber}; use miden_objects::{Felt, Word}; -use crate::constants::MAX_PAYLOAD_BYTES; use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; @@ -271,7 +274,7 @@ pub(crate) fn select_account_vault_assets( // TODO: These limits should be given by the protocol. 
// See miden-base/issues/1770 for more details const ROW_OVERHEAD_BYTES: usize = 2 * size_of::() + size_of::(); // key + asset + block_num - const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); @@ -511,7 +514,7 @@ pub(crate) fn select_account_storage_map_values( // See miden-base/issues/1770 for more details pub const ROW_OVERHEAD_BYTES: usize = 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx - pub const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index 95e77ab46f..55e402bc8d 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -13,6 +13,7 @@ use diesel::{ }; use miden_lib::utils::{Deserializable, Serializable}; use miden_node_utils::limiter::{ + MAX_RESPONSE_PAYLOAD_BYTES, QueryParamLimiter, QueryParamNullifierLimit, QueryParamNullifierPrefixLimit, @@ -21,7 +22,6 @@ use miden_objects::block::BlockNumber; use miden_objects::note::Nullifier; use super::DatabaseError; -use crate::constants::MAX_PAYLOAD_BYTES; use crate::db::models::conv::{SqlTypeConvert, nullifier_prefix_to_raw_sql}; use crate::db::models::utils::{get_nullifier_prefix, vec_raw_try_into}; use crate::db::{NullifierInfo, schema}; @@ -69,7 +69,7 @@ pub(crate) fn select_nullifiers_by_prefix( pub const NULLIFIER_BYTES: usize = 32; // digest size (nullifier) pub const BLOCK_NUM_BYTES: usize = 4; // 32 bits per block number pub const ROW_OVERHEAD_BYTES: usize = NULLIFIER_BYTES + BLOCK_NUM_BYTES; // 36 bytes - pub const MAX_ROWS: usize = 
MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; assert_eq!(prefix_len, 16, "Only 16-bit prefixes are supported"); diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index 7b5caf5e73..cd0b7f5081 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -13,7 +13,11 @@ use diesel::{ SqliteConnection, }; use miden_lib::utils::Deserializable; -use miden_node_utils::limiter::{QueryParamAccountIdLimit, QueryParamLimiter}; +use miden_node_utils::limiter::{ + MAX_RESPONSE_PAYLOAD_BYTES, + QueryParamAccountIdLimit, + QueryParamLimiter, +}; use miden_objects::account::AccountId; use miden_objects::block::BlockNumber; use miden_objects::note::{NoteId, Nullifier}; @@ -280,11 +284,13 @@ pub fn select_transactions_records( account_ids: &[AccountId], block_range: RangeInclusive, ) -> Result<(BlockNumber, Vec), DatabaseError> { - const MAX_PAYLOAD_BYTES: i64 = 4 * 1024 * 1024; // 4 MB const NUM_TXS_PER_CHUNK: i64 = 1000; // Read 1000 transactions at a time QueryParamAccountIdLimit::check(account_ids.len())?; + let max_payload_bytes = + i64::try_from(MAX_RESPONSE_PAYLOAD_BYTES).expect("payload limit fits within i64"); + if block_range.is_empty() { return Err(DatabaseError::InvalidBlockRange { from: *block_range.start(), @@ -334,7 +340,7 @@ pub fn select_transactions_records( let mut last_added_tx: Option = None; for tx in chunk { - if total_size + tx.size_in_bytes <= MAX_PAYLOAD_BYTES { + if total_size + tx.size_in_bytes <= max_payload_bytes { total_size += tx.size_in_bytes; last_added_tx = Some(tx); added_from_chunk += 1; @@ -359,7 +365,7 @@ pub fn select_transactions_records( // Ensure block consistency: remove the last block if it's incomplete // (we may have stopped loading mid-block due to size constraints) - if total_size >= MAX_PAYLOAD_BYTES { + if total_size >= 
max_payload_bytes { // SAFETY: We're guaranteed to have at least one transaction since total_size > 0 let last_block_num = last_block_num.expect( "guaranteed to have processed at least one transaction when size limit is reached", diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 88a5583b0b..befc934a6a 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -339,8 +339,6 @@ pub enum NoteSyncError { MmrError(#[from] MmrError), #[error("invalid block range")] InvalidBlockRange(#[from] InvalidBlockRange), - #[error("too many note tags: received {0}, max {1}")] - TooManyNoteTags(usize, usize), #[error("malformed note tags")] DeserializationFailed(#[from] ConversionError), } @@ -450,8 +448,6 @@ pub enum GetNotesByIdError { DeserializationFailed(#[from] ConversionError), #[error("note {0} not found")] NoteNotFound(miden_objects::note::NoteId), - #[error("too many note IDs: received {0}, max {1}")] - TooManyNoteIds(usize, usize), #[error("note {0} is not public")] NoteNotPublic(miden_objects::note::NoteId), } @@ -480,8 +476,6 @@ pub enum CheckNullifiersError { DatabaseError(#[from] DatabaseError), #[error("malformed nullifier")] DeserializationFailed(#[from] ConversionError), - #[error("too many nullifiers: received {0}, maximum {1}")] - TooManyNullifiers(usize, usize), } // SYNC TRANSACTIONS ERRORS @@ -498,8 +492,6 @@ pub enum SyncTransactionsError { DeserializationFailed(#[from] ConversionError), #[error("account {0} not found")] AccountNotFound(AccountId), - #[error("too many account IDs: received {0}, max {1}")] - TooManyAccountIds(usize, usize), } // Do not scope for `cfg(test)` - if it the traitbounds don't suffice the issue will already appear diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index a9974fa7ca..ce49564703 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -2,7 +2,6 @@ use std::time::Duration; mod accounts; mod blocks; -mod constants; mod db; mod errors; pub mod genesis; diff 
--git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 02a5725188..748cd07703 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -2,6 +2,13 @@ use miden_node_proto::convert; use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; +use miden_node_utils::limiter::{ + QueryParamAccountIdLimit, + QueryParamLimiter, + QueryParamNoteIdLimit, + QueryParamNoteTagLimit, + QueryParamNullifierLimit, +}; use miden_objects::Word; use miden_objects::account::AccountId; use miden_objects::note::NoteId; @@ -9,7 +16,6 @@ use tonic::{Request, Response, Status}; use tracing::{debug, info, instrument}; use crate::COMPONENT; -use crate::constants::{MAX_ACCOUNT_IDS, MAX_NOTE_IDS, MAX_NOTE_TAGS, MAX_NULLIFIERS}; use crate::errors::{ CheckNullifiersError, GetBlockByNumberError, @@ -77,13 +83,7 @@ impl rpc_server::Rpc for StoreApi { let request = request.into_inner(); // Validate nullifiers count - if request.nullifiers.len() > MAX_NULLIFIERS { - return Err(CheckNullifiersError::TooManyNullifiers( - request.nullifiers.len(), - MAX_NULLIFIERS, - ) - .into()); - } + check::(request.nullifiers.len())?; let nullifiers = validate_nullifiers::(&request.nullifiers)?; @@ -224,11 +224,7 @@ impl rpc_server::Rpc for StoreApi { .into_inclusive_range::(&chain_tip)?; // Validate note tags count - if request.note_tags.len() > MAX_NOTE_TAGS { - return Err( - NoteSyncError::TooManyNoteTags(request.note_tags.len(), MAX_NOTE_TAGS).into() - ); - } + check::(request.note_tags.len())?; let (state, mmr_proof, last_block_included) = self.state.sync_notes(request.note_tags, block_range).await?; @@ -268,9 +264,7 @@ impl rpc_server::Rpc for StoreApi { let note_ids = request.into_inner().ids; // Validate note IDs count - if note_ids.len() > MAX_NOTE_IDS { - return Err(GetNotesByIdError::TooManyNoteIds(note_ids.len(), MAX_NOTE_IDS).into()); - } + 
check::(note_ids.len())?; let note_ids: Vec = convert_digests_to_words::(note_ids)?; @@ -545,13 +539,7 @@ impl rpc_server::Rpc for StoreApi { read_account_ids::(&request.account_ids)?; // Validate account IDs count - if account_ids.len() > MAX_ACCOUNT_IDS { - return Err(SyncTransactionsError::TooManyAccountIds( - account_ids.len(), - MAX_ACCOUNT_IDS, - ) - .into()); - } + check::(account_ids.len())?; let (last_block_included, transaction_records_db) = self .state @@ -604,3 +592,16 @@ impl rpc_server::Rpc for StoreApi { })) } } + +// LIMIT HELPERS +// ================================================================================================ + +/// Formats an "Out of range" error +fn out_of_range_error(err: E) -> Status { + Status::out_of_range(err.to_string()) +} + +/// Check, but don't repeat ourselves mapping the error +fn check(n: usize) -> Result<(), Status> { + ::check(n).map_err(out_of_range_error) +} diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index d024998417..cf43407174 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -1,12 +1,17 @@ -//! Limit the size of a parameter list for a specific parameter +//! Limits for RPC and store parameters and payload sizes. //! -//! Used for: -//! 1. the external facing RPC -//! 2. limiting SQL statements not exceeding parameter limits +//! # Rationale +//! - Parameter limits are kept at [`GENERAL_REQUEST_LIMIT`] items across all multi-value RPC +//! parameters. This caps worst-case SQL `IN` clauses and keeps responses comfortably under the 4 +//! MiB payload budget enforced in the store. +//! - Limits are enforced both at the RPC boundary and inside the store to prevent bypasses and to +//! avoid expensive queries even if validation is skipped earlier in the stack. +//! - `MAX_PAGINATED_PAYLOAD_BYTES` is set to 4 MiB (e.g. 1000 nullifier rows at ~36 B each, 1000 +//! transactions summaries streamed in chunks). //! -//! 
The 1st is good to terminate invalid requests as early as possible, -//! where the second is both a fallback and a safeguard not benching -//! pointless parameter combinations. +//! Add new limits here so callers share the same values and rationale. + +const GENERAL_REQUEST_LIMIT: usize = 1000; #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] @@ -37,12 +42,16 @@ pub trait QueryParamLimiter { } } +/// Maximum payload size (in bytes) for paginated responses returned by the +/// store. +pub const MAX_RESPONSE_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; + /// Used for the following RPC endpoints /// * `state_sync` pub struct QueryParamAccountIdLimit; impl QueryParamLimiter for QueryParamAccountIdLimit { const PARAM_NAME: &str = "account_id"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints @@ -50,7 +59,7 @@ impl QueryParamLimiter for QueryParamAccountIdLimit { pub struct QueryParamNullifierPrefixLimit; impl QueryParamLimiter for QueryParamNullifierPrefixLimit { const PARAM_NAME: &str = "nullifier_prefix"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints @@ -60,7 +69,7 @@ impl QueryParamLimiter for QueryParamNullifierPrefixLimit { pub struct QueryParamNullifierLimit; impl QueryParamLimiter for QueryParamNullifierLimit { const PARAM_NAME: &str = "nullifier"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints @@ -68,7 +77,7 @@ impl QueryParamLimiter for QueryParamNullifierLimit { pub struct QueryParamNoteTagLimit; impl QueryParamLimiter for QueryParamNoteTagLimit { const PARAM_NAME: &str = "note_tag"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints @@ -76,19 +85,19 @@ impl QueryParamLimiter for QueryParamNoteTagLimit { pub struct QueryParamNoteIdLimit; impl QueryParamLimiter for 
QueryParamNoteIdLimit { const PARAM_NAME: &str = "note_id"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for internal queries retrieving note inclusion proofs by commitment. pub struct QueryParamNoteCommitmentLimit; impl QueryParamLimiter for QueryParamNoteCommitmentLimit { const PARAM_NAME: &str = "note_commitment"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Only used internally, not exposed via public RPC. pub struct QueryParamBlockLimit; impl QueryParamLimiter for QueryParamBlockLimit { const PARAM_NAME: &str = "block_header"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } From 41fa46e2acaa8f33ba43b9fd65a03f6d555b5146 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 18 Dec 2025 17:29:09 -0300 Subject: [PATCH 057/125] feat: track ntx latency (#1430) * feat: track ntx latency * review: fix changelog entries * review: reduce lock risk & improve docs * review: remove pending_latency field --- CHANGELOG.md | 1 + bin/network-monitor/.env | 1 + bin/network-monitor/README.md | 3 + bin/network-monitor/assets/index.html | 6 + bin/network-monitor/src/config.rs | 10 + bin/network-monitor/src/counter.rs | 260 +++++++++++++++++------ bin/network-monitor/src/monitor/tasks.rs | 30 ++- bin/network-monitor/src/status.rs | 11 + 8 files changed, 254 insertions(+), 68 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 85f7f989ef..ea5f192d7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). 
- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). +- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)). - [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). - Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). - [BREAKING] Added block signing capabilities to Validator component and updated gensis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)). diff --git a/bin/network-monitor/.env b/bin/network-monitor/.env index c5779257db..9dfb9d7494 100644 --- a/bin/network-monitor/.env +++ b/bin/network-monitor/.env @@ -16,3 +16,4 @@ MIDEN_MONITOR_DISABLE_NTX_SERVICE=false MIDEN_MONITOR_COUNTER_FILEPATH=counter_account.mac MIDEN_MONITOR_WALLET_FILEPATH=wallet_account.mac MIDEN_MONITOR_COUNTER_INCREMENT_INTERVAL=30s +MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT=2m diff --git a/bin/network-monitor/README.md b/bin/network-monitor/README.md index abde03a7e9..c9245996d6 100644 --- a/bin/network-monitor/README.md +++ b/bin/network-monitor/README.md @@ -40,6 +40,7 @@ miden-network-monitor start --faucet-url http://localhost:8080 --enable-otel - `--wallet-filepath`: Path where the wallet account is located (default: `wallet_account.mac`) - `--counter-filepath`: Path where the network account is located (default: `counter_program.mac`) - `--counter-increment-interval`: Interval at which to send the increment counter transaction (default: `30s`) +- `--counter-latency-timeout`: Maximum time to wait for a counter update after submitting a transaction (default: `2m`) - `--help, -h`: Show help information - `--version, -V`: Show version information @@ -60,6 +61,7 @@ If command-line arguments are not provided, the application falls back to enviro - 
`MIDEN_MONITOR_WALLET_FILEPATH`: Path where the wallet account is located - `MIDEN_MONITOR_COUNTER_FILEPATH`: Path where the network account is located - `MIDEN_MONITOR_COUNTER_INCREMENT_INTERVAL`: Interval at which to send the increment counter transaction +- `MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT`: Maximum time to wait for a counter update after submitting a transaction ## Commands @@ -180,6 +182,7 @@ The monitor application provides real-time status monitoring for the following M - **Metrics**: - Success/Failure counts for increment transactions - Last TX ID with copy-to-clipboard + - Latency in blocks from submission to observed counter update (with pending measurement tracking) ### Network Transactions (Counter Tracking) - **Service Health**: Real-time monitoring of on-chain counter value changes diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index f3fd32bc4d..9075c48f7e 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -334,6 +334,12 @@ Success Rate: ${formatSuccessRate(details.NtxIncrement.success_count, details.NtxIncrement.failure_count)}
+ ${details.NtxIncrement.last_latency_blocks !== null && details.NtxIncrement.last_latency_blocks !== undefined ? ` +
+ Latency: + ${details.NtxIncrement.last_latency_blocks} blocks +
+ ` : ''} ${details.NtxIncrement.last_tx_id ? `
Last TX ID: diff --git a/bin/network-monitor/src/config.rs b/bin/network-monitor/src/config.rs index fa2af59e25..767fbf8364 100644 --- a/bin/network-monitor/src/config.rs +++ b/bin/network-monitor/src/config.rs @@ -138,6 +138,16 @@ pub struct MonitorConfig { )] pub counter_increment_interval: Duration, + /// Maximum time to wait for the counter update after submitting a transaction. + #[arg( + long = "counter-latency-timeout", + env = "MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT", + default_value = "2m", + value_parser = humantime::parse_duration, + help = "Maximum time to wait for a counter update after submitting a transaction" + )] + pub counter_latency_timeout: Duration, + /// The timeout for the outgoing requests. #[arg( long = "request-timeout", diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index c6b4b5c003..d7bfdb6ee6 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -6,6 +6,7 @@ use std::path::Path; use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::Instant; use anyhow::{Context, Result}; use miden_lib::AuthScheme; @@ -38,7 +39,7 @@ use miden_tx::utils::Serializable; use miden_tx::{LocalTransactionProver, TransactionExecutor}; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; -use tokio::sync::watch; +use tokio::sync::{Mutex, watch}; use tracing::{error, info, instrument, warn}; use crate::COMPONENT; @@ -47,11 +48,19 @@ use crate::deploy::{MonitorDataStore, create_genesis_aware_rpc_client, get_count use crate::status::{ CounterTrackingDetails, IncrementDetails, + PendingLatencyDetails, ServiceDetails, ServiceStatus, Status, }; +#[derive(Debug, Default, Clone)] +pub struct LatencyState { + pending: Option, + pending_started: Option, + last_latency_blocks: Option, +} + /// Get the genesis block header. 
async fn get_genesis_block_header(rpc_client: &mut RpcClient) -> Result { let block_header_request = BlockHeaderByNumberRequest { @@ -208,6 +217,7 @@ pub async fn run_increment_task( config: MonitorConfig, tx: watch::Sender, expected_counter_value: Arc, + latency_state: Arc>, ) -> Result<()> { // Create RPC client let mut rpc_client = @@ -229,7 +239,9 @@ pub async fn run_increment_task( loop { interval.tick().await; - let last_error = match create_and_submit_network_note( + let mut last_error = None; + + match create_and_submit_network_note( &wallet_account, &counter_account, &secret_key, @@ -241,16 +253,34 @@ pub async fn run_increment_task( ) .await { - Ok((tx_id, final_account, _block_height)) => handle_increment_success( - &mut wallet_account, - &final_account, - &mut data_store, - &mut details, - tx_id, - &expected_counter_value, - )?, - Err(e) => Some(handle_increment_failure(&mut details, &e)), - }; + Ok((tx_id, final_account, block_height)) => { + let target_value = handle_increment_success( + &mut wallet_account, + &final_account, + &mut data_store, + &mut details, + tx_id, + &expected_counter_value, + )?; + + { + let mut guard = latency_state.lock().await; + guard.pending = Some(PendingLatencyDetails { + submit_height: block_height.as_u32(), + target_value, + }); + guard.pending_started = Some(Instant::now()); + } + }, + Err(e) => { + last_error = Some(handle_increment_failure(&mut details, &e)); + }, + } + + { + let guard = latency_state.lock().await; + details.last_latency_blocks = guard.last_latency_blocks; + } let status = build_increment_status(&details, last_error); send_status(&tx, status)?; @@ -258,6 +288,8 @@ pub async fn run_increment_task( } /// Handle the success path for increment operations. +/// +/// Returns the next expected counter value after a successful increment. 
fn handle_increment_success( wallet_account: &mut Account, final_account: &AccountHeader, @@ -265,7 +297,7 @@ fn handle_increment_success( details: &mut IncrementDetails, tx_id: String, expected_counter_value: &Arc, -) -> Result> { +) -> Result { let updated_wallet = Account::new( wallet_account.id(), wallet_account.vault().clone(), @@ -281,9 +313,9 @@ fn handle_increment_success( details.last_tx_id = Some(tx_id); // Increment the expected counter value - expected_counter_value.fetch_add(1, Ordering::Relaxed); + let new_expected = expected_counter_value.fetch_add(1, Ordering::Relaxed) + 1; - Ok(None) + Ok(new_expected) } /// Handle the failure path when creating/submitting the network note fails. @@ -353,6 +385,7 @@ pub async fn run_counter_tracking_task( config: MonitorConfig, tx: watch::Sender, expected_counter_value: Arc, + latency_state: Arc>, ) -> Result<()> { // Create RPC client let mut rpc_client = @@ -368,11 +401,45 @@ pub async fn run_counter_tracking_task( }; let mut details = CounterTrackingDetails::default(); + initialize_counter_tracking_state( + &mut rpc_client, + &counter_account, + &expected_counter_value, + &mut details, + ) + .await; + + let mut poll_interval = tokio::time::interval(config.counter_increment_interval / 2); + + loop { + poll_interval.tick().await; + + let last_error = poll_counter_once( + &mut rpc_client, + &counter_account, + &expected_counter_value, + &latency_state, + &mut details, + &config, + ) + .await; + let status = build_tracking_status(&details, last_error); + send_status(&tx, status)?; + } +} - // Initialize the expected counter value by fetching the current value from the node - match fetch_counter_value(&mut rpc_client, counter_account.id()).await { +/// Initialize tracking state by fetching the current counter value from the node. +/// +/// Populates `expected_counter_value` and seeds `details` with the latest observed +/// values so the first poll iteration starts from a consistent snapshot. 
+async fn initialize_counter_tracking_state( + rpc_client: &mut RpcClient, + counter_account: &Account, + expected_counter_value: &Arc, + details: &mut CounterTrackingDetails, +) { + match fetch_counter_value(rpc_client, counter_account.id()).await { Ok(Some(initial_value)) => { - // Set the expected value to the current value from the node expected_counter_value.store(initial_value, Ordering::Relaxed); details.current_value = Some(initial_value); details.expected_value = Some(initial_value); @@ -380,61 +447,121 @@ pub async fn run_counter_tracking_task( info!("Initialized counter tracking with value: {}", initial_value); }, Ok(None) => { - // Counter doesn't exist yet, initialize to 0 expected_counter_value.store(0, Ordering::Relaxed); warn!("Counter account not found, initializing expected value to 0"); }, Err(e) => { - // Failed to fetch, initialize to 0 but log the error expected_counter_value.store(0, Ordering::Relaxed); error!("Failed to fetch initial counter value, initializing to 0: {:?}", e); }, } +} - let mut poll_interval = tokio::time::interval(config.counter_increment_interval / 2); +/// Poll the counter once, updating details and latency tracking state. +/// +/// Returns a human-readable error string when the poll fails or latency tracking +/// cannot complete; otherwise returns `None`. 
+async fn poll_counter_once( + rpc_client: &mut RpcClient, + counter_account: &Account, + expected_counter_value: &Arc, + latency_state: &Arc>, + details: &mut CounterTrackingDetails, + config: &MonitorConfig, +) -> Option { + let mut last_error = None; + let current_time = crate::monitor::tasks::current_unix_timestamp_secs(); + + match fetch_counter_value(rpc_client, counter_account.id()).await { + Ok(Some(value)) => { + details.current_value = Some(value); + details.last_updated = Some(current_time); + + update_expected_and_pending(details, expected_counter_value, value); + handle_latency_tracking(rpc_client, latency_state, config, value, &mut last_error) + .await; + }, + Ok(None) => { + // Counter value not available, but not an error + }, + Err(e) => { + error!("Failed to fetch counter value: {:?}", e); + last_error = Some(format!("fetch counter value failed: {e}")); + }, + } - loop { - poll_interval.tick().await; + last_error +} - let current_time = crate::monitor::tasks::current_unix_timestamp_secs(); - let last_error = match fetch_counter_value(&mut rpc_client, counter_account.id()).await { - Ok(Some(value)) => { - // Update current value and timestamp - details.current_value = Some(value); - details.last_updated = Some(current_time); - - // Get expected value and calculate pending increments - let expected = expected_counter_value.load(Ordering::Relaxed); - details.expected_value = Some(expected); - - // Calculate how many increments are pending (expected - current) - // Use saturating_sub to avoid negative values if current > expected (shouldn't - // happen normally, but could due to race conditions) - if expected >= value { - details.pending_increments = Some(expected - value); - } else { - // This shouldn't happen, but log it if it does - warn!( - "Expected counter value ({}) is less than current value ({}), setting pending to 0", - expected, value - ); - details.pending_increments = Some(0); - } +/// Update expected and pending counters based on the 
latest observed value. +fn update_expected_and_pending( + details: &mut CounterTrackingDetails, + expected_counter_value: &Arc, + observed_value: u64, +) { + let expected = expected_counter_value.load(Ordering::Relaxed); + details.expected_value = Some(expected); - None - }, - Ok(None) => { - // Counter value not available, but not an error - None - }, - Err(e) => { - error!("Failed to fetch counter value: {:?}", e); - Some(format!("fetch counter value failed: {e}")) - }, - }; + if expected >= observed_value { + details.pending_increments = Some(expected - observed_value); + } else { + warn!( + "Expected counter value ({}) is less than current value ({}), setting pending to 0", + expected, observed_value + ); + details.pending_increments = Some(0); + } +} - let status = build_tracking_status(&details, last_error); - send_status(&tx, status)?; +/// Update latency tracking state, performing RPC as needed while minimizing lock hold time. +/// +/// Populates `last_error` when latency bookkeeping fails or times out. 
+async fn handle_latency_tracking( + rpc_client: &mut RpcClient, + latency_state: &Arc>, + config: &MonitorConfig, + observed_value: u64, + last_error: &mut Option, +) { + let (pending, pending_started) = { + let guard = latency_state.lock().await; + (guard.pending.clone(), guard.pending_started) + }; + + if let Some(pending) = pending { + if observed_value >= pending.target_value { + match fetch_chain_tip(rpc_client).await { + Ok(observed_height) => { + let latency_blocks = observed_height.saturating_sub(pending.submit_height); + let mut guard = latency_state.lock().await; + if guard.pending.as_ref().map(|p| p.target_value) == Some(pending.target_value) + { + guard.last_latency_blocks = Some(latency_blocks); + guard.pending = None; + guard.pending_started = None; + } + }, + Err(e) => { + *last_error = Some(format!("Failed to fetch chain tip for latency calc: {e}")); + }, + } + } else if let Some(started) = pending_started { + if Instant::now().saturating_duration_since(started) >= config.counter_latency_timeout { + warn!( + "Latency measurement timed out after {:?} for target value {}", + config.counter_latency_timeout, pending.target_value + ); + let mut guard = latency_state.lock().await; + if guard.pending.as_ref().map(|p| p.target_value) == Some(pending.target_value) { + guard.pending = None; + guard.pending_started = None; + } + *last_error = Some(format!( + "Timed out after {:?} waiting for counter to reach {}", + config.counter_latency_timeout, pending.target_value + )); + } + } } } @@ -594,3 +721,16 @@ fn create_network_note( let network_note = Note::new(NoteAssets::new(vec![])?, metadata, recipient.clone()); Ok((network_note, recipient)) } + +/// Fetch the current chain tip height from RPC status. 
+async fn fetch_chain_tip(rpc_client: &mut RpcClient) -> Result { + let status = rpc_client.status(()).await?.into_inner(); + + if let Some(block_producer_status) = status.block_producer { + Ok(block_producer_status.chain_tip) + } else if let Some(store_status) = status.store { + Ok(store_status.chain_tip) + } else { + anyhow::bail!("RPC status response did not include a chain tip") + } +} diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index 327a213b53..4aa5addec9 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -11,14 +11,14 @@ use miden_node_proto::clients::{ RemoteProverProxyStatusClient, RpcClient, }; -use tokio::sync::watch; use tokio::sync::watch::Receiver; +use tokio::sync::{Mutex, watch}; use tokio::task::{Id, JoinSet}; use tracing::{debug, instrument}; use crate::COMPONENT; use crate::config::MonitorConfig; -use crate::counter::{run_counter_tracking_task, run_increment_task}; +use crate::counter::{LatencyState, run_counter_tracking_task, run_increment_task}; use crate::deploy::ensure_accounts_exist; use crate::faucet::run_faucet_test_task; use crate::frontend::{ServerState, serve}; @@ -286,6 +286,9 @@ impl Tasks { // Create shared atomic counter for tracking expected counter value let expected_counter_value = Arc::new(AtomicU64::new(0)); + let latency_state = Arc::new(Mutex::new(LatencyState::default())); + let latency_state_for_increment = latency_state.clone(); + let latency_state_for_tracking = latency_state.clone(); // Create initial increment status let initial_increment_status = ServiceStatus { @@ -297,6 +300,7 @@ impl Tasks { success_count: 0, failure_count: 0, last_tx_id: None, + last_latency_blocks: None, }), }; @@ -323,9 +327,14 @@ impl Tasks { let increment_id = self .handles .spawn(async move { - Box::pin(run_increment_task(config_clone, increment_tx, counter_clone)) - .await - .expect("Counter increment task runs indefinitely"); + 
Box::pin(run_increment_task( + config_clone, + increment_tx, + counter_clone, + latency_state_for_increment, + )) + .await + .expect("Counter increment task runs indefinitely"); }) .id(); self.names.insert(increment_id, "counter-increment".to_string()); @@ -337,9 +346,14 @@ impl Tasks { let tracking_id = self .handles .spawn(async move { - Box::pin(run_counter_tracking_task(config_clone, tracking_tx, counter_clone)) - .await - .expect("Counter tracking task runs indefinitely"); + Box::pin(run_counter_tracking_task( + config_clone, + tracking_tx, + counter_clone, + latency_state_for_tracking, + )) + .await + .expect("Counter tracking task runs indefinitely"); }) .id(); self.names.insert(tracking_id, "counter-tracking".to_string()); diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index 14ab47859d..a30554f06a 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -78,6 +78,17 @@ pub struct IncrementDetails { pub failure_count: u64, /// Last transaction ID (if available). pub last_tx_id: Option, + /// Last measured latency in blocks from submission to state update. + pub last_latency_blocks: Option, +} + +/// Details about an in-flight latency measurement. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct PendingLatencyDetails { + /// Block height returned when the transaction was submitted. + pub submit_height: u32, + /// Counter value we expect to see once the transaction is applied. + pub target_value: u64, } /// Details of the counter tracking service. 
From 5236322091980798902eb8387ab68d26255007f6 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 18 Dec 2025 19:31:44 -0300 Subject: [PATCH 058/125] feat: add explorer to network monitor (#1450) * feat: add explorer to network monitor * review: add warning for tips mismatch * review: make graphql variable a constant * review: move env example to devnet * review: explicit frontend check in frontend * review: abstract copy button & improve tip diff messagE * review: create explorer.rs, improve code readability * fix: remove duplicated changelog entry --- CHANGELOG.md | 2 +- bin/network-monitor/.env | 6 +- bin/network-monitor/README.md | 10 + bin/network-monitor/assets/index.css | 18 ++ bin/network-monitor/assets/index.html | 145 +++++++++--- bin/network-monitor/src/commands/start.rs | 8 + bin/network-monitor/src/config.rs | 8 + bin/network-monitor/src/explorer.rs | 257 ++++++++++++++++++++++ bin/network-monitor/src/frontend.rs | 6 + bin/network-monitor/src/main.rs | 1 + bin/network-monitor/src/monitor/tasks.rs | 33 +++ bin/network-monitor/src/status.rs | 15 ++ 12 files changed, 475 insertions(+), 34 deletions(-) create mode 100644 bin/network-monitor/src/explorer.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index ea5f192d7f..e0fdf00cbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). - Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)). - Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). +- Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/miden-node/pull/1450)). 
### Changes @@ -23,7 +24,6 @@ - Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)). - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298), [#1436](https://github.com/0xMiden/miden-node/pull/1436)). -- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298)). - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). - Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). - Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)). 
diff --git a/bin/network-monitor/.env b/bin/network-monitor/.env index 9dfb9d7494..8474b06816 100644 --- a/bin/network-monitor/.env +++ b/bin/network-monitor/.env @@ -3,13 +3,13 @@ MIDEN_MONITOR_PORT=3001 MIDEN_MONITOR_ENABLE_OTEL=true MIDEN_MONITOR_REQUEST_TIMEOUT=10s # rpc checks -MIDEN_MONITOR_RPC_URL=http://0.0.0.0:57291 +MIDEN_MONITOR_RPC_URL=https://rpc.devnet.miden.io/ MIDEN_MONITOR_STATUS_CHECK_INTERVAL=30s # remote prover checks MIDEN_MONITOR_REMOTE_PROVER_URLS=https://tx-prover.devnet.miden.io/,https://batch-prover.devnet.miden.io/ MIDEN_MONITOR_REMOTE_PROVER_TEST_INTERVAL=2m # faucet checks -MIDEN_MONITOR_FAUCET_URL=http://localhost:8080 +MIDEN_MONITOR_FAUCET_URL=https://faucet-api.devnet.miden.io/ MIDEN_MONITOR_FAUCET_TEST_INTERVAL=2m # network transaction checks MIDEN_MONITOR_DISABLE_NTX_SERVICE=false @@ -17,3 +17,5 @@ MIDEN_MONITOR_COUNTER_FILEPATH=counter_account.mac MIDEN_MONITOR_WALLET_FILEPATH=wallet_account.mac MIDEN_MONITOR_COUNTER_INCREMENT_INTERVAL=30s MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT=2m +# explorer checks +MIDEN_MONITOR_EXPLORER_URL=https://scan-backend-devnet-miden.eu-central-8.gateway.fm/graphql diff --git a/bin/network-monitor/README.md b/bin/network-monitor/README.md index c9245996d6..48106a1a96 100644 --- a/bin/network-monitor/README.md +++ b/bin/network-monitor/README.md @@ -30,6 +30,7 @@ miden-network-monitor start --faucet-url http://localhost:8080 --enable-otel - `--rpc-url`: RPC service URL (default: `http://localhost:50051`) - `--remote-prover-urls`: Comma-separated list of remote prover URLs. If omitted or empty, prover tasks are disabled. - `--faucet-url`: Faucet service URL for testing. If omitted, faucet testing is disabled. +- `--explorer-url`: Explorer service GraphQL endpoint. If omitted, explorer checks are disabled. - `--disable-ntx-service`: Disable the network transaction service checks (enabled by default). 
The network transaction service consists of two components: counter increment (sending increment transactions) and counter tracking (monitoring counter value changes). - `--remote-prover-test-interval`: Interval at which to test the remote provers services (default: `2m`) - `--faucet-test-interval`: Interval at which to test the faucet services (default: `2m`) @@ -51,6 +52,7 @@ If command-line arguments are not provided, the application falls back to enviro - `MIDEN_MONITOR_RPC_URL`: RPC service URL - `MIDEN_MONITOR_REMOTE_PROVER_URLS`: Comma-separated list of remote prover URLs. If unset or empty, prover tasks are disabled. - `MIDEN_MONITOR_FAUCET_URL`: Faucet service URL for testing. If unset, faucet testing is disabled. +- `MIDEN_MONITOR_EXPLORER_URL`: Explorer service GraphQL endpoint. If unset, explorer checks are disabled. - `MIDEN_MONITOR_DISABLE_NTX_SERVICE`: Set to `true` to disable the network transaction service checks (enabled by default). This affects both counter increment and tracking components. - `MIDEN_MONITOR_REMOTE_PROVER_TEST_INTERVAL`: Interval at which to test the remote provers services - `MIDEN_MONITOR_FAUCET_TEST_INTERVAL`: Interval at which to test the faucet services @@ -153,6 +155,14 @@ The monitor application provides real-time status monitoring for the following M - **Block Producer Status**: - Block producer version and health +### Explorer +- **Service Health**: Explorer availability and freshness of the latest block +- **Latest Block Metadata**: + - Block height and timestamp + - Transactions, nullifiers, notes, and account updates counts + - Block, chain, and proof commitments (shortened display with copy-to-clipboard) +- **Block Delta**: The difference between the explorer's block height and the RPC's chain tip. If the difference is greater than a tolerance, a warning is displayed. This check is performed in the frontend. 
+ ### Remote Provers - **Service Health**: Individual remote prover availability and status - **Version Information**: Remote prover service version diff --git a/bin/network-monitor/assets/index.css b/bin/network-monitor/assets/index.css index 722aa61c75..3a0c8b4416 100644 --- a/bin/network-monitor/assets/index.css +++ b/bin/network-monitor/assets/index.css @@ -450,6 +450,24 @@ body { font-weight: 500; } +.metric-value.warning-delta, +.warning-text { + color: #ff8c00; +} + +.warning-text { + font-weight: 500; + font-size: 12px; +} + +.warning-banner { + margin-top: 8px; + padding: 8px 12px; + border-radius: 4px; + background: rgba(255, 85, 0, 0.08); + border-left: 3px solid #ff8c00; +} + .test-metrics.healthy .metric-value { color: #22C55D; } diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index 9075c48f7e..ea7b9cb64c 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -48,6 +48,23 @@ + diff --git a/bin/network-monitor/assets/index.js b/bin/network-monitor/assets/index.js new file mode 100644 index 0000000000..049de239d1 --- /dev/null +++ b/bin/network-monitor/assets/index.js @@ -0,0 +1,798 @@ +// Miden Network Monitor - Frontend JavaScript +// ================================================================================================ + +let statusData = null; +let updateInterval = null; +const EXPLORER_LAG_TOLERANCE = 20; // max allowed block delta vs RPC, roughly 1 minute + +// Store gRPC-Web probe results keyed by service URL +const grpcWebProbeResults = new Map(); + +// gRPC-Web probe implementation +// ================================================================================================ + +/** + * Performs a gRPC-Web probe to the given URL and path. + * This sends a real browser-originated gRPC-Web request to test connectivity, + * CORS configuration, and gRPC-Web protocol handling. 
+ * + * @param {string} baseUrl - The base URL of the service (e.g., "https://prover.example.com:443") + * @param {string} grpcPath - The gRPC method path (e.g., "/remote_prover.ProxyStatusApi/Status") + * @returns {Promise<{ok: boolean, latencyMs: number, error: string|null}>} + */ +async function probeGrpcWeb(baseUrl, grpcPath) { + const startTime = performance.now(); + + // Normalize URL: remove trailing slash from baseUrl + const normalizedUrl = baseUrl.replace(/\/+$/, ''); + const fullUrl = `${normalizedUrl}${grpcPath}`; + + // gRPC-Web frame for google.protobuf.Empty: + // - 1 byte compressed flag = 0x00 (not compressed) + // - 4 bytes big-endian length = 0x00000000 (empty message) + const emptyGrpcWebFrame = new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00]); + + // Build headers - RPC service requires custom Accept header + const headers = { + 'Content-Type': 'application/grpc-web+proto', + 'X-Grpc-Web': '1', + }; + + // The RPC service requires 'application/vnd.miden' in Accept header + // (this is the custom media type used by the Miden gRPC clients) + // The remote prover accepts standard gRPC-Web content types + if (grpcPath.startsWith('/rpc.')) { + headers['Accept'] = 'application/vnd.miden'; + } else { + headers['Accept'] = 'application/grpc-web+proto'; + } + + try { + const response = await fetch(fullUrl, { + method: 'POST', + headers, + body: emptyGrpcWebFrame, + }); + + const latencyMs = Math.round(performance.now() - startTime); + + if (!response.ok) { + return { + ok: false, + latencyMs, + error: `HTTP ${response.status}: ${response.statusText}`, + }; + } + + // Read the response body as bytes + const responseBytes = new Uint8Array(await response.arrayBuffer()); + + // Parse gRPC-Web response to extract grpc-status from trailers + const grpcStatus = parseGrpcWebTrailers(responseBytes); + + if (grpcStatus === '0' || grpcStatus === null) { + // grpc-status 0 means OK; null means no trailer found (might still be OK) + return { ok: true, latencyMs, 
error: null }; + } else { + return { + ok: false, + latencyMs, + error: `grpc-status: ${grpcStatus}`, + }; + } + } catch (err) { + const latencyMs = Math.round(performance.now() - startTime); + + // TypeError: Failed to fetch usually indicates CORS or network error + if (err instanceof TypeError) { + return { + ok: false, + latencyMs, + error: 'CORS / Network error: ' + err.message, + }; + } + + return { + ok: false, + latencyMs, + error: err.message || String(err), + }; + } +} + +/** + * Parses gRPC-Web response bytes to extract the grpc-status from trailers. + * gRPC-Web trailers are sent as a frame with flag 0x80. + * + * @param {Uint8Array} data - The response body bytes + * @returns {string|null} - The grpc-status value, or null if not found + */ +function parseGrpcWebTrailers(data) { + let offset = 0; + + while (offset + 5 <= data.length) { + const flag = data[offset]; + const length = (data[offset + 1] << 24) | + (data[offset + 2] << 16) | + (data[offset + 3] << 8) | + data[offset + 4]; + + offset += 5; + + if (offset + length > data.length) break; + + // Flag 0x80 indicates trailers + if (flag === 0x80) { + const trailerBytes = data.slice(offset, offset + length); + const trailerText = new TextDecoder().decode(trailerBytes); + + // Parse trailer headers (format: "key: value\r\n") + const lines = trailerText.split(/\r?\n/); + for (const line of lines) { + const match = line.match(/^grpc-status:\s*(\d+)/i); + if (match) { + return match[1]; + } + } + } + + offset += length; + } + + return null; +} + +// Interval for periodic gRPC-Web probing +let grpcWebProbeInterval = null; +const GRPC_WEB_PROBE_INTERVAL_MS = 30000; // Probe every 30 seconds + +/** + * Collects all gRPC-Web endpoints that need to be probed from the current status data. 
+ * + * @returns {Array<{serviceKey: string, baseUrl: string, grpcPath: string}>} + */ +function collectGrpcWebEndpoints() { + if (!statusData || !statusData.services) return []; + + const endpoints = []; + + for (const service of statusData.services) { + if (service.details) { + // RPC service + if (service.details.RpcStatus && service.details.RpcStatus.url) { + endpoints.push({ + serviceKey: service.details.RpcStatus.url, + baseUrl: service.details.RpcStatus.url, + grpcPath: '/rpc.Api/Status', + }); + } + // Remote Prover service + if (service.details.RemoteProverStatus && service.details.RemoteProverStatus.url) { + endpoints.push({ + serviceKey: service.details.RemoteProverStatus.url, + baseUrl: service.details.RemoteProverStatus.url, + grpcPath: '/remote_prover.ProxyStatusApi/Status', + }); + } + } + } + + return endpoints; +} + +/** + * Runs gRPC-Web probes for all collected endpoints. + * Results are stored in grpcWebProbeResults and display is updated. + */ +async function runGrpcWebProbes() { + const endpoints = collectGrpcWebEndpoints(); + if (endpoints.length === 0) return; + + // Run all probes in parallel + const probePromises = endpoints.map(async ({ serviceKey, baseUrl, grpcPath }) => { + const result = await probeGrpcWeb(baseUrl, grpcPath); + grpcWebProbeResults.set(serviceKey, { + ...result, + timestamp: Date.now(), + }); + }); + + await Promise.all(probePromises); + + // Re-render to show updated results + updateDisplay(); +} + +/** + * Renders the probe result badge for a service. + * + * @param {string} serviceKey - Unique key for the service + * @returns {string} - HTML string for the probe result + */ +function renderProbeResult(serviceKey) { + const result = grpcWebProbeResults.get(serviceKey); + if (!result) return ''; + + const statusClass = result.ok ? 'probe-ok' : 'probe-failed'; + const statusText = result.ok ? 'OK' : 'FAILED'; + const seconds = Math.floor((Date.now() - result.timestamp) / 1000); + const timeAgo = seconds < 60 ? 
`${seconds}s ago` : seconds < 3600 ? `${Math.floor(seconds / 60)}m ago` : `${Math.floor(seconds / 3600)}h ago`; + const errorDisplay = result.error && result.error.length > 40 ? result.error.substring(0, 40) + '...' : result.error; + + return ` +
+ gRPC-Web: ${statusText} + ${result.latencyMs}ms + ${result.error ? `${errorDisplay}` : ''} + ${timeAgo} +
+ `; +} + +/** + * Renders the gRPC-Web probe result section for a service. + * Shows "Checking..." if no result yet, otherwise shows the probe result. + * + * @param {string} serviceKey - Unique key for the service (the URL) + * @returns {string} - HTML string for the probe result section + */ +function renderGrpcWebProbeSection(serviceKey) { + const result = grpcWebProbeResults.get(serviceKey); + + if (!result) { + return ` +
+
+ + gRPC-Web: Checking... +
+
+ `; + } + + return ` +
+ ${renderProbeResult(serviceKey)} +
+ `; +} + + +const COPY_ICON = ` + + + + +`; + +function renderCopyButton(value, label) { + if (!value) return ''; + const escapedValue = JSON.stringify(value); + return ` + + `; +} + +function formatSuccessRate(successCount, failureCount) { + const total = successCount + failureCount; + if (!total) { + return 'N/A'; + } + + return `${((successCount / total) * 100).toFixed(1)}%`; +} + +async function fetchStatus() { + try { + const response = await fetch('/status'); + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + statusData = await response.json(); + updateDisplay(); + } catch (error) { + console.error('Error fetching status:', error); + showError('Failed to fetch network status: ' + error.message); + } +} + +// Merge Remote Prover status and test entries into a single card per prover. +function mergeProverStatusAndTests(services) { + const testsByName = new Map(); + const merged = []; + const usedTests = new Set(); + + services.forEach(service => { + if (service.details && service.details.RemoteProverTest) { + testsByName.set(service.name, service); + } + }); + + services.forEach(service => { + if (service.details && service.details.RemoteProverStatus) { + const test = testsByName.get(service.name); + if (test) { + usedTests.add(service.name); + } + merged.push({ + ...service, + testDetails: test?.details?.RemoteProverTest ?? null, + testStatus: test?.status ?? null, + testError: test?.error ?? 
null + }); + } else if (!(service.details && service.details.RemoteProverTest)) { + // Non-prover entries pass through unchanged + merged.push(service); + } + }); + + // Add orphaned tests (in case a test arrives before a status) + testsByName.forEach((test, name) => { + if (!usedTests.has(name)) { + merged.push({ + name, + status: test.status, + last_checked: test.last_checked, + error: test.error, + details: null, + testDetails: test.details.RemoteProverTest, + testStatus: test.status, + testError: test.error + }); + } + }); + + return merged; +} + +function updateDisplay() { + if (!statusData) return; + + const container = document.getElementById('status-container'); + const lastUpdated = document.getElementById('last-updated'); + const overallStatus = document.getElementById('overall-status'); + const servicesCount = document.getElementById('services-count'); + + // Update last updated time + const lastUpdateTime = new Date(statusData.last_updated * 1000); + lastUpdated.textContent = lastUpdateTime.toLocaleString(); + + // Group remote prover status + test into single cards + const processedServices = mergeProverStatusAndTests(statusData.services); + const rpcService = processedServices.find(s => s.details && s.details.RpcStatus); + const rpcChainTip = + rpcService?.details?.RpcStatus?.store_status?.chain_tip ?? + rpcService?.details?.RpcStatus?.block_producer_status?.chain_tip ?? + null; + + // Count healthy vs unhealthy services + const healthyServices = processedServices.filter(s => s.status === 'Healthy').length; + const totalServices = processedServices.length; + const allHealthy = healthyServices === totalServices; + + // Update footer + overallStatus.textContent = allHealthy ? 'All Systems Operational' : `${healthyServices}/${totalServices} Services Healthy`; + overallStatus.style.color = allHealthy ? 
'#22C55D' : '#ff5500'; + servicesCount.textContent = `${totalServices} Services`; + + // Generate status cards + const serviceCardsHtml = processedServices.map(service => { + const isHealthy = service.status === 'Healthy'; + const statusColor = isHealthy ? '#22C55D' : '#ff5500'; + const statusIcon = isHealthy ? '✓' : '✗'; + const numOrDash = value => isHealthy ? (value?.toLocaleString?.() ?? value ?? '-') : '-'; + const timeOrDash = ts => { + if (!isHealthy) return '-'; + return ts ? new Date(ts * 1000).toLocaleString() : '-'; + }; + const commitmentOrDash = (value, label) => isHealthy && value + ? ` + ${value.substring(0, 20)}... + ${renderCopyButton(value, label)} + ` + : '-'; + + const explorerStats = service.details?.ExplorerStatus; + const isExplorerService = service.name?.toLowerCase().includes('explorer'); + const deltaBlock = (isHealthy && explorerStats && rpcChainTip !== null) + ? explorerStats.block_number - rpcChainTip + : null; + const deltaWarning = + deltaBlock !== null && Math.abs(deltaBlock) > EXPLORER_LAG_TOLERANCE + ? `Explorer tip is ${Math.abs(deltaBlock)} blocks ${deltaBlock > 0 ? 'ahead' : 'behind'}` + : null; + let explorerWarningHtml = ''; + + let detailsHtml = ''; + if (service.details) { + const details = service.details; + detailsHtml = ` +
+ ${details.RpcStatus ? ` +
Version: ${details.RpcStatus.version}
+ ${details.RpcStatus.genesis_commitment ? ` +
+ Genesis: + 0x${details.RpcStatus.genesis_commitment.substring(0, 20)}... + ${renderCopyButton(details.RpcStatus.genesis_commitment, 'genesis commitment')} +
+ ` : ''} + ${details.RpcStatus.url ? renderGrpcWebProbeSection(details.RpcStatus.url) : ''} + ${details.RpcStatus.store_status ? ` +
+
Store
+
+ Version: + ${details.RpcStatus.store_status.version} +
+
+ Status: + ${details.RpcStatus.store_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.store_status.chain_tip} +
+
+ ` : ''} + ${details.RpcStatus.block_producer_status ? ` +
+
Block Producer
+
+ Version: + ${details.RpcStatus.block_producer_status.version} +
+
+ Status: + ${details.RpcStatus.block_producer_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.block_producer_status.chain_tip} +
+
+ Mempool stats: +
+ Unbatched TXs: + ${details.RpcStatus.block_producer_status.mempool.unbatched_transactions} +
+
+ Proposed Batches: + ${details.RpcStatus.block_producer_status.mempool.proposed_batches} +
+
+ Proven Batches: + ${details.RpcStatus.block_producer_status.mempool.proven_batches} +
+
+
+ ` : ''} + ` : ''} + ${details.RemoteProverStatus ? ` +
+ Prover Status (${details.RemoteProverStatus.url}): +
Version: ${details.RemoteProverStatus.version}
+
+ Supported Proof Type: ${details.RemoteProverStatus.supported_proof_type} +
+ ${details.RemoteProverStatus.workers && details.RemoteProverStatus.workers.length > 0 ? ` +
+ Workers (${details.RemoteProverStatus.workers.length}): + ${details.RemoteProverStatus.workers.map(worker => ` +
+ ${worker.name} - + ${worker.version} - + ${worker.status} +
+ `).join('')} +
+ ` : ''} + ${renderGrpcWebProbeSection(details.RemoteProverStatus.url)} +
+ ` : ''} + ${details.FaucetTest ? ` +
+ Faucet: +
+
+ Success Rate: + ${formatSuccessRate(details.FaucetTest.success_count, details.FaucetTest.failure_count)} +
+
+ Last Response Time: + ${details.FaucetTest.test_duration_ms}ms +
+ ${details.FaucetTest.last_tx_id ? ` +
+ Last TX ID: + ${details.FaucetTest.last_tx_id.substring(0, 16)}...${renderCopyButton(details.FaucetTest.last_tx_id, 'TX ID')} +
+ ` : ''} + ${details.FaucetTest.challenge_difficulty ? ` +
+ Last Challenge Difficulty: + ~${details.FaucetTest.challenge_difficulty} bits +
+ ` : ''} +
+
+ ${details.FaucetTest.faucet_metadata ? ` +
+ Faucet Token Info: +
+
+ Token ID: + ${details.FaucetTest.faucet_metadata.id.substring(0, 16)}...${renderCopyButton(details.FaucetTest.faucet_metadata.id, 'token ID')} +
+
+ Version: + ${details.FaucetTest.faucet_metadata.version || '-'} +
+
+ Current Issuance: + ${details.FaucetTest.faucet_metadata.issuance.toLocaleString()} +
+
+ Max Supply: + ${details.FaucetTest.faucet_metadata.max_supply.toLocaleString()} +
+
+ Decimals: + ${details.FaucetTest.faucet_metadata.decimals} +
+
+ Base Amount: + ${details.FaucetTest.faucet_metadata.base_amount.toLocaleString()} +
+
+ PoW Difficulty: + ${details.FaucetTest.faucet_metadata.pow_load_difficulty} +
+ +
+
+ ` : ''} + ` : ''} + ${details.NtxIncrement ? ` +
+ Local Transactions: +
+
+ Success Rate: + ${formatSuccessRate(details.NtxIncrement.success_count, details.NtxIncrement.failure_count)} +
+ ${details.NtxIncrement.last_latency_blocks !== null && details.NtxIncrement.last_latency_blocks !== undefined ? ` +
+ Latency: + ${details.NtxIncrement.last_latency_blocks} blocks +
+ ` : ''} + ${details.NtxIncrement.last_tx_id ? ` +
+ Last TX ID: + ${details.NtxIncrement.last_tx_id.substring(0, 16)}...${renderCopyButton(details.NtxIncrement.last_tx_id, 'TX ID')} +
+ ` : ''} +
+
+ ` : ''} + ${details.NtxTracking ? ` +
+ Network Transactions: +
+
+ Current Value: + ${details.NtxTracking.current_value ?? '-'} +
+ ${details.NtxTracking.expected_value ? ` +
+ Expected Value: + ${details.NtxTracking.expected_value} +
+ ` : ''} + ${details.NtxTracking.pending_increments !== null && details.NtxTracking.pending_increments !== undefined ? ` +
+ Pending Notes: + ${details.NtxTracking.pending_increments} +
+ ` : ''} + ${details.NtxTracking.last_updated ? ` +
+ Last Updated: + ${new Date(details.NtxTracking.last_updated * 1000).toLocaleString()} +
+ ` : ''} +
+
+ ` : ''} + ${service.testDetails ? ` +
+ Proof Generation Testing (${service.testDetails.proof_type}): +
+
+ Success Rate: + ${formatSuccessRate(service.testDetails.success_count, service.testDetails.failure_count)} +
+
+ Last Response Time: + ${service.testDetails.test_duration_ms}ms +
+
+ Last Proof Size: + ${(service.testDetails.proof_size_bytes / 1024).toFixed(2)} KB +
+
+
+ ` : ''} +
+ `; + } + + // Always render explorer block for explorer services, even if stats are missing. + if (isExplorerService) { + detailsHtml += ` +
+
+ Explorer: +
+ Block Height: + ${explorerStats ? numOrDash(explorerStats.block_number) : '-'} +
+
+ RPC Chain Tip: + ${isHealthy && rpcChainTip !== null ? rpcChainTip : '-'} +
+
+ Block Time: + ${explorerStats ? timeOrDash(explorerStats.timestamp) : '-'} +
+
+ Block Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.block_commitment, 'block commitment') : '-'} +
+
+ Chain Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.chain_commitment, 'chain commitment') : '-'} +
+
+ Proof Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.proof_commitment, 'proof commitment') : '-'} +
+
+ Transactions: + ${explorerStats ? numOrDash(explorerStats.number_of_transactions) : '-'} +
+
+ Nullifiers: + ${explorerStats ? numOrDash(explorerStats.number_of_nullifiers) : '-'} +
+
+ Notes: + ${explorerStats ? numOrDash(explorerStats.number_of_notes) : '-'} +
+
+ Account Updates: + ${explorerStats ? numOrDash(explorerStats.number_of_account_updates) : '-'} +
+
+
+ `; + + if (deltaWarning) { + explorerWarningHtml = ` +
+
+ Explorer vs RPC +
+
${deltaWarning}
+
+ `; + } + } + + return ` +
+
+
${service.name}
+
+ ${statusIcon} ${service.status.toUpperCase()} +
+
+
+ ${detailsHtml} + ${explorerWarningHtml} +
+
+ Last checked: ${new Date(service.last_checked * 1000).toLocaleString()} +
+
+ `; + }).join(''); + + container.innerHTML = serviceCardsHtml; + + // Add refresh button that spans the full grid + container.innerHTML += ` +
+ +
+ `; +} + +function showError(message) { + const container = document.getElementById('status-container'); + container.innerHTML = ` +
+ ${message} +
+
+ +
+ `; +} + +async function copyToClipboard(text, event) { + const button = event.target.closest('.copy-button'); + if (!button) return; + + try { + await navigator.clipboard.writeText(text); + // Show a brief success indicator + const originalContent = button.innerHTML; + button.innerHTML = ''; + button.style.color = '#22C55D'; + + setTimeout(() => { + button.innerHTML = originalContent; + button.style.color = ''; + }, 2000); + } catch (err) { + console.error('Failed to copy to clipboard:', err); + // Show error feedback on button + button.style.color = '#ff5500'; + setTimeout(() => { + button.style.color = ''; + }, 2000); + } +} + +// Initialize on DOM ready +document.addEventListener('DOMContentLoaded', () => { + // Initial load and set up auto-refresh + fetchStatus().then(() => { + // Start gRPC-Web probing after initial status fetch + runGrpcWebProbes(); + grpcWebProbeInterval = setInterval(runGrpcWebProbes, GRPC_WEB_PROBE_INTERVAL_MS); + }); + updateInterval = setInterval(fetchStatus, 10000); // Refresh every 10 seconds +}); + +// Clean up on page unload +window.addEventListener('beforeunload', () => { + if (updateInterval) { + clearInterval(updateInterval); + } + if (grpcWebProbeInterval) { + clearInterval(grpcWebProbeInterval); + } +}); + diff --git a/bin/network-monitor/src/frontend.rs b/bin/network-monitor/src/frontend.rs index e458f95385..dd6a8fc5ce 100644 --- a/bin/network-monitor/src/frontend.rs +++ b/bin/network-monitor/src/frontend.rs @@ -42,6 +42,7 @@ pub async fn serve(server_state: ServerState, config: MonitorConfig) { let app = Router::new() // Serve embedded assets .route("/assets/index.css", get(serve_css)) + .route("/assets/index.js", get(serve_js)) .route("/assets/favicon.ico", get(serve_favicon)) // Main dashboard route .route("/", get(get_dashboard)) @@ -116,6 +117,14 @@ async fn serve_css() -> Response { .into_response() } +async fn serve_js() -> Response { + ( + [(header::CONTENT_TYPE, header::HeaderValue::from_static("text/javascript"))], 
+ include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/index.js")), + ) + .into_response() +} + async fn serve_favicon() -> Response { ( [(header::CONTENT_TYPE, header::HeaderValue::from_static("image/x-icon"))], diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index b2c07a3a85..be3be5f3cf 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -75,7 +75,8 @@ impl Tasks { .connect_lazy::(); let current_time = current_unix_timestamp_secs(); - let initial_rpc_status = check_rpc_status(&mut rpc, current_time).await; + let initial_rpc_status = + check_rpc_status(&mut rpc, config.rpc_url.to_string(), current_time).await; // Spawn the RPC checker let (rpc_tx, rpc_rx) = watch::channel(initial_rpc_status); diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index c82cdbb21e..11c77593ef 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -137,6 +137,8 @@ pub enum ServiceDetails { /// service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RpcStatusDetails { + /// The URL of the RPC service (used by the frontend for gRPC-Web probing). + pub url: String, pub version: String, pub genesis_commitment: Option, pub store_status: Option, @@ -278,9 +280,11 @@ impl RemoteProverStatusDetails { } } -impl From for RpcStatusDetails { - fn from(status: RpcStatus) -> Self { +impl RpcStatusDetails { + /// Creates `RpcStatusDetails` from a gRPC `RpcStatus` response and the configured URL. 
+ pub fn from_rpc_status(status: RpcStatus, url: String) -> Self { Self { + url, version: status.version, genesis_commitment: status.genesis_commitment.as_ref().map(|gc| format!("{gc:?}")), store_status: status.store.map(StoreStatusDetails::from), @@ -320,6 +324,7 @@ pub async fn run_rpc_status_task( status_check_interval: Duration, request_timeout: Duration, ) { + let url_str = rpc_url.to_string(); let mut rpc = ClientBuilder::new(rpc_url) .with_tls() .expect("TLS is enabled") @@ -337,7 +342,7 @@ pub async fn run_rpc_status_task( let current_time = current_unix_timestamp_secs(); - let status = check_rpc_status(&mut rpc, current_time).await; + let status = check_rpc_status(&mut rpc, url_str.clone(), current_time).await; // Send the status update; exit if no receivers (shutdown signal) if status_sender.send(status).is_err() { @@ -354,6 +359,7 @@ pub async fn run_rpc_status_task( /// # Arguments /// /// * `rpc` - The RPC client. +/// * `url` - The URL of the RPC service. /// * `current_time` - The current time. 
/// /// # Returns @@ -369,6 +375,7 @@ pub async fn run_rpc_status_task( )] pub(crate) async fn check_rpc_status( rpc: &mut miden_node_proto::clients::RpcClient, + url: String, current_time: u64, ) -> ServiceStatus { match rpc.status(()).await { @@ -380,7 +387,7 @@ pub(crate) async fn check_rpc_status( status: Status::Healthy, last_checked: current_time, error: None, - details: ServiceDetails::RpcStatus(status.into()), + details: ServiceDetails::RpcStatus(RpcStatusDetails::from_rpc_status(status, url)), } }, Err(e) => { From e3925176eee428504b22bad7a3cfd2a8524a0f14 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 8 Jan 2026 12:03:15 -0300 Subject: [PATCH 072/125] fix: increase pagination limit, return chain tip if exceeded (#1489) * fix: increase pagination limit, return chain tip if exceeded * review: refetch chain tip --- CHANGELOG.md | 1 + crates/ntx-builder/src/store.rs | 12 +++++++----- crates/store/src/errors.rs | 14 ++++++++++++++ crates/store/src/server/ntx_builder.rs | 22 ++++++++++++++-------- 4 files changed, 36 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6594094fb4..4f36424518 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/miden-node/pull/1407)). - Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/miden-node/issues/1422)). - Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/miden-node/pull/1461)). +- Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)). 
## v0.12.6 diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 3b6a565b61..447571a5a0 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -179,13 +179,13 @@ impl StoreClient { /// reach the end. /// /// Each page can return up to `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` - /// accounts (~289,000). With 1000 iterations, this supports up to ~524 million network - /// accounts, which is assumed to be sufficient for the foreseeable future. + /// accounts (~289,000). With `100_000` iterations, which is assumed to be sufficient for the + /// foreseeable future. #[instrument(target = COMPONENT, name = "store.client.get_network_account_ids", skip_all, err)] pub async fn get_network_account_ids(&self) -> Result, StoreError> { - const MAX_ITERATIONS: u32 = 1000; + const MAX_ITERATIONS: u32 = 100_000; - let block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); let mut ids = Vec::new(); let mut iterations_count = 0; @@ -216,8 +216,10 @@ impl StoreClient { ids.extend(accounts?); iterations_count += 1; + block_range = + BlockNumber::from(pagination_info.block_num)..=BlockNumber::from(u32::MAX); - if pagination_info.block_num == pagination_info.chain_tip { + if pagination_info.block_num >= pagination_info.chain_tip { break; } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 99f54745ea..abe80e8c3a 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -426,6 +426,20 @@ pub enum SyncStorageMapsError { AccountNotPublic(AccountId), } +// GET NETWORK ACCOUNT IDS +// ================================================================================================ + +#[derive(Debug, Error, GrpcError)] +pub enum GetNetworkAccountIdsError { + #[error("database error")] + #[grpc(internal)] + DatabaseError(#[from] DatabaseError), + #[error("invalid block range")] + 
InvalidBlockRange(#[from] InvalidBlockRange), + #[error("malformed nullifier prefix")] + DeserializationFailed(#[from] ConversionError), +} + // GET BLOCK BY NUMBER ERRORS // ================================================================================================ diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 4106a121d0..ba8e82b4f5 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -12,8 +12,8 @@ use tracing::{debug, instrument}; use crate::COMPONENT; use crate::db::models::Page; -use crate::errors::GetNoteScriptByRootError; -use crate::server::api::{StoreApi, internal_error, invalid_argument, read_root}; +use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError}; +use crate::server::api::{StoreApi, internal_error, invalid_argument, read_block_range, read_root}; // NTX BUILDER ENDPOINTS // ================================================================================================ @@ -171,19 +171,25 @@ impl ntx_builder_server::NtxBuilder for StoreApi { &self, request: Request, ) -> Result, Status> { - let block_range = request.into_inner(); - let chain_tip = self.state.latest_block_num().await; + let request = request.into_inner(); - let block_from = BlockNumber::from(block_range.block_from); - let block_to = block_range.block_to.map_or(chain_tip, BlockNumber::from); - let block_range = block_from..=block_to; + let mut chain_tip = self.state.latest_block_num().await; + let block_range = + read_block_range::(Some(request), "GetNetworkAccountIds")? 
+ .into_inclusive_range::(&chain_tip)?; - let (account_ids, last_block_included) = + let (account_ids, mut last_block_included) = self.state.get_all_network_accounts(block_range).await.map_err(internal_error)?; let account_ids: Vec = account_ids.into_iter().map(Into::into).collect(); + if last_block_included > chain_tip { + last_block_included = chain_tip; + } + + chain_tip = self.state.latest_block_num().await; + Ok(Response::new(proto::store::NetworkAccountIdList { account_ids, pagination_info: Some(proto::rpc::PaginationInfo { From 8a2e5fe6cafe91d8f2a4c57c746d369a76d39288 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Fri, 9 Jan 2026 16:33:58 +0100 Subject: [PATCH 073/125] refactor [1/4]: change accounts db schema (#1481) --- CHANGELOG.md | 1 + crates/proto/src/domain/account.rs | 354 +++++---- .../db/migrations/2025062000000_setup/up.sql | 8 +- crates/store/src/db/mod.rs | 85 ++- crates/store/src/db/models/conv.rs | 33 +- .../store/src/db/models/queries/accounts.rs | 517 +++++++------ .../db/models/queries/accounts/at_block.rs | 269 +++++++ .../src/db/models/queries/accounts/tests.rs | 552 ++++++++++++++ crates/store/src/db/schema.rs | 4 +- crates/store/src/db/tests.rs | 712 +++++++++++++++++- crates/store/src/errors.rs | 22 +- crates/store/src/state.rs | 187 +++-- 12 files changed, 2239 insertions(+), 505 deletions(-) create mode 100644 crates/store/src/db/models/queries/accounts/at_block.rs create mode 100644 crates/store/src/db/models/queries/accounts/tests.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f36424518..fe5bc04926 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ - Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). - Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). 
- [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). +- [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). ### Fixes diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 2ef2be02c6..4330a82deb 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -99,11 +99,35 @@ impl From<&AccountInfo> for proto::account::AccountDetails { fn from(AccountInfo { summary, details }: &AccountInfo) -> Self { Self { summary: Some(summary.into()), - details: details.as_ref().map(miden_protocol::utils::Serializable::to_bytes), + details: details.as_ref().map(Serializable::to_bytes), } } } +// ACCOUNT STORAGE HEADER +//================================================================================================ + +impl TryFrom for AccountStorageHeader { + type Error = ConversionError; + + fn try_from(value: proto::account::AccountStorageHeader) -> Result { + let proto::account::AccountStorageHeader { slots } = value; + + let slot_headers = slots + .into_iter() + .map(|slot| { + let slot_name = StorageSlotName::new(slot.slot_name)?; + let slot_type = storage_slot_type_from_raw(slot.slot_type)?; + let commitment = + slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; + Ok(StorageSlotHeader::new(slot_name, slot_type, commitment)) + }) + .collect::, ConversionError>>()?; + + Ok(AccountStorageHeader::new(slot_headers)?) 
+ } +} + // ACCOUNT PROOF REQUEST // ================================================================================================ @@ -163,72 +187,6 @@ impl TryFrom for Accoun } } -impl TryFrom for AccountStorageHeader { - type Error = ConversionError; - - fn try_from(value: proto::account::AccountStorageHeader) -> Result { - let proto::account::AccountStorageHeader { slots } = value; - - let slot_headers = slots - .into_iter() - .map(|slot| { - let slot_name = StorageSlotName::new(slot.slot_name)?; - let slot_type = storage_slot_type_from_raw(slot.slot_type)?; - let commitment = - slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; - Ok(StorageSlotHeader::new(slot_name, slot_type, commitment)) - }) - .collect::, ConversionError>>()?; - - Ok(AccountStorageHeader::new(slot_headers)?) - } -} - -impl TryFrom - for AccountStorageMapDetails -{ - type Error = ConversionError; - - fn try_from( - value: proto::rpc::account_storage_details::AccountStorageMapDetails, - ) -> Result { - let proto::rpc::account_storage_details::AccountStorageMapDetails { - slot_name, - too_many_entries, - entries, - } = value; - - let slot_name = StorageSlotName::new(slot_name)?; - - // Extract map_entries from the MapEntries message - let map_entries = if let Some(entries) = entries { - entries - .entries - .into_iter() - .map(|entry| { - let key = entry - .key - .ok_or(proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field( - stringify!(key), - ))? - .try_into()?; - let value = entry - .value - .ok_or(proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field( - stringify!(value), - ))? - .try_into()?; - Ok((key, value)) - }) - .collect::, ConversionError>>()? 
- } else { - Vec::new() - }; - - Ok(Self { slot_name, too_many_entries, map_entries }) - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapRequest { pub slot_name: StorageSlotName, @@ -346,36 +304,48 @@ impl From for proto::account::AccountStorageHeader { } } +// ACCOUNT VAULT DETAILS +//================================================================================================ + +/// Account vault details +/// +/// When an account contains a large number of assets (> +/// [`AccountVaultDetails::MAX_RETURN_ENTRIES`]), including all assets in a single RPC response +/// creates performance issues. In such cases, the `LimitExceeded` variant indicates to the client +/// to use the `SyncAccountVault` endpoint instead. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct AccountVaultDetails { - pub too_many_assets: bool, - pub assets: Vec, +pub enum AccountVaultDetails { + /// The vault has too many assets to return inline. + /// Clients must use `SyncAccountVault` endpoint instead. + LimitExceeded, + + /// The assets in the vault (up to `MAX_RETURN_ENTRIES`). + Assets(Vec), } + impl AccountVaultDetails { - const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of vault entries that can be returned in a single response. + /// Accounts with more assets will have `LimitExceeded` variant. + pub const MAX_RETURN_ENTRIES: usize = 1000; pub fn new(vault: &AssetVault) -> Self { if vault.assets().nth(Self::MAX_RETURN_ENTRIES).is_some() { - Self::too_many() + Self::LimitExceeded } else { - Self { - too_many_assets: false, - assets: Vec::from_iter(vault.assets()), - } + Self::Assets(Vec::from_iter(vault.assets())) } } pub fn empty() -> Self { - Self { - too_many_assets: false, - assets: Vec::new(), - } + Self::Assets(Vec::new()) } - fn too_many() -> Self { - Self { - too_many_assets: true, - assets: Vec::new(), + /// Creates `AccountVaultDetails` from a list of assets. 
+ pub fn from_assets(assets: Vec) -> Self { + if assets.len() > Self::MAX_RETURN_ENTRIES { + Self::LimitExceeded + } else { + Self::Assets(assets) } } } @@ -386,40 +356,69 @@ impl TryFrom for AccountVaultDetails { fn try_from(value: proto::rpc::AccountVaultDetails) -> Result { let proto::rpc::AccountVaultDetails { too_many_assets, assets } = value; - let assets = - Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { - let asset = asset - .asset - .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; - let asset = Word::try_from(asset)?; - Asset::try_from(asset).map_err(ConversionError::AssetError) - }))?; - Ok(Self { too_many_assets, assets }) + if too_many_assets { + Ok(Self::LimitExceeded) + } else { + let parsed_assets = + Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { + let asset = asset + .asset + .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; + let asset = Word::try_from(asset)?; + Asset::try_from(asset).map_err(ConversionError::AssetError) + }))?; + Ok(Self::Assets(parsed_assets)) + } } } impl From for proto::rpc::AccountVaultDetails { fn from(value: AccountVaultDetails) -> Self { - let AccountVaultDetails { too_many_assets, assets } = value; - - Self { - too_many_assets, - assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { - asset: Some(proto::primitives::Digest::from(Word::from(asset))), - })), + match value { + AccountVaultDetails::LimitExceeded => Self { + too_many_assets: true, + assets: Vec::new(), + }, + AccountVaultDetails::Assets(assets) => Self { + too_many_assets: false, + assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { + asset: Some(proto::primitives::Digest::from(Word::from(asset))), + })), + }, } } } +// ACCOUNT STORAGE MAP DETAILS +//================================================================================================ + +/// Details about an account storage map slot. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct AccountStorageMapDetails { pub slot_name: StorageSlotName, - pub too_many_entries: bool, - pub map_entries: Vec<(Word, Word)>, + pub entries: StorageMapEntries, +} + +/// Storage map entries for an account storage slot. +/// +/// When a storage map contains many entries (> [`AccountStorageMapDetails::MAX_RETURN_ENTRIES`]), +/// returning all entries in a single RPC response creates performance issues. In such cases, +/// the `LimitExceeded` variant indicates to the client to use the `SyncStorageMaps` endpoint +/// instead. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StorageMapEntries { + /// The map has too many entries to return inline. + /// Clients must use `SyncStorageMaps` endpoint instead. + LimitExceeded, + + /// The storage map entries (key-value pairs), up to `MAX_RETURN_ENTRIES`. + /// TODO: For partial responses, also include Merkle proofs and inner SMT nodes. + Entries(Vec<(Word, Word)>), } impl AccountStorageMapDetails { - const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of storage map entries that can be returned in a single response. 
+ pub const MAX_RETURN_ENTRIES: usize = 1000; pub fn new(slot_name: StorageSlotName, slot_data: SlotData, storage_map: &StorageMap) -> Self { match slot_data { @@ -430,13 +429,15 @@ impl AccountStorageMapDetails { fn from_all_entries(slot_name: StorageSlotName, storage_map: &StorageMap) -> Self { if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_name) + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } } else { let map_entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v))); Self { slot_name, - too_many_entries: false, - map_entries, + entries: StorageMapEntries::Entries(map_entries), } } } @@ -447,22 +448,54 @@ impl AccountStorageMapDetails { storage_map: &StorageMap, ) -> Self { if keys.len() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_name) + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } } else { // TODO For now, we return all entries instead of specific keys with proofs Self::from_all_entries(slot_name, storage_map) } } +} - pub fn too_many_entries(slot_name: StorageSlotName) -> Self { - Self { - slot_name, - too_many_entries: true, - map_entries: Vec::new(), +impl From + for proto::rpc::account_storage_details::AccountStorageMapDetails +{ + fn from(value: AccountStorageMapDetails) -> Self { + use proto::rpc::account_storage_details::account_storage_map_details; + + let AccountStorageMapDetails { slot_name, entries } = value; + + match entries { + StorageMapEntries::LimitExceeded => Self { + slot_name: slot_name.to_string(), + too_many_entries: true, + entries: Some(account_storage_map_details::MapEntries { entries: Vec::new() }), + }, + StorageMapEntries::Entries(map_entries) => { + let entries = Some(account_storage_map_details::MapEntries { + entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| { + account_storage_map_details::map_entries::StorageMapEntry { + key: Some(key.into()), + value: Some(value.into()), + } + })), + }); + + Self 
{ + slot_name: slot_name.to_string(), + too_many_entries: false, + entries, + } + }, } } } +// ACCOUNT STORAGE DETAILS DETAILS +//================================================================================================ + #[derive(Debug, Clone, PartialEq, Eq)] pub struct AccountStorageDetails { pub header: AccountStorageHeader, @@ -498,27 +531,68 @@ impl From for proto::rpc::AccountStorageDetails { const fn storage_slot_type_from_raw(slot_type: u32) -> Result { Ok(match slot_type { - 0 => StorageSlotType::Map, - 1 => StorageSlotType::Value, + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, _ => return Err(ConversionError::EnumDiscriminantOutOfRange), }) } const fn storage_slot_type_to_raw(slot_type: StorageSlotType) -> u32 { match slot_type { - StorageSlotType::Map => 0, - StorageSlotType::Value => 1, + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, } } -/// Represents account details returned in response to an account proof request. -pub struct AccountDetails { - pub account_header: AccountHeader, - pub account_code: Option>, - pub vault_details: AccountVaultDetails, - pub storage_details: AccountStorageDetails, +impl TryFrom + for AccountStorageMapDetails +{ + type Error = ConversionError; + + fn try_from( + value: proto::rpc::account_storage_details::AccountStorageMapDetails, + ) -> Result { + use proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry; + let proto::rpc::account_storage_details::AccountStorageMapDetails { + slot_name, + too_many_entries, + entries, + } = value; + + let slot_name = StorageSlotName::new(slot_name)?; + + let entries = if too_many_entries { + StorageMapEntries::LimitExceeded + } else { + let map_entries = if let Some(entries) = entries { + entries + .entries + .into_iter() + .map(|entry| { + let key = entry + .key + .ok_or(StorageMapEntry::missing_field(stringify!(key)))? 
+ .try_into()?; + let value = entry + .value + .ok_or(StorageMapEntry::missing_field(stringify!(value)))? + .try_into()?; + Ok((key, value)) + }) + .collect::, ConversionError>>()? + } else { + Vec::new() + }; + StorageMapEntries::Entries(map_entries) + }; + + Ok(Self { slot_name, entries }) + } } +// ACCOUNT PROOF RESPONSE +//================================================================================================ + /// Represents the response to an account proof request. pub struct AccountProofResponse { pub block_num: BlockNumber, @@ -558,6 +632,17 @@ impl From for proto::rpc::AccountProofResponse { } } +// ACCOUNT DETAILS +//================================================================================================ + +/// Represents account details returned in response to an account proof request. +pub struct AccountDetails { + pub account_header: AccountHeader, + pub account_code: Option>, + pub vault_details: AccountVaultDetails, + pub storage_details: AccountStorageDetails, +} + impl TryFrom for AccountDetails { type Error = ConversionError; @@ -622,31 +707,6 @@ impl From for proto::rpc::account_proof_response::AccountDetails } } -impl From - for proto::rpc::account_storage_details::AccountStorageMapDetails -{ - fn from(value: AccountStorageMapDetails) -> Self { - use proto::rpc::account_storage_details::account_storage_map_details; - - let AccountStorageMapDetails { slot_name, too_many_entries, map_entries } = value; - - let entries = Some(account_storage_map_details::MapEntries { - entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| { - account_storage_map_details::map_entries::StorageMapEntry { - key: Some(key.into()), - value: Some(value.into()), - } - })), - }); - - Self { - slot_name: slot_name.to_string(), - too_many_entries, - entries, - } - } -} - // ACCOUNT WITNESS // ================================================================================================ diff --git 
a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index a526af1e0a..adf06e2a32 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -18,18 +18,18 @@ CREATE TABLE accounts ( block_num INTEGER NOT NULL, account_commitment BLOB NOT NULL, code_commitment BLOB, - storage BLOB, - vault BLOB, nonce INTEGER, + storage_header BLOB, -- Serialized AccountStorageHeader from miden-objects + vault_root BLOB, -- Vault root commitment is_latest BOOLEAN NOT NULL DEFAULT 0, -- Indicates if this is the latest state for this account_id created_at_block INTEGER NOT NULL, PRIMARY KEY (account_id, block_num), CONSTRAINT all_null_or_none_null CHECK ( - (code_commitment IS NOT NULL AND storage IS NOT NULL AND vault IS NOT NULL AND nonce IS NOT NULL) + (code_commitment IS NOT NULL AND nonce IS NOT NULL AND storage_header IS NOT NULL AND vault_root IS NOT NULL) OR - (code_commitment IS NULL AND storage IS NULL AND vault IS NULL AND nonce IS NULL) + (code_commitment IS NULL AND nonce IS NULL AND storage_header IS NULL AND vault_root IS NULL) ) ) WITHOUT ROWID; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fd4237593..3db913d648 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -7,7 +7,7 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary, NetworkAccountPrefix}; use miden_node_proto::generated as proto; use miden_protocol::Word; -use miden_protocol::account::AccountId; +use miden_protocol::account::{AccountHeader, AccountId, AccountStorage}; use miden_protocol::asset::{Asset, AssetVaultKey}; use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; use miden_protocol::crypto::merkle::SparseMerklePath; @@ -393,7 +393,7 @@ impl Db { .await } - /// Loads all the account commitments from the 
DB. + /// TODO marked for removal, replace with paged version #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_all_account_commitments(&self) -> Result> { self.transact("read all account commitments", move |conn| { @@ -402,6 +402,16 @@ impl Db { .await } + /// Returns all account IDs that have public state. + #[allow(dead_code)] // Will be used by InnerForest in next PR + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_public_account_ids(&self) -> Result> { + self.transact("read all public account IDs", move |conn| { + queries::select_all_public_account_ids(conn) + }) + .await + } + /// Loads public account details from the DB. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account(&self, id: AccountId) -> Result { @@ -409,19 +419,6 @@ impl Db { .await } - /// Loads account details at a specific block number from the DB. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_historical_account_at( - &self, - id: AccountId, - block_num: BlockNumber, - ) -> Result { - self.transact("Get historical account details", move |conn| { - queries::select_historical_account_at(conn, id, block_num) - }) - .await - } - /// Loads public account details from the DB based on the account ID's prefix. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_network_account_by_prefix( @@ -458,6 +455,64 @@ impl Db { .await } + /// Reconstructs account storage at a specific block from the database + /// + /// This method queries the decomposed storage tables and reconstructs the full + /// `AccountStorage` with SMT backing for Map slots. 
+ // TODO split querying the header from the content + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_account_storage_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result { + self.transact("Get account storage at block", move |conn| { + queries::select_account_storage_at_block(conn, account_id, block_num) + }) + .await + } + + /// Queries vault assets at a specific block + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_account_vault_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result> { + self.transact("Get account vault at block", move |conn| { + queries::select_account_vault_at_block(conn, account_id, block_num) + }) + .await + } + + /// Queries the account code by its commitment hash. + /// + /// Returns `None` if no code exists with that commitment. + pub async fn select_account_code_by_commitment( + &self, + code_commitment: Word, + ) -> Result>> { + self.transact("Get account code by commitment", move |conn| { + queries::select_account_code_by_commitment(conn, code_commitment) + }) + .await + } + + /// Queries the account header for a specific account at a specific block number. + /// + /// Returns `None` if the account doesn't exist at that block. 
+ pub async fn select_account_header_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result> { + self.transact("Get account header at block", move |conn| { + queries::select_account_header_at_block(conn, account_id, block_num) + .map(|opt| opt.map(|(header, _storage_header)| header)) + }) + .await + } + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_state_sync( &self, diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 48013b370d..37a9b019fb 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -34,7 +34,7 @@ use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_protocol::Felt; -use miden_protocol::account::StorageSlotName; +use miden_protocol::account::{StorageSlotName, StorageSlotType}; use miden_protocol::block::BlockNumber; use miden_protocol::note::{NoteExecutionMode, NoteTag}; @@ -131,6 +131,33 @@ impl SqlTypeConvert for NoteTag { } } +impl SqlTypeConvert for StorageSlotType { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("invalid storage slot type value {0}")] + struct ValueError(i32); + + Ok(match raw { + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, + }) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + match self { + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, + } + } +} + impl SqlTypeConvert for StorageSlotName { type Raw = String; @@ -157,9 +184,9 @@ pub(crate) fn nullifier_prefix_to_raw_sql(prefix: u16) -> i32 { } #[inline(always)] -pub(crate) fn raw_sql_to_nonce(raw: i64) -> u64 { +pub(crate) fn raw_sql_to_nonce(raw: i64) -> Felt { debug_assert!(raw >= 0); - raw as u64 + Felt::new(raw as u64) } #[inline(always)] pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { diff --git 
a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 167ebdd63f..c1ad88d2bc 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeMap; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -8,8 +9,6 @@ use diesel::{ BoolExpressionMethods, ExpressionMethods, Insertable, - JoinOnDsl, - NullableExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, @@ -24,6 +23,7 @@ use miden_node_utils::limiter::{ QueryParamAccountIdLimit, QueryParamLimiter, }; +use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, @@ -31,73 +31,75 @@ use miden_protocol::account::{ AccountDelta, AccountId, AccountStorage, + AccountStorageHeader, NonFungibleDeltaAction, + StorageMap, + StorageSlot, StorageSlotContent, StorageSlotName, + StorageSlotType, }; use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_protocol::block::{BlockAccountUpdate, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_protocol::{Felt, Word}; use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; +mod at_block; +pub(crate) use at_block::{ + select_account_header_at_block, + select_account_storage_at_block, + select_account_vault_at_block, +}; + +#[cfg(test)] +mod tests; + type StorageMapValueRow = (i64, String, Vec, Vec); -/// Select the latest account details by account id from the DB using the given -/// [`SqliteConnection`]. +// ACCOUNT CODE +// ================================================================================================ + +/// Select account code by its commitment hash from the `account_codes` table. 
/// /// # Returns /// -/// The latest account details, or an error. +/// The account code bytes if found, or `None` if no code exists with that commitment. /// /// # Raw SQL /// /// ```sql -/// SELECT -/// accounts.account_id, -/// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code -/// FROM -/// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment -/// WHERE -/// account_id = ?1 -/// AND is_latest = 1 +/// SELECT code FROM account_codes WHERE code_commitment = ?1 /// ``` -pub(crate) fn select_account( +pub(crate) fn select_account_code_by_commitment( conn: &mut SqliteConnection, - account_id: AccountId, -) -> Result { - let raw = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), + code_commitment: Word, +) -> Result>, DatabaseError> { + use schema::account_codes; + + let code_commitment_bytes = code_commitment.to_bytes(); + + let result: Option> = SelectDsl::select( + account_codes::table.filter(account_codes::code_commitment.eq(&code_commitment_bytes)), + account_codes::code, ) - .filter(schema::accounts::account_id.eq(account_id.to_bytes())) - .filter(schema::accounts::is_latest.eq(true)) - .get_result::<(AccountRaw, Option>)>(conn) - .optional()? - .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; - let info = AccountWithCodeRawJoined::from(raw).try_into()?; - Ok(info) + .first(conn) + .optional()?; + + Ok(result) } -/// Select account details as they are at the given block height. +// ACCOUNT RETRIEVAL +// ================================================================================================ + +/// Select account by ID from the DB using the given [`SqliteConnection`]. 
/// /// # Returns /// -/// The account details at the specified block, or an error. +/// The latest account info, or an error. /// /// # Raw SQL /// @@ -105,58 +107,107 @@ pub(crate) fn select_account( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// account_id = ?1 -/// AND block_num <= ?2 -/// ORDER BY -/// block_num DESC -/// LIMIT -/// 1 +/// AND is_latest = 1 /// ``` -pub(crate) fn select_historical_account_at( +pub(crate) fn select_account( conn: &mut SqliteConnection, account_id: AccountId, - block_num: BlockNumber, ) -> Result { - let raw = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter( - schema::accounts::account_id - .eq(account_id.to_bytes()) - .and(schema::accounts::block_num.le(block_num.to_raw_sql())), + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result::(conn) + .optional()? + .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; + + let summary: AccountSummary = raw.try_into()?; + + // Backfill account details from database + // For private accounts, we don't store full details in the database + let details = if account_id.has_public_state() { + Some(select_full_account(conn, account_id)?) 
+ } else { + None + }; + + Ok(AccountInfo { summary, details }) +} + +/// Reconstruct full Account from database tables for the latest account state +/// +/// This function queries the database tables to reconstruct a complete Account object: +/// - Code from `account_codes` table +/// - Nonce and storage header from `accounts` table +/// - Storage map entries from `account_storage_map_values` table +/// - Vault from `account_vault_assets` table +/// +/// # Note +/// +/// A stop-gap solution to retain store API and construct `AccountInfo` types. +/// The function should ultimately be removed, and any queries be served from the +/// `State` which contains an `SmtForest` to serve the latest and most recent +/// historical data. +// TODO: remove eventually once refactoring is complete +fn select_full_account( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + // Get account metadata (nonce, code_commitment) and code in a single join query + let (nonce, code_bytes): (Option, Vec) = SelectDsl::select( + schema::accounts::table.inner_join(schema::account_codes::table), + (schema::accounts::nonce, schema::account_codes::code), ) - .order_by(schema::accounts::block_num.desc()) - .limit(1) - .get_result::<(AccountRaw, Option>)>(conn) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result(conn) .optional()? 
.ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; - let info = AccountWithCodeRawJoined::from(raw).try_into()?; - Ok(info) + + let nonce = raw_sql_to_nonce(nonce.ok_or_else(|| { + DatabaseError::DataCorrupted(format!("No nonce found for account {account_id}")) + })?); + + let code = AccountCode::read_from_bytes(&code_bytes)?; + + // Reconstruct storage using existing helper function + let storage = select_latest_account_storage(conn, account_id)?; + + // Reconstruct vault from account_vault_assets table + let vault_entries: Vec<(Vec, Option>)> = SelectDsl::select( + schema::account_vault_assets::table, + (schema::account_vault_assets::vault_key, schema::account_vault_assets::asset), + ) + .filter(schema::account_vault_assets::account_id.eq(account_id.to_bytes())) + .filter(schema::account_vault_assets::is_latest.eq(true)) + .load(conn)?; + + let mut assets = Vec::new(); + for (_key_bytes, maybe_asset_bytes) in vault_entries { + if let Some(asset_bytes) = maybe_asset_bytes { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + } + + let vault = AssetVault::new(&assets)?; + + Ok(Account::new(account_id, vault, storage, code, nonce, None)?) } -/// Select the latest account details by account ID prefix from the DB using the given -/// [`SqliteConnection`] This method is meant to be used by the network transaction builder. Because -/// network notes get matched through accounts through the account's 30-bit prefix, it is possible -/// that multiple accounts match against a single prefix. In this scenario, the first account is -/// returned. +/// Select the latest account info by account ID prefix from the DB using the given +/// [`SqliteConnection`]. Meant to be used by the network transaction builder. +/// Because network notes get matched through accounts through the account's 30-bit prefix, it is +/// possible that multiple accounts match against a single prefix. In this scenario, the first +/// account is returned. 
/// /// # Returns /// -/// The latest account details, `None` if the account was not found, or an error. +/// The latest account info, `None` if the account was not found, or an error. /// /// # Raw SQL /// @@ -164,41 +215,34 @@ pub(crate) fn select_historical_account_at( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// network_account_id_prefix = ?1 +/// AND is_latest = 1 /// ``` pub(crate) fn select_account_by_id_prefix( conn: &mut SqliteConnection, id_prefix: u32, ) -> Result, DatabaseError> { - let maybe_info = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::is_latest.eq(true)) - .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) - .get_result::<(AccountRaw, Option>)>(conn) - .optional() - .map_err(DatabaseError::Diesel)?; - - let result: Result, DatabaseError> = maybe_info - .map(AccountWithCodeRawJoined::from) - .map(std::convert::TryInto::::try_into) - .transpose(); - - result + let maybe_summary = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) + .get_result::(conn) + .optional() + .map_err(DatabaseError::Diesel)?; + + match maybe_summary { + None => Ok(None), + Some(raw) => { + let summary: AccountSummary = raw.try_into()?; + let account_id = summary.account_id; + // Backfill account details from database + let details 
= select_full_account(conn, account_id).ok(); + Ok(Some(AccountInfo { summary, details })) + }, + } } /// Select all account commitments from the DB using the given [`SqliteConnection`]. @@ -238,6 +282,48 @@ pub(crate) fn select_all_account_commitments( )) } +/// Select all account IDs that have public state. +/// +/// This filters accounts in-memory after loading only the account IDs (not commitments), +/// which is more efficient than loading full commitments when only IDs are needed. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// account_id +/// FROM +/// accounts +/// WHERE +/// is_latest = 1 +/// ORDER BY +/// block_num ASC +/// ``` +#[allow(dead_code)] // Will be used by InnerForest in next PR +pub(crate) fn select_all_public_account_ids( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + // We could technically use a `LIKE` constraint for both postgres and sqlite backends, + // but diesel doesn't expose that. + let raw: Vec> = + SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::>(conn)?; + + Result::from_iter( + raw.into_iter() + .map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }) + .filter_map(|result| match result { + Ok(id) if id.has_public_state() => Some(Ok(id)), + Ok(_) => None, + Err(e) => Some(Err(e)), + }), + ) +} + /// Select account vault assets within a block range (inclusive). 
/// /// # Parameters @@ -379,16 +465,11 @@ pub fn select_accounts_by_block_range( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment +/// WHERE +/// is_latest = 1 /// ORDER BY /// block_num ASC /// ``` @@ -396,17 +477,23 @@ pub fn select_accounts_by_block_range( pub(crate) fn select_all_accounts( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { - let accounts_raw = QueryDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::is_latest.eq(true)) - .load::<(AccountRaw, Option>)>(conn)?; - let account_infos = vec_raw_try_into::( - accounts_raw.into_iter().map(AccountWithCodeRawJoined::from), - )?; + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::(conn)?; + + let summaries: Vec = vec_raw_try_into(raw)?; + + // Backfill account details from database + let account_infos = summaries + .into_iter() + .map(|summary| { + let account_id = summary.account_id; + let details = select_full_account(conn, account_id).ok(); + AccountInfo { summary, details } + }) + .collect(); + Ok(account_infos) } @@ -616,6 +703,76 @@ pub(crate) fn select_account_storage_map_values( Ok(StorageMapValuesPage { last_block_included, values }) } +/// Select latest account storage by querying `accounts.storage_header` where `is_latest=true` +/// and reconstructing full storage from the header plus map values from +/// 
`account_storage_map_values`. +pub(crate) fn select_latest_account_storage( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + + // Query storage header blob for this account where is_latest = true + let storage_blob: Option> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::is_latest.eq(true)) + .first(conn) + .optional()? + .flatten(); + + let Some(blob) = storage_blob else { + // No storage means empty storage + return Ok(AccountStorage::new(Vec::new())?); + }; + + // Deserialize the AccountStorageHeader from the blob + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all latest map values for this account + let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::is_latest.eq(true)) + .load(conn)?; + + // Group map values by slot name + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + for (slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + 
let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) +} + +// ACCOUNT MUTATION +// ================================================================================================ + #[derive(Queryable, Selectable)] #[diesel(table_name = crate::db::schema::account_vault_assets)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] @@ -637,73 +794,6 @@ impl TryFrom for AccountVaultValue { } } -#[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] -#[diesel(table_name = schema::accounts)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct AccountRaw { - pub account_id: Vec, - pub account_commitment: Vec, - pub block_num: i64, - pub storage: Option>, - pub vault: Option>, - pub nonce: Option, -} - -#[derive(Debug, Clone, QueryableByName)] -pub struct AccountWithCodeRawJoined { - #[diesel(embed)] - pub account: AccountRaw, - #[diesel(embed)] - pub code: Option>, -} - -impl From<(AccountRaw, Option>)> for AccountWithCodeRawJoined { - fn from((account, code): (AccountRaw, Option>)) -> Self { - Self { account, code } - } -} - -impl TryInto for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result { - use proto::domain::account::{AccountInfo, AccountSummary}; - - let account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - let account_commitment = Word::read_from_bytes(&self.account.account_commitment[..])?; - let block_num = BlockNumber::from_raw_sql(self.account.block_num)?; - let summary = AccountSummary { - account_id, - account_commitment, - block_num, - }; - let maybe_account = self.try_into()?; - Ok(AccountInfo { summary, details: maybe_account }) - } -} - -impl TryInto> for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result, Self::Error> { - 
let account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - - let details = if let (Some(vault), Some(storage), Some(nonce), Some(code)) = - (self.account.vault, self.account.storage, self.account.nonce, self.code) - { - let vault = AssetVault::read_from_bytes(&vault)?; - let storage = AccountStorage::read_from_bytes(&storage)?; - let code = AccountCode::read_from_bytes(&code)?; - let nonce = raw_sql_to_nonce(nonce); - let nonce = Felt::new(nonce); - let account = Account::new_unchecked(account_id, vault, storage, code, nonce, None); - Some(account) - } else { - // a private account - None - }; - Ok(details) - } -} - #[derive(Debug, Clone, PartialEq, Eq, Selectable, Queryable, QueryableByName)] #[diesel(table_name = schema::accounts)] #[diesel(check_for_backend(Sqlite))] @@ -730,7 +820,7 @@ impl TryInto for AccountSummaryRaw { /// Insert an account vault asset row into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest=true` for the new row and update any existing +/// Sets `is_latest=true` for the new row and updates any existing /// row with the same `(account_id, vault_key)` tuple to `is_latest=false`. /// /// # Returns @@ -770,8 +860,8 @@ pub(crate) fn insert_account_vault_asset( /// Insert an account storage map value into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest=true` for the new row and update any existing -/// row with the same `(account_id, slot, key)` tuple to `is_latest=false`. +/// Sets `is_latest=true` for the new row and updates any existing +/// row with the same `(account_id, slot_index, key)` tuple to `is_latest=false`. 
/// /// # Returns /// @@ -825,32 +915,6 @@ pub(crate) fn upsert_accounts( ) -> Result { use proto::domain::account::NetworkAccountPrefix; - fn select_details_stmt( - conn: &mut SqliteConnection, - account_id: AccountId, - ) -> Result, DatabaseError> { - let account_id = account_id.to_bytes(); - let accounts = SelectDsl::select( - schema::accounts::table.left_join( - schema::account_codes::table.on(schema::accounts::code_commitment - .eq(schema::account_codes::code_commitment.nullable())), - ), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::account_id.eq(account_id)) - .filter(schema::accounts::is_latest.eq(true)) - .get_results::<(AccountRaw, Option>)>(conn)?; - - // SELECT .. FROM accounts LEFT JOIN account_codes - // ON accounts.code_commitment == account_codes.code_commitment - - let accounts = Result::from_iter(accounts.into_iter().filter_map(|x| { - let account_with_code = AccountWithCodeRawJoined::from(x); - account_with_code.try_into().transpose() - }))?; - Ok(accounts) - } - let mut count = 0; for update in accounts { let account_id = update.account_id(); @@ -923,10 +987,8 @@ pub(crate) fn upsert_accounts( }, AccountUpdateDetails::Delta(delta) => { - let mut rows = select_details_stmt(conn, account_id)?.into_iter(); - let Some(account_before) = rows.next() else { - return Err(DatabaseError::AccountNotFoundInDb(account_id)); - }; + // Reconstruct the full account from database tables + let account = select_full_account(conn, account_id)?; // --- collect storage map updates ---------------------------- @@ -940,8 +1002,7 @@ pub(crate) fn upsert_accounts( // apply delta to the account; we need to do this before we process asset updates // because we currently need to get the current value of fungible assets from the // account - let account_after = - apply_delta(account_before, delta, &update.final_state_commitment())?; + let account_after = apply_delta(account, delta, &update.final_state_commitment())?; // 
--- process asset updates ---------------------------------- @@ -996,11 +1057,14 @@ pub(crate) fn upsert_accounts( account_commitment: update.final_state_commitment().to_bytes(), block_num: block_num_raw, nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), - storage: full_account.as_ref().map(|account| account.storage().to_bytes()), - vault: full_account.as_ref().map(|account| account.vault().to_bytes()), code_commitment: full_account .as_ref() .map(|account| account.code().commitment().to_bytes()), + // Store only the header (slot metadata + map roots), not full storage with map contents + storage_header: full_account + .as_ref() + .map(|account| account.storage().to_header().to_bytes()), + vault_root: full_account.as_ref().map(|account| account.vault().root().to_bytes()), is_latest: true, created_at_block, }; @@ -1014,7 +1078,6 @@ pub(crate) fn upsert_accounts( insert_account_storage_map_value(conn, acc_id, block_num, slot_name, key, value)?; } - // insert pending vault-asset entries for (acc_id, vault_key, update) in pending_asset_inserts { insert_account_vault_asset(conn, acc_id, block_num, vault_key, update)?; } @@ -1059,9 +1122,9 @@ pub(crate) struct AccountRowInsert { pub(crate) block_num: i64, pub(crate) account_commitment: Vec, pub(crate) code_commitment: Option>, - pub(crate) storage: Option>, - pub(crate) vault: Option>, pub(crate) nonce: Option, + pub(crate) storage_header: Option>, + pub(crate) vault_root: Option>, pub(crate) is_latest: bool, pub(crate) created_at_block: i64, } diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs new file mode 100644 index 0000000000..dc613a9c61 --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/at_block.rs @@ -0,0 +1,269 @@ +use std::collections::BTreeMap; + +use diesel::prelude::Queryable; +use diesel::query_dsl::methods::SelectDsl; +use diesel::{ + BoolExpressionMethods, + ExpressionMethods, + 
OptionalExtension, + QueryDsl, + RunQueryDsl, + SqliteConnection, +}; +use miden_protocol::account::{ + AccountHeader, + AccountId, + AccountStorage, + AccountStorageHeader, + StorageMap, + StorageSlot, + StorageSlotName, + StorageSlotType, +}; +use miden_protocol::asset::Asset; +use miden_protocol::block::BlockNumber; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{Felt, FieldElement, Word}; + +use crate::db::models::conv::{SqlTypeConvert, raw_sql_to_nonce}; +use crate::db::schema; +use crate::errors::DatabaseError; + +// ACCOUNT HEADER +// ================================================================================================ + +#[derive(Debug, Clone, Queryable)] +struct AccountHeaderDataRaw { + code_commitment: Option>, + nonce: Option, + storage_header: Option>, + vault_root: Option>, +} + +/// Queries the account header for a specific account at a specific block number. +/// +/// This reconstructs the `AccountHeader` by reading from the `accounts` table: +/// - `account_id`, `nonce`, `code_commitment`, `storage_header`, `vault_root` +/// +/// Returns `None` if the account doesn't exist at that block. 
+/// +/// # Arguments +/// +/// * `conn` - Database connection +/// * `account_id` - The account ID to query +/// * `block_num` - The block number at which to query the account header +/// +/// # Returns +/// +/// * `Ok(Some(AccountHeader))` - The account header if found +/// * `Ok(None)` - If account doesn't exist at that block +/// * `Err(DatabaseError)` - If there's a database error +pub(crate) fn select_account_header_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result, DatabaseError> { + use schema::accounts; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + let account_data: Option = SelectDsl::select( + accounts::table + .filter(accounts::account_id.eq(&account_id_bytes)) + .filter(accounts::block_num.le(block_num_sql)) + .order(accounts::block_num.desc()) + .limit(1), + ( + accounts::code_commitment, + accounts::nonce, + accounts::storage_header, + accounts::vault_root, + ), + ) + .first(conn) + .optional()?; + + let Some(AccountHeaderDataRaw { + code_commitment: code_commitment_bytes, + nonce: nonce_raw, + storage_header: storage_header_blob, + vault_root: vault_root_bytes, + }) = account_data + else { + return Ok(None); + }; + + let (storage_commitment, storage_header) = match storage_header_blob { + Some(blob) => { + let header = AccountStorageHeader::read_from_bytes(&blob)?; + let commitment = header.to_commitment(); + (commitment, header) + }, + None => (Word::default(), AccountStorageHeader::new(Vec::new())?), + }; + + let code_commitment = code_commitment_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .unwrap_or(Word::default()); + + let nonce = nonce_raw.map_or(Felt::ZERO, raw_sql_to_nonce); + + let vault_root = vault_root_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? 
+ .unwrap_or(Word::default()); + + Ok(Some(( + AccountHeader::new(account_id, nonce, vault_root, storage_commitment, code_commitment), + storage_header, + ))) +} + +// ACCOUNT VAULT +// ================================================================================================ + +/// Query vault assets at a specific block by finding the most recent update for each `vault_key`. +pub(crate) fn select_account_vault_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result, DatabaseError> { + use schema::account_vault_assets as t; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + // Since Diesel doesn't support composite keys in subqueries easily, we use a two-step approach: + // Step 1: Get max block_num for each vault_key + let latest_blocks_per_vault_key = Vec::from_iter( + QueryDsl::select( + t::table + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::block_num.le(block_num_sql)) + .group_by(t::vault_key), + (t::vault_key, diesel::dsl::max(t::block_num)), + ) + .load::<(Vec, Option)>(conn)? 
+ .into_iter() + .filter_map(|(key, maybe_block)| maybe_block.map(|block| (key, block))), + ); + + if latest_blocks_per_vault_key.is_empty() { + return Ok(Vec::new()); + } + + // Step 2: Fetch the full rows matching (vault_key, block_num) pairs + let mut assets = Vec::new(); + for (vault_key_bytes, max_block) in latest_blocks_per_vault_key { + // TODO we should not make a query per vault key, but query many at once or + // or find an alternative approach + let result: Option>> = QueryDsl::select( + t::table.filter( + t::account_id + .eq(&account_id_bytes) + .and(t::vault_key.eq(&vault_key_bytes)) + .and(t::block_num.eq(max_block)), + ), + t::asset, + ) + .first(conn) + .optional()?; + if let Some(Some(asset_bytes)) = result { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + } + + // Sort by vault_key for consistent ordering + assets.sort_by_key(Asset::vault_key); + + Ok(assets) +} + +// ACCOUNT STORAGE +// ================================================================================================ + +/// Returns account storage at a given block by reading from `accounts.storage_header` +/// (which contains the `AccountStorageHeader`) and reconstructing full storage from +/// map values in `account_storage_map_values` table. +pub(crate) fn select_account_storage_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + // Query storage header blob for this account at or before this block + let storage_blob: Option> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::block_num.le(block_num_sql)) + .order(schema::accounts::block_num.desc()) + .limit(1) + .first(conn) + .optional()? 
+ .flatten(); + + let Some(blob) = storage_blob else { + // No storage means empty storage + return Ok(AccountStorage::new(Vec::new())?); + }; + + // Deserialize the AccountStorageHeader from the blob + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all map values for this account up to and including this block. + // For each (slot_name, key), we need the latest value at or before block_num. + // First, get all entries up to block_num + let map_values: Vec<(i64, String, Vec, Vec)> = + SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes).and(t::block_num.le(block_num_sql))) + .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc())) + .load(conn)?; + + // For each (slot_name, key) pair, keep only the latest entry (highest block_num) + let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); + + for (_, slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + + // Only insert if we haven't seen this (slot_name, key) yet + // (since results are ordered by block_num desc, first one is latest) + latest_map_entries.entry((slot_name, key)).or_insert(value); + } + + // Group entries by slot name + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + for ((slot_name, key), value) in latest_map_entries { + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + 
StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) +} diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs new file mode 100644 index 0000000000..67eb24c1ff --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -0,0 +1,552 @@ +//! Tests for the `accounts` module, specifically for account storage and historical queries. + +use diesel::query_dsl::methods::SelectDsl; +use diesel::{Connection, OptionalExtension, QueryDsl, RunQueryDsl}; +use diesel_migrations::MigrationHarness; +use miden_node_utils::fee::test_fee_params; +use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ + Account, + AccountBuilder, + AccountComponent, + AccountDelta, + AccountId, + AccountIdVersion, + AccountStorageMode, + AccountType, + StorageSlot, + StorageSlotName, +}; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Serializable; +use miden_protocol::{EMPTY_WORD, Felt, Word}; +use miden_standards::account::auth::AuthRpoFalcon512; +use miden_standards::code_builder::CodeBuilder; + +use super::*; +use crate::db::migrations::MIGRATIONS; + +fn setup_test_db() -> SqliteConnection { + let mut conn = + SqliteConnection::establish(":memory:").expect("Failed to create in-memory database"); + + conn.run_pending_migrations(MIGRATIONS).expect("Failed to run migrations"); + + conn +} + +fn create_test_account_with_storage() -> (Account, 
AccountId) { + // Create a simple public account with one value storage slot + let account_id = AccountId::dummy( + [1u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let storage_value = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let component_storage = vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + (account, account_id) +} + +fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { + use crate::db::schema::block_headers; + + let block_header = BlockHeader::new( + 1_u8.into(), + Word::default(), + block_num, + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + SecretKey::new().public_key(), + test_fee_params(), + 0_u8.into(), + ); + + diesel::insert_into(block_headers::table) + .values(( + block_headers::block_num.eq(i64::from(block_num.as_u32())), + block_headers::block_header.eq(block_header.to_bytes()), + )) + .execute(conn) + .expect("Failed to insert block header"); +} + +// ACCOUNT HEADER AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_header_at_block_returns_none_for_nonexistent() { + let mut conn = setup_test_db(); + let block_num = 
BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let account_id = AccountId::dummy( + [99u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + // Query for a non-existent account + let result = select_account_header_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert!(result.is_none(), "Should return None for non-existent account"); +} + +#[test] +fn test_select_account_header_at_block_returns_correct_header() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + // Insert the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query the account header + let (header, _storage_header) = + select_account_header_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed") + .expect("Header should exist"); + + assert_eq!(header.id(), account_id, "Account ID should match"); + assert_eq!(header.nonce(), account.nonce(), "Nonce should match"); + assert_eq!( + header.code_commitment(), + account.code().commitment(), + "Code commitment should match" + ); +} + +#[test] +fn test_select_account_header_at_block_historical_query() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Insert the account at block 1 + let nonce_1 = account.nonce(); + let 
delta_1 = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_1 = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Query at block 1 - should return the account + let (header_1, _) = select_account_header_at_block(&mut conn, account_id, block_num_1) + .expect("Query should succeed") + .expect("Header should exist at block 1"); + + assert_eq!(header_1.nonce(), nonce_1, "Nonce at block 1 should match"); + + // Query at block 2 - should return the same account (most recent before block 2) + let (header_2, _) = select_account_header_at_block(&mut conn, account_id, block_num_2) + .expect("Query should succeed") + .expect("Header should exist at block 2"); + + assert_eq!(header_2.nonce(), nonce_1, "Nonce at block 2 should match block 1"); +} + +// ACCOUNT VAULT AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_vault_at_block_empty() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + // Insert account without vault assets + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query vault - should return empty (the test account has no assets) + let assets = select_account_vault_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert!(assets.is_empty(), "Account should have no assets"); +} + +// ACCOUNT STORAGE AT BLOCK TESTS +// 
================================================================================================ + +#[test] +fn test_select_account_storage_at_block_returns_storage() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let original_storage_commitment = account.storage().to_commitment(); + + // Insert the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query storage + let storage = select_account_storage_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert_eq!( + storage.to_commitment(), + original_storage_commitment, + "Storage commitment should match" + ); +} + +#[test] +fn test_upsert_accounts_inserts_storage_header() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment_original = account.storage().to_commitment(); + let storage_slots_len = account.storage().slots().len(); + let account_commitment = account.commitment(); + + // Create full state delta from the account + let delta = AccountDelta::try_from(account).unwrap(); + assert!(delta.is_full_state(), "Delta should be full state"); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + // Upsert account + let result = upsert_accounts(&mut conn, &[account_update], block_num); + assert!(result.is_ok(), "upsert_accounts failed: {:?}", result.err()); + assert_eq!(result.unwrap(), 1, "Expected 1 account to be 
inserted"); + + // Query storage header back + let queried_storage = select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query storage header"); + + // Verify storage commitment matches + assert_eq!( + queried_storage.to_commitment(), + storage_commitment_original, + "Storage commitment mismatch" + ); + + // Verify number of slots matches + assert_eq!(queried_storage.slots().len(), storage_slots_len, "Storage slots count mismatch"); + + // Verify exactly 1 latest account with storage exists + let header_count: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::storage_header.is_not_null()) + .count() + .get_result(&mut conn) + .expect("Failed to count accounts with storage"); + + assert_eq!(header_count, 1, "Expected exactly 1 latest account with storage"); +} + +#[test] +fn test_upsert_accounts_updates_is_latest_flag() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 and 2 + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Save storage commitment before moving account + let storage_commitment_1 = account.storage().to_commitment(); + let account_commitment_1 = account.commitment(); + + // First update with original account - full state delta + let delta_1 = AccountDelta::try_from(account).unwrap(); + + let account_update_1 = BlockAccountUpdate::new( + account_id, + account_commitment_1, + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Create modified account with different storage value + let storage_value_modified = + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]); + let 
component_storage_modified = + vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value_modified)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component_2 = AccountComponent::new(account_component_code, component_storage_modified) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account_2 = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component_2) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let storage_commitment_2 = account_2.storage().to_commitment(); + let account_commitment_2 = account_2.commitment(); + + // Second update with modified account - full state delta + let delta_2 = AccountDelta::try_from(account_2).unwrap(); + + let account_update_2 = BlockAccountUpdate::new( + account_id, + account_commitment_2, + AccountUpdateDetails::Delta(delta_2), + ); + + upsert_accounts(&mut conn, &[account_update_2], block_num_2).expect("Second upsert failed"); + + // Verify 2 total account rows exist (both historical records) + let total_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .count() + .get_result(&mut conn) + .expect("Failed to count total accounts"); + + assert_eq!(total_accounts, 2, "Expected 2 total account records"); + + // Verify only 1 is marked as latest + let latest_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .count() + .get_result(&mut conn) + .expect("Failed to count latest accounts"); + + assert_eq!(latest_accounts, 1, "Expected exactly 1 latest account"); + + // Verify latest storage matches second update + let latest_storage = 
select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query latest storage"); + + assert_eq!( + latest_storage.to_commitment(), + storage_commitment_2, + "Latest storage should match second update" + ); + + // Verify historical query returns first update + let storage_at_block_1 = select_account_storage_at_block(&mut conn, account_id, block_num_1) + .expect("Failed to query storage at block 1"); + + assert_eq!( + storage_at_block_1.to_commitment(), + storage_commitment_1, + "Storage at block 1 should match first update" + ); +} + +#[test] +fn test_upsert_accounts_with_multiple_storage_slots() { + let mut conn = setup_test_db(); + + // Create account with 3 storage slots + let account_id = AccountId::dummy( + [2u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let slot_value_1 = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let slot_value_2 = Word::from([Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)]); + let slot_value_3 = Word::from([Felt::new(9), Felt::new(10), Felt::new(11), Felt::new(12)]); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), slot_value_1), + StorageSlot::with_value(StorageSlotName::mock(1), slot_value_2), + StorageSlot::with_value(StorageSlotName::mock(2), slot_value_3), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let block_num = 
BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment = account.storage().to_commitment(); + let account_commitment = account.commitment(); + let delta = AccountDelta::try_from(account).unwrap(); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Upsert with multiple storage slots failed"); + + // Query back and verify + let queried_storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + assert_eq!( + queried_storage.to_commitment(), + storage_commitment, + "Storage commitment mismatch" + ); + + // Note: Auth component adds 1 storage slot, so 3 component slots + 1 auth = 4 total + assert_eq!( + queried_storage.slots().len(), + 4, + "Expected 4 storage slots (3 component + 1 auth)" + ); + + // The storage commitment matching proves that all values are correctly preserved. + // We don't check individual slot values by index since slot ordering may vary. 
+}
+
+#[test]
+fn test_upsert_accounts_with_empty_storage() {
+    let mut conn = setup_test_db();
+
+    // Create account with no component storage slots (only auth slot)
+    let account_id = AccountId::dummy(
+        [3u8; 15],
+        AccountIdVersion::Version0,
+        AccountType::RegularAccountImmutableCode,
+        AccountStorageMode::Public,
+    );
+
+    let account_component_code = CodeBuilder::default()
+        .compile_component_code("test::interface", "pub proc foo push.1 end")
+        .unwrap();
+
+    let component = AccountComponent::new(account_component_code, vec![])
+        .unwrap()
+        .with_supported_type(AccountType::RegularAccountImmutableCode);
+
+    let account = AccountBuilder::new([3u8; 32])
+        .account_type(AccountType::RegularAccountImmutableCode)
+        .storage_mode(AccountStorageMode::Public)
+        .with_component(component)
+        .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD)))
+        .build_existing()
+        .unwrap();
+
+    let block_num = BlockNumber::from_epoch(0);
+    insert_block_header(&mut conn, block_num);
+
+    let storage_commitment = account.storage().to_commitment();
+    let account_commitment = account.commitment();
+    let delta = AccountDelta::try_from(account).unwrap();
+
+    let account_update =
+        BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta));
+
+    upsert_accounts(&mut conn, &[account_update], block_num)
+        .expect("Upsert with empty storage failed");
+
+    // Query back and verify
+    let queried_storage =
+        select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage");
+
+    assert_eq!(
+        queried_storage.to_commitment(),
+        storage_commitment,
+        "Storage commitment mismatch for empty storage"
+    );
+
+    // Note: Auth component adds 1 storage slot, so even "empty" accounts have 1 slot
+    assert_eq!(queried_storage.slots().len(), 1, "Expected 1 storage slot (auth component)");
+
+    // Verify the storage header blob exists in database
+    let storage_header_exists: Option<bool> = SelectDsl::select(
+        schema::accounts::table
+            
.filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)), + schema::accounts::storage_header.is_not_null(), + ) + .first(&mut conn) + .optional() + .expect("Failed to check storage header existence"); + + assert_eq!( + storage_header_exists, + Some(true), + "Storage header blob should exist even for empty storage" + ); +} diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 4c021ef953..6bf6af3cf6 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -27,9 +27,9 @@ diesel::table! { network_account_id_prefix -> Nullable, account_commitment -> Binary, code_commitment -> Nullable, - storage -> Nullable, - vault -> Nullable, nonce -> Nullable, + storage_header -> Nullable, + vault_root -> Nullable, block_num -> BigInt, is_latest -> Bool, created_at_block -> BigInt, diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 413f8a5242..3988e160da 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -12,6 +12,7 @@ use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, AccountBuilder, + AccountCode, AccountComponent, AccountDelta, AccountId, @@ -21,6 +22,7 @@ use miden_protocol::account::{ AccountType, AccountVaultDelta, StorageSlot, + StorageSlotContent, StorageSlotDelta, StorageSlotName, }; @@ -61,6 +63,7 @@ use miden_protocol::transaction::{ TransactionHeader, TransactionId, }; +use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word, ZERO}; use miden_standards::account::auth::AuthRpoFalcon512; use miden_standards::code_builder::CodeBuilder; @@ -464,27 +467,25 @@ fn sql_unconsumed_network_notes() { create_block(&mut conn, 1.into()); // Create an unconsumed note in each block. - let notes = (0..2) - .map(|i: u32| { - let note = NoteRecord { - block_num: 0.into(), // Created on same block. 
-                note_index: BlockNoteIndex::new(0, i as usize).unwrap(),
-                note_id: num_to_word(i.into()),
-                note_commitment: num_to_word(i.into()),
-                metadata: NoteMetadata::new(
-                    account_note.0,
-                    NoteType::Public,
-                    NoteTag::from_account_id(account_note.0),
-                    NoteExecutionHint::none(),
-                    Felt::default(),
-                )
-                .unwrap(),
-                details: None,
-                inclusion_path: SparseMerklePath::default(),
-            };
-            (note, Some(num_to_nullifier(i.into())))
-        })
-        .collect::<Vec<_>>();
+    let notes = Vec::from_iter((0..2).map(|i: u32| {
+        let note = NoteRecord {
+            block_num: 0.into(), // Created on same block.
+            note_index: BlockNoteIndex::new(0, i as usize).unwrap(),
+            note_id: num_to_word(i.into()),
+            note_commitment: num_to_word(i.into()),
+            metadata: NoteMetadata::new(
+                account_note.0,
+                NoteType::Public,
+                NoteTag::from_account_id(account_note.0),
+                NoteExecutionHint::none(),
+                Felt::default(),
+            )
+            .unwrap(),
+            details: None,
+            inclusion_path: SparseMerklePath::default(),
+        };
+        (note, Some(num_to_nullifier(i.into())))
+    }));
 
     queries::insert_scripts(&mut conn, notes.iter().map(|(note, _)| note)).unwrap();
     queries::insert_notes(&mut conn, &notes).unwrap();
@@ -1165,8 +1166,7 @@ fn sql_account_storage_map_values_insertion() {
     let mut map1 = StorageMapDelta::default();
     map1.insert(key1, value1);
     map1.insert(key2, value2);
-    let delta1: BTreeMap<_, _> =
-        [(slot_name.clone(), StorageSlotDelta::Map(map1))].into_iter().collect();
+    let delta1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map1))]);
     let storage1 = AccountStorageDelta::from_raw(delta1);
     let delta1 =
         AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap();
@@ -1326,6 +1326,30 @@ fn mock_block_account_update(account_id: AccountId, num: u64) -> BlockAccountUpd
     BlockAccountUpdate::new(account_id, num_to_word(num), AccountUpdateDetails::Private)
 }
 
+// Helper function to create account with specific code for tests
+fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account {
+    let
component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(1), num_to_word(1)), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", code_str) + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountUpdatableCode); + + AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap() +} + fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader { let initial_state_commitment = Word::try_from([num, 0, 0, 0]).unwrap(); let final_account_commitment = Word::try_from([0, num, 0, 0]).unwrap(); @@ -1428,6 +1452,137 @@ fn mock_account_code_and_storage( .unwrap() } +// ACCOUNT CODE TESTS +// ================================================================================================ + +#[test] +fn test_select_account_code_by_commitment() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + + // Create block 1 + create_block(&mut conn, block_num_1); + + // Create an account with code at block 1 using the existing mock function + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + // Get the code commitment and bytes before inserting + let code_commitment = account.code().commitment(); + let expected_code = account.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account.id(), + account.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // 
Query code by commitment - should return the code + let code = queries::select_account_code_by_commitment(&mut conn, code_commitment) + .unwrap() + .expect("Code should exist"); + assert_eq!(code, expected_code); + + // Query code for non-existent commitment - should return None + let non_existent_commitment = [0u8; 32]; + let non_existent_commitment = Word::read_from_bytes(&non_existent_commitment).unwrap(); + let code_other = + queries::select_account_code_by_commitment(&mut conn, non_existent_commitment).unwrap(); + assert!(code_other.is_none(), "Code should not exist for non-existent commitment"); +} + +#[test] +fn test_select_account_code_by_commitment_multiple_codes() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + let block_num_2 = BlockNumber::from(2); + + // Create blocks + create_block(&mut conn, block_num_1); + create_block(&mut conn, block_num_2); + + // Create account with code v1 at block 1 + let code_v1_str = "\ + pub proc account_procedure_1 + push.1.2 + add + end + "; + let account_v1 = create_account_with_code(code_v1_str, [1u8; 32]); + let code_v1_commitment = account_v1.code().commitment(); + let code_v1 = account_v1.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v1.id(), + account_v1.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v1).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // Create account with different code v2 at block 2 + let code_v2_str = "\ + pub proc account_procedure_1 + push.3.4 + mul + end + "; + let account_v2 = create_account_with_code(code_v2_str, [1u8; 32]); // Same seed to keep same account_id + let code_v2_commitment = account_v2.code().commitment(); + let code_v2 = account_v2.code().to_bytes(); + + // Verify that the codes are actually different + assert_ne!( + code_v1, code_v2, + "Test setup error: codes should be different for different code strings" + ); + assert_ne!( 
+ code_v1_commitment, code_v2_commitment, + "Test setup error: code commitments should be different" + ); + + // Insert the updated account at block 2 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v2.id(), + account_v2.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v2).unwrap()), + )], + block_num_2, + ) + .unwrap(); + + // Both codes should be retrievable by their respective commitments + let code_from_v1_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v1_commitment) + .unwrap() + .expect("v1 code should exist"); + assert_eq!(code_from_v1_commitment, code_v1, "v1 commitment should return v1 code"); + + let code_from_v2_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v2_commitment) + .unwrap() + .expect("v2 code should exist"); + assert_eq!(code_from_v2_commitment, code_v2, "v2 commitment should return v2 code"); +} + // GENESIS REGRESSION TESTS // ================================================================================================ @@ -1682,3 +1837,514 @@ fn regression_1461_full_state_delta_inserts_vault_assets() { assert_eq!(vault_asset.asset, Some(expected_asset)); assert_eq!(vault_asset.vault_key, expected_asset.vault_key()); } + +// SERIALIZATION SYMMETRY TESTS +// ================================================================================================ +// +// These tests ensure that `to_bytes` and `from_bytes`/`read_from_bytes` are symmetric for all +// types used in database operations. This guarantees that data inserted into the database can +// always be correctly retrieved. 
+ +#[test] +fn serialization_symmetry_core_types() { + // AccountId + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + let bytes = account_id.to_bytes(); + let restored = AccountId::read_from_bytes(&bytes).unwrap(); + assert_eq!(account_id, restored, "AccountId serialization must be symmetric"); + + // Word + let word = num_to_word(0x1234_5678_9ABC_DEF0); + let bytes = word.to_bytes(); + let restored = Word::read_from_bytes(&bytes).unwrap(); + assert_eq!(word, restored, "Word serialization must be symmetric"); + + // Nullifier + let nullifier = num_to_nullifier(0xDEAD_BEEF); + let bytes = nullifier.to_bytes(); + let restored = Nullifier::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifier, restored, "Nullifier serialization must be symmetric"); + + // TransactionId + let tx_id = TransactionId::new(num_to_word(1), num_to_word(2), num_to_word(3), num_to_word(4)); + let bytes = tx_id.to_bytes(); + let restored = TransactionId::read_from_bytes(&bytes).unwrap(); + assert_eq!(tx_id, restored, "TransactionId serialization must be symmetric"); + + // NoteId + let note_id = NoteId::new(num_to_word(1), num_to_word(2)); + let bytes = note_id.to_bytes(); + let restored = NoteId::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_id, restored, "NoteId serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_block_header() { + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + 3.into(), + num_to_word(4), + num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + let bytes = block_header.to_bytes(); + let restored = BlockHeader::read_from_bytes(&bytes).unwrap(); + assert_eq!(block_header, restored, "BlockHeader serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_assets() { + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // FungibleAsset + 
let fungible = FungibleAsset::new(faucet_id, 1000).unwrap(); + let asset: Asset = fungible.into(); + let bytes = asset.to_bytes(); + let restored = Asset::read_from_bytes(&bytes).unwrap(); + assert_eq!(asset, restored, "Asset (fungible) serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_account_code() { + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + let code = account.code(); + let bytes = code.to_bytes(); + let restored = AccountCode::read_from_bytes(&bytes).unwrap(); + assert_eq!(*code, restored, "AccountCode serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_sparse_merkle_path() { + let path = SparseMerklePath::default(); + let bytes = path.to_bytes(); + let restored = SparseMerklePath::read_from_bytes(&bytes).unwrap(); + assert_eq!(path, restored, "SparseMerklePath serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_metadata() { + let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + // Use a tag that roundtrips properly - NoteTag::LocalAny stores the full u32 including type + // bits + let tag = NoteTag::from_account_id(sender); + let metadata = NoteMetadata::new( + sender, + NoteType::Public, + tag, + NoteExecutionHint::always(), + Felt::new(42), + ) + .unwrap(); + + let bytes = metadata.to_bytes(); + let restored = NoteMetadata::read_from_bytes(&bytes).unwrap(); + assert_eq!(metadata, restored, "NoteMetadata serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_nullifier_vec() { + let nullifiers: Vec = (0..5).map(num_to_nullifier).collect(); + let bytes = nullifiers.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifiers, restored, "Vec serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_id_vec() { + let note_ids: Vec = + (0..5).map(|i| NoteId::new(num_to_word(i), 
num_to_word(i + 100))).collect(); + let bytes = note_ids.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_ids, restored, "Vec serialization must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_block_header() { + let mut conn = create_db(); + + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + BlockNumber::from(42), + num_to_word(4), + num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + // Insert + queries::insert_block_header(&mut conn, &block_header).unwrap(); + + // Retrieve + let retrieved = + queries::select_block_header_by_block_num(&mut conn, Some(block_header.block_num())) + .unwrap() + .expect("Block header should exist"); + + assert_eq!(block_header, retrieved, "BlockHeader DB roundtrip must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_nullifiers() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let nullifiers: Vec = (0..5).map(|i| num_to_nullifier(i << 48)).collect(); + + // Insert + queries::insert_nullifiers_for_block(&mut conn, &nullifiers, block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_nullifiers(&mut conn).unwrap(); + + assert_eq!(nullifiers.len(), retrieved.len(), "Should retrieve same number of nullifiers"); + for (orig, info) in nullifiers.iter().zip(retrieved.iter()) { + assert_eq!(*orig, info.nullifier, "Nullifier DB roundtrip must be symmetric"); + assert_eq!(block_num, info.block_num, "Block number must match"); + } +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account = mock_account_code_and_storage( + 
AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + Some([99u8; 32]), + ); + let account_id = account.id(); + let account_commitment = account.commitment(); + + // Insert with full delta (like genesis) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = BlockAccountUpdate::new( + account_id, + account_commitment, + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_accounts(&mut conn).unwrap(); + assert_eq!(retrieved.len(), 1, "Should have one account"); + + let retrieved_info = &retrieved[0]; + assert_eq!( + retrieved_info.summary.account_id, account_id, + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!( + retrieved_info.summary.account_commitment, account_commitment, + "Account commitment DB roundtrip must be symmetric" + ); + assert_eq!(retrieved_info.summary.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_notes() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(sender, 0)], block_num) + .unwrap(); + + let new_note = create_note(sender); + let note_index = BlockNoteIndex::new(0, 0).unwrap(); + + let note = NoteRecord { + block_num, + note_index, + note_id: new_note.id().as_word(), + note_commitment: new_note.commitment(), + metadata: *new_note.metadata(), + details: Some(NoteDetails::from(&new_note)), + inclusion_path: SparseMerklePath::default(), + }; + + // Insert + queries::insert_scripts(&mut conn, [¬e]).unwrap(); + queries::insert_notes(&mut conn, &[(note.clone(), None)]).unwrap(); + + // Retrieve + let note_ids = vec![NoteId::from_raw(note.note_id)]; + let retrieved = 
queries::select_notes_by_id(&mut conn, ¬e_ids).unwrap(); + + assert_eq!(retrieved.len(), 1, "Should have one note"); + let retrieved_note = &retrieved[0]; + + assert_eq!(note.note_id, retrieved_note.note_id, "NoteId DB roundtrip must be symmetric"); + assert_eq!( + note.note_commitment, retrieved_note.note_commitment, + "Note commitment DB roundtrip must be symmetric" + ); + assert_eq!( + note.metadata, retrieved_note.metadata, + "Metadata DB roundtrip must be symmetric" + ); + assert_eq!( + note.inclusion_path, retrieved_note.inclusion_path, + "Inclusion path DB roundtrip must be symmetric" + ); + assert_eq!( + note.details, retrieved_note.details, + "Note details DB roundtrip must be symmetric" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_transactions() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) + .unwrap(); + + let tx = mock_block_transaction(account_id, 1); + let ordered_tx = OrderedTransactionHeaders::new_unchecked(vec![tx.clone()]); + + // Insert + queries::insert_transactions(&mut conn, block_num, &ordered_tx).unwrap(); + + // Retrieve + let retrieved = queries::select_transactions_by_accounts_and_block_range( + &mut conn, + &[account_id], + BlockNumber::from(0)..=BlockNumber::from(2), + ) + .unwrap(); + + assert_eq!(retrieved.len(), 1, "Should have one transaction"); + let retrieved_tx = &retrieved[0]; + + assert_eq!( + tx.account_id(), + retrieved_tx.account_id, + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!( + tx.id(), + retrieved_tx.transaction_id, + "TransactionId DB roundtrip must be symmetric" + ); + assert_eq!(block_num, retrieved_tx.block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_vault_assets() { + let 
mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + // Create account first + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) + .unwrap(); + + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + let asset: Asset = fungible_asset.into(); + let vault_key = asset.vault_key(); + + // Insert vault asset + queries::insert_account_vault_asset(&mut conn, account_id, block_num, vault_key, Some(asset)) + .unwrap(); + + // Retrieve + let (_, vault_assets) = queries::select_account_vault_assets( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(vault_assets.len(), 1, "Should have one vault asset"); + let retrieved = &vault_assets[0]; + + assert_eq!(retrieved.asset, Some(asset), "Asset DB roundtrip must be symmetric"); + assert_eq!(retrieved.vault_key, vault_key, "VaultKey DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_storage_map_values() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(5); + let key = num_to_word(12345); + let value = num_to_word(67890); + + // Insert + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block_num, + slot_name.clone(), + key, + value, + ) + .unwrap(); + + // Retrieve + let page = queries::select_account_storage_map_values( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(page.values.len(), 1, "Should have one 
storage map value"); + let retrieved = &page.values[0]; + + assert_eq!(retrieved.slot_name, slot_name, "StorageSlotName DB roundtrip must be symmetric"); + assert_eq!(retrieved.key, key, "Key (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.value, value, "Value (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account_storage_with_maps() { + use miden_protocol::account::StorageMap; + + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + // Create storage with both value slots and map slots + let storage_map = StorageMap::with_entries(vec![ + ( + Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), + ), + ( + Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), + ), + ]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), num_to_word(42)), + StorageSlot::with_map(StorageSlotName::mock(1), storage_map), + StorageSlot::with_empty_value(StorageSlotName::mock(2)), + ]; + + let component_code = "pub proc foo push.1 end"; + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([50u8; 32]) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let account_id = account.id(); + let original_storage = 
account.storage().clone(); + let original_commitment = original_storage.to_commitment(); + + // Insert the account (this should store header + map values separately) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve the storage using select_latest_account_storage (reconstructs from header + map + // values) + let retrieved_storage = queries::select_latest_account_storage(&mut conn, account_id).unwrap(); + let retrieved_commitment = retrieved_storage.to_commitment(); + + // Verify the commitment matches (this proves the reconstruction is correct) + assert_eq!( + original_commitment, retrieved_commitment, + "Storage commitment must match after DB roundtrip" + ); + + // Verify slot count matches + assert_eq!( + original_storage.slots().len(), + retrieved_storage.slots().len(), + "Number of slots must match" + ); + + // Verify each slot + for (original_slot, retrieved_slot) in + original_storage.slots().iter().zip(retrieved_storage.slots().iter()) + { + assert_eq!(original_slot.name(), retrieved_slot.name(), "Slot names must match"); + assert_eq!(original_slot.slot_type(), retrieved_slot.slot_type(), "Slot types must match"); + + match (original_slot.content(), retrieved_slot.content()) { + (StorageSlotContent::Value(orig), StorageSlotContent::Value(retr)) => { + assert_eq!(orig, retr, "Value slot contents must match"); + }, + (StorageSlotContent::Map(orig_map), StorageSlotContent::Map(retr_map)) => { + assert_eq!(orig_map.root(), retr_map.root(), "Map slot roots must match"); + for (key, value) in orig_map.entries() { + let retrieved_value = retr_map.get(key); + assert_eq!(*value, retrieved_value, "Map entry for key {:?} must match", key); + } + }, + // The slot_type assertion above guarantees matching variants, so this is 
unreachable + _ => unreachable!(), + } + } + + // Also verify full account reconstruction via select_account (which calls select_full_account) + let account_info = queries::select_account(&mut conn, account_id).unwrap(); + assert!(account_info.details.is_some(), "Public account should have details"); + let retrieved_account = account_info.details.unwrap(); + assert_eq!( + account.commitment(), + retrieved_account.commitment(), + "Full account commitment must match after DB roundtrip" + ); +} diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index abe80e8c3a..7ac836ed3f 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -8,9 +8,10 @@ use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_node_utils::limiter::QueryLimitError; use miden_protocol::account::AccountId; use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::MerkleError; use miden_protocol::crypto::merkle::mmr::MmrError; use miden_protocol::crypto::utils::DeserializationError; -use miden_protocol::note::Nullifier; +use miden_protocol::note::{NoteId, Nullifier}; use miden_protocol::transaction::OutputNote; use miden_protocol::{ AccountDeltaError, @@ -21,6 +22,7 @@ use miden_protocol::{ FeeError, NoteError, NullifierTreeError, + StorageMapError, Word, }; use thiserror::Error; @@ -56,11 +58,13 @@ pub enum DatabaseError { #[error("I/O error")] IoError(#[from] io::Error), #[error("merkle error")] - MerkleError(#[from] miden_protocol::crypto::merkle::MerkleError), + MerkleError(#[from] MerkleError), #[error("network account error")] NetworkAccountError(#[from] NetworkAccountError), #[error("note error")] NoteError(#[from] NoteError), + #[error("storage map error")] + StorageMapError(#[from] StorageMapError), #[error("setup deadpool connection pool failed")] Deadpool(#[from] deadpool::managed::PoolError), #[error("setup deadpool connection pool failed")] @@ -98,16 +102,18 @@ pub enum DatabaseError { AccountNotFoundInDb(AccountId), 
#[error("account {0} state at block height {1} not found")] AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber), + #[error("block {0} not found in database")] + BlockNotFound(BlockNumber), #[error("historical block {block_num} not available: {reason}")] HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String }, #[error("accounts {0:?} not found")] AccountsNotFoundInDb(Vec), #[error("account {0} is not on the chain")] AccountNotPublic(AccountId), - #[error("account {0} details missing")] - AccountDetailsMissing(AccountId), #[error("invalid block parameters: block_from ({from}) > block_to ({to})")] InvalidBlockRange { from: BlockNumber, to: BlockNumber }, + #[error("invalid storage slot type: {0}")] + InvalidStorageSlotType(i32), #[error("data corrupted: {0}")] DataCorrupted(String), #[error("SQLite pool interaction failed: {0}")] @@ -177,6 +183,8 @@ impl From for Status { pub enum StateInitializationError { #[error("account tree IO error: {0}")] AccountTreeIoError(String), + #[error("nullifier tree IO error: {0}")] + NullifierTreeIoError(String), #[error("database error")] DatabaseError(#[from] DatabaseError), #[error("failed to create nullifier tree")] @@ -250,6 +258,8 @@ pub enum InvalidBlockError { NewBlockNullifierAlreadySpent(#[source] NullifierTreeError), #[error("duplicate account ID prefix in new block")] NewBlockDuplicateAccountIdPrefix(#[source] AccountTreeError), + #[error("failed to build note tree: {0}")] + FailedToBuildNoteTree(String), } #[derive(Error, Debug)] @@ -463,9 +473,9 @@ pub enum GetNotesByIdError { #[error("malformed note ID")] DeserializationFailed(#[from] ConversionError), #[error("note {0} not found")] - NoteNotFound(miden_protocol::note::NoteId), + NoteNotFound(NoteId), #[error("note {0} is not public")] - NoteNotPublic(miden_protocol::note::NoteId), + NoteNotPublic(NoteId), } // GET NOTE SCRIPT BY ROOT ERRORS diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 02e3bb60d9..830939305a 100644 --- 
a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -23,11 +23,11 @@ use miden_node_proto::domain::account::{ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; -use miden_protocol::account::{AccountHeader, AccountId, StorageSlot, StorageSlotContent}; +use miden_protocol::account::{AccountId, StorageSlotContent}; use miden_protocol::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; -use miden_protocol::crypto::merkle::mmr::{Forest, Mmr, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; use miden_protocol::crypto::merkle::smt::{ LargeSmt, LargeSmtError, @@ -100,7 +100,10 @@ where } } -/// The rollup state +// CHAIN STATE +// ================================================================================================ + +/// The rollup state. pub struct State { /// The database which stores block headers, nullifiers, notes, and the latest states of /// accounts. @@ -120,6 +123,9 @@ pub struct State { } impl State { + // CONSTRUCTOR + // -------------------------------------------------------------------------------------------- + /// Loads the state from the `db`. 
#[instrument(target = COMPONENT, skip_all)] pub async fn load(data_path: &Path) -> Result { @@ -136,21 +142,12 @@ impl State { .await .map_err(StateInitializationError::DatabaseLoadError)?; - let chain_mmr = load_mmr(&mut db).await?; - let block_headers = db.select_all_block_headers().await?; - let latest_block_num = block_headers - .last() - .map_or(BlockNumber::GENESIS, miden_protocol::block::BlockHeader::block_num); + let blockchain = load_mmr(&mut db).await?; + let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); let account_tree = load_account_tree(&mut db, latest_block_num).await?; let nullifier_tree = load_nullifier_tree(&mut db).await?; - let inner = RwLock::new(InnerState { - nullifier_tree, - // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX - // entries. - blockchain: Blockchain::from_mmr_unchecked(chain_mmr), - account_tree, - }); + let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); let writer = Mutex::new(()); let db = Arc::new(db); @@ -158,6 +155,9 @@ impl State { Ok(Self { db, block_store, inner, writer }) } + // STATE MUTATOR + // -------------------------------------------------------------------------------------------- + /// Apply changes of a new block to the DB and in-memory data structures. 
/// /// ## Note on state consistency @@ -200,7 +200,7 @@ impl State { } let block_num = header.block_num(); - let block_commitment = block.header().commitment(); + let block_commitment = header.commitment(); // ensures the right block header is being processed let prev_block = self @@ -249,7 +249,7 @@ impl State { .body() .created_nullifiers() .iter() - .filter(|&n| inner.nullifier_tree.get_block_num(n).is_some()) + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) .copied() .collect(); if !duplicate_nullifiers.is_empty() { @@ -418,6 +418,9 @@ impl State { Ok(()) } + // STATE ACCESSORS + // -------------------------------------------------------------------------------------------- + /// Queries a [BlockHeader] from the database, and returns it alongside its inclusion proof. /// /// If [None] is given as the value of `block_num`, the data for the latest [BlockHeader] is @@ -936,7 +939,7 @@ impl State { return Err(DatabaseError::AccountNotPublic(account_id)); } - let (block_num, witness) = self.get_block_witness(block_num, account_id).await?; + let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; let details = if let Some(request) = details { Some(self.fetch_public_account_details(account_id, block_num, request).await?) @@ -951,7 +954,7 @@ impl State { /// /// If `block_num` is provided, returns the witness at that historical block, /// if not present, returns the witness at the latest block. - async fn get_block_witness( + async fn get_account_witness( &self, block_num: Option, account_id: AccountId, @@ -997,67 +1000,71 @@ impl State { storage_requests, } = detail_request; - let account_info = self.db.select_historical_account_at(account_id, block_num).await?; + if !account_id.has_public_state() { + return Err(DatabaseError::AccountNotPublic(account_id)); + } - // If we get a query for a public account but the details are missing from the database, - // it indicates an inconsistent state in the database. 
- let Some(account) = account_info.details else { - return Err(DatabaseError::AccountDetailsMissing(account_id)); + // Validate block exists in the blockchain before querying the database + self.validate_block_exists(block_num).await?; + + let account_header = self + .db + .select_account_header_at_block(account_id, block_num) + .await? + .ok_or_else(|| DatabaseError::AccountNotPublic(account_id))?; + + let account_code = match code_commitment { + Some(commitment) if commitment == account_header.code_commitment() => None, + Some(_) => { + self.db + .select_account_code_by_commitment(account_header.code_commitment()) + .await? + }, + None => None, }; - let storage_header = account.storage().to_header(); + let vault_details = match asset_vault_commitment { + Some(commitment) if commitment == account_header.vault_root() => { + AccountVaultDetails::empty() + }, + Some(_) => { + let vault_assets = + self.db.select_account_vault_at_block(account_id, block_num).await?; + AccountVaultDetails::from_assets(vault_assets) + }, + None => AccountVaultDetails::empty(), + }; + // TODO: don't load the entire storage at once, load what is required + let storage = self.db.select_account_storage_at_block(account_id, block_num).await?; + let storage_header = storage.to_header(); let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); for StorageMapRequest { slot_name, slot_data } in storage_requests { - let Some(StorageSlotContent::Map(storage_map)) = - account.storage().get(&slot_name).map(StorageSlot::content) - else { - return Err(AccountError::StorageSlotNotMap(slot_name).into()); + let Some(slot) = storage.slots().iter().find(|s| s.name() == &slot_name) else { + continue; }; + + let storage_map = match slot.content() { + StorageSlotContent::Map(map) => map, + StorageSlotContent::Value(_) => { + return Err(AccountError::StorageSlotNotMap(slot_name).into()); + }, + }; + let details = AccountStorageMapDetails::new(slot_name, slot_data, storage_map); 
storage_map_details.push(details); } - // Only include unknown account code blobs, which is equal to a account code digest - // mismatch. If `None` was requested, don't return any. - let account_code = code_commitment - .is_some_and(|code_commitment| code_commitment != account.code().commitment()) - .then(|| account.code().to_bytes()); - - // storage details - let storage_details = AccountStorageDetails { - header: storage_header, - map_details: storage_map_details, - }; - - // Handle vault details based on the `asset_vault_commitment`. - // Similar to `code_commitment`, if the provided commitment matches, we don't return - // vault data. If no commitment is provided or it doesn't match, we return - // the vault data. If the number of vault contained assets are exceeding a - // limit, we signal this back in the response and the user must handle that - // in follow-up request. - let vault_details = match asset_vault_commitment { - Some(commitment) if commitment == account.vault().root() => { - // The client already has the correct vault data - AccountVaultDetails::empty() - }, - Some(_) => { - // The commitment doesn't match, so return vault data - AccountVaultDetails::new(account.vault()) - }, - None => { - // No commitment provided, so don't return vault data - AccountVaultDetails::empty() - }, - }; - Ok(AccountDetails { - account_header: AccountHeader::from(account), + account_header, account_code, vault_details, - storage_details, + storage_details: AccountStorageDetails { + header: storage_header, + map_details: storage_map_details, + }, }) } @@ -1086,6 +1093,26 @@ impl State { self.inner.read().await.latest_block_num() } + /// Validates that a block exists in the blockchain + /// + /// # Attention + /// + /// Acquires a *read lock** on `self.inner`. + /// + /// # Errors + /// + /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain. 
+ async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> { + let inner = self.inner.read().await; + let latest_block_num = inner.latest_block_num(); + + if block_num > latest_block_num { + return Err(DatabaseError::BlockNotFound(block_num)); + } + + Ok(()) + } + /// Runs database optimization. pub async fn optimize_db(&self) -> Result<(), DatabaseError> { self.db.optimize().await @@ -1132,9 +1159,25 @@ impl State { } } -// UTILITIES +// INNER STATE LOADING // ================================================================================================ +#[instrument(level = "info", target = COMPONENT, skip_all)] +async fn load_mmr(db: &mut Db) -> Result { + let block_commitments: Vec = db + .select_all_block_headers() + .await? + .iter() + .map(BlockHeader::commitment) + .collect(); + + // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX + // entries. + let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + + Ok(chain_mmr) +} + #[instrument(level = "info", target = COMPONENT, skip_all)] async fn load_nullifier_tree( db: &mut Db, @@ -1149,24 +1192,12 @@ async fn load_nullifier_tree( .map_err(StateInitializationError::FailedToCreateNullifierTree) } -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_mmr(db: &mut Db) -> Result { - let block_commitments: Vec = db - .select_all_block_headers() - .await? 
- .iter() - .map(BlockHeader::commitment) - .collect(); - - Ok(block_commitments.into()) -} - #[instrument(level = "info", target = COMPONENT, skip_all)] async fn load_account_tree( db: &mut Db, block_number: BlockNumber, ) -> Result, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?.into_iter().collect::>(); + let account_data = Vec::from_iter(db.select_all_account_commitments().await?); let smt_entries = account_data .into_iter() From 4484ec6abf87bd5f495d7b6e732066b17969236f Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Sat, 10 Jan 2026 15:04:12 +0100 Subject: [PATCH 074/125] db: drop the cyclic "optimize" task (#1497) --- CHANGELOG.md | 1 + Cargo.lock | 10 +++---- crates/store/src/db/mod.rs | 12 -------- crates/store/src/lib.rs | 5 ---- crates/store/src/server/db_maintenance.rs | 35 ----------------------- crates/store/src/server/mod.rs | 12 +------- crates/store/src/state.rs | 5 ---- docs/internal/src/store.md | 2 -- 8 files changed, 7 insertions(+), 75 deletions(-) delete mode 100644 crates/store/src/server/db_maintenance.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index fe5bc04926..62b85c4412 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ - Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). - [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). - [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). +- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). 
### Fixes diff --git a/Cargo.lock b/Cargo.lock index e911e94d03..a3a7a95d24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1114,9 +1114,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.3.4" +version = "2.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c415189028b232660655e4893e8bc25ca7aee8e96888db66d9edb400535456a" +checksum = "e130c806dccc85428c564f2dc5a96e05b6615a27c9a28776bd7761a9af4bb552" dependencies = [ "bigdecimal", "diesel_derives", @@ -3385,7 +3385,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -4274,7 +4274,7 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ - "heck 0.5.0", + "heck 0.4.1", "itertools 0.10.5", "log", "multimap", @@ -5382,7 +5382,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 3db913d648..8a5a835a42 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -626,18 +626,6 @@ impl Db { .await } - /// Runs database optimization. - #[instrument(level = "debug", target = COMPONENT, skip_all, err)] - pub async fn optimize(&self) -> Result<(), DatabaseError> { - self.transact("db optimization", |conn| { - diesel::sql_query("PRAGMA optimize") - .execute(conn) - .map_err(DatabaseError::Diesel) - }) - .await?; - Ok(()) - } - /// Loads the network notes for an account that are unconsumed by a specified block number. /// Pagination is used to limit the number of notes returned. 
pub(crate) async fn select_unconsumed_network_notes( diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index ce49564703..636225da15 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -1,5 +1,3 @@ -use std::time::Duration; - mod accounts; mod blocks; mod db; @@ -15,6 +13,3 @@ pub use server::{DataDirectory, Store}; // CONSTANTS // ================================================================================================= const COMPONENT: &str = "miden-store"; - -/// How often to run the database maintenance routine. -const DATABASE_MAINTENANCE_INTERVAL: Duration = Duration::from_secs(24 * 60 * 60); diff --git a/crates/store/src/server/db_maintenance.rs b/crates/store/src/server/db_maintenance.rs deleted file mode 100644 index fce2676772..0000000000 --- a/crates/store/src/server/db_maintenance.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use miden_node_utils::tracing::OpenTelemetrySpanExt; -use tracing::{Instrument, Span}; - -use crate::state::State; - -pub struct DbMaintenance { - state: Arc, - optimization_interval: Duration, -} - -impl DbMaintenance { - pub fn new(state: Arc, optimization_interval: Duration) -> Self { - Self { state, optimization_interval } - } - - /// Runs infinite maintenance loop. 
- pub async fn run(self) { - loop { - tokio::time::sleep(self.optimization_interval).await; - - let root_span = tracing::info_span!( - "optimize_database", - interval = self.optimization_interval.as_secs_f32() - ); - self.state - .optimize_db() - .instrument(root_span) - .await - .unwrap_or_else(|err| Span::current().set_error(&err)); - } - } -} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 4271377577..de51256ad8 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -21,13 +21,11 @@ use tracing::{info, instrument}; use crate::blocks::BlockStore; use crate::db::Db; -use crate::server::db_maintenance::DbMaintenance; use crate::state::State; -use crate::{COMPONENT, DATABASE_MAINTENANCE_INTERVAL, GenesisState}; +use crate::{COMPONENT, GenesisState}; mod api; mod block_producer; -mod db_maintenance; mod ntx_builder; mod rpc_api; @@ -95,9 +93,6 @@ impl Store { let state = Arc::new(State::load(&self.data_directory).await.context("failed to load state")?); - let db_maintenance_service = - DbMaintenance::new(Arc::clone(&state), DATABASE_MAINTENANCE_INTERVAL); - let rpc_service = store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { @@ -129,11 +124,6 @@ impl Store { let mut join_set = JoinSet::new(); - join_set.spawn(async move { - db_maintenance_service.run().await; - Ok(()) - }); - // Build the gRPC server with the API services and trace layer. join_set.spawn( tonic::transport::Server::builder() diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 830939305a..7c72ae8182 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -1113,11 +1113,6 @@ impl State { Ok(()) } - /// Runs database optimization. 
- pub async fn optimize_db(&self) -> Result<(), DatabaseError> { - self.db.optimize().await - } - /// Returns account vault updates for specified account within a block range. pub async fn sync_account_vault( &self, diff --git a/docs/internal/src/store.md b/docs/internal/src/store.md index 5f6f5b0361..1929b7c491 100644 --- a/docs/internal/src/store.md +++ b/docs/internal/src/store.md @@ -18,5 +18,3 @@ startup its likely that you created the database _before_ making schema changes The store consists mainly of a gRPC server which answers requests from the RPC and block-producer components, as well as new block submissions from the block-producer. - -A lightweight background process performs database query optimisation by analysing database queries and statistics. From 46dadde8b8d77a0e5d8b051fae58fd901a35082a Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Sun, 11 Jan 2026 14:09:22 -0800 Subject: [PATCH 075/125] chore: minor block producer code reorg --- crates/block-producer/src/server/mod.rs | 296 +++++++++++++----------- 1 file changed, 157 insertions(+), 139 deletions(-) diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 901bf32e89..e4f8edfccb 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -79,6 +79,9 @@ pub struct BlockProducer { pub mempool_tx_capacity: NonZeroUsize, } +// BLOCK PRODUCER +// ================================================================================================ + impl BlockProducer { /// Serves the block-producer RPC API, the batch-builder and the block-builder. /// @@ -218,28 +221,8 @@ impl BlockProducer { } } -/// Mempool statistics that are updated periodically to avoid locking the mempool. -#[derive(Clone, Copy, Default)] -struct MempoolStats { - /// The mempool's current view of the chain tip height. - chain_tip: BlockNumber, - /// Number of transactions currently in the mempool waiting to be batched. 
- unbatched_transactions: u64, - /// Number of batches currently being proven. - proposed_batches: u64, - /// Number of proven batches waiting for block inclusion. - proven_batches: u64, -} - -impl From for proto::rpc::MempoolStats { - fn from(stats: MempoolStats) -> Self { - proto::rpc::MempoolStats { - unbatched_transactions: stats.unbatched_transactions, - proposed_batches: stats.proposed_batches, - proven_batches: stats.proven_batches, - } - } -} +// BLOCK PRODUCER RPC SERVER +// ================================================================================================ /// Serves the block producer's RPC [api](api_server::Api). struct BlockProducerRpcServer { @@ -258,93 +241,6 @@ struct BlockProducerRpcServer { cached_mempool_stats: Arc>, } -#[tonic::async_trait] -impl api_server::Api for BlockProducerRpcServer { - async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> Result, Status> { - self.submit_proven_transaction(request.into_inner()) - .await - .map(tonic::Response::new) - // This Status::from mapping takes care of hiding internal errors. - .map_err(Into::into) - } - - async fn submit_proven_batch( - &self, - request: tonic::Request, - ) -> Result, Status> { - self.submit_proven_batch(request.into_inner()) - .await - .map(tonic::Response::new) - // This Status::from mapping takes care of hiding internal errors. 
- .map_err(Into::into) - } - - #[instrument( - target = COMPONENT, - name = "block_producer.server.status", - skip_all, - err - )] - async fn status( - &self, - _request: tonic::Request<()>, - ) -> Result, Status> { - let mempool_stats = *self.cached_mempool_stats.read().await; - - Ok(tonic::Response::new(proto::rpc::BlockProducerStatus { - version: env!("CARGO_PKG_VERSION").to_string(), - status: "connected".to_string(), - chain_tip: mempool_stats.chain_tip.as_u32(), - mempool_stats: Some(mempool_stats.into()), - })) - } - - type MempoolSubscriptionStream = MempoolEventSubscription; - - async fn mempool_subscription( - &self, - request: tonic::Request, - ) -> Result, tonic::Status> { - let chain_tip = BlockNumber::from(request.into_inner().chain_tip); - - let subscription = - self.mempool - .lock() - .await - .lock() - .await - .subscribe(chain_tip) - .map_err(|mempool_tip| { - tonic::Status::invalid_argument(format!( - "Mempool's chain tip {mempool_tip} does not match request's {chain_tip}" - )) - })?; - let subscription = ReceiverStream::new(subscription); - - Ok(tonic::Response::new(MempoolEventSubscription { inner: subscription })) - } -} - -struct MempoolEventSubscription { - inner: ReceiverStream, -} - -impl tokio_stream::Stream for MempoolEventSubscription { - type Item = Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.inner - .poll_next_unpin(cx) - .map(|x| x.map(proto::block_producer::MempoolEvent::from).map(Result::Ok)) - } -} - impl BlockProducerRpcServer { pub fn new(mempool: SharedMempool, store: StoreClient) -> Self { Self { @@ -354,6 +250,40 @@ impl BlockProducerRpcServer { } } + // SERVER STARTUP + // -------------------------------------------------------------------------------------------- + + async fn serve(self, listener: TcpListener, timeout: Duration) -> anyhow::Result<()> { + // Start background task to periodically update cached mempool stats + 
self.spawn_mempool_stats_updater().await; + + let reflection_service = tonic_reflection::server::Builder::configure() + .register_file_descriptor_set(block_producer_api_descriptor()) + .build_v1() + .context("failed to build reflection service")?; + + // This is currently required for postman to work properly because + // it doesn't support the new version yet. + // + // See: . + let reflection_service_alpha = tonic_reflection::server::Builder::configure() + .register_file_descriptor_set(block_producer_api_descriptor()) + .build_v1alpha() + .context("failed to build reflection service")?; + + // Build the gRPC server with the API service and trace layer. + tonic::transport::Server::builder() + .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) + .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) + .timeout(timeout) + .add_service(api_server::ApiServer::new(self)) + .add_service(reflection_service) + .add_service(reflection_service_alpha) + .serve_with_incoming(TcpListenerStream::new(listener)) + .await + .context("failed to serve block producer API") + } + /// Starts a background task that periodically updates the cached mempool statistics. /// /// This prevents the need to lock the mempool for each status request. @@ -388,36 +318,8 @@ impl BlockProducerRpcServer { }); } - async fn serve(self, listener: TcpListener, timeout: Duration) -> anyhow::Result<()> { - // Start background task to periodically update cached mempool stats - self.spawn_mempool_stats_updater().await; - - let reflection_service = tonic_reflection::server::Builder::configure() - .register_file_descriptor_set(block_producer_api_descriptor()) - .build_v1() - .context("failed to build reflection service")?; - - // This is currently required for postman to work properly because - // it doesn't support the new version yet. - // - // See: . 
- let reflection_service_alpha = tonic_reflection::server::Builder::configure() - .register_file_descriptor_set(block_producer_api_descriptor()) - .build_v1alpha() - .context("failed to build reflection service")?; - - // Build the gRPC server with the API service and trace layer. - tonic::transport::Server::builder() - .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) - .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .timeout(timeout) - .add_service(api_server::ApiServer::new(self)) - .add_service(reflection_service) - .add_service(reflection_service_alpha) - .serve_with_incoming(TcpListenerStream::new(listener)) - .await - .context("failed to serve block producer API") - } + // RPC ENDPOINTS + // -------------------------------------------------------------------------------------------- #[instrument( target = COMPONENT, @@ -479,3 +381,119 @@ impl BlockProducerRpcServer { todo!(); } } + +#[tonic::async_trait] +impl api_server::Api for BlockProducerRpcServer { + type MempoolSubscriptionStream = MempoolEventSubscription; + + async fn submit_proven_transaction( + &self, + request: tonic::Request, + ) -> Result, Status> { + self.submit_proven_transaction(request.into_inner()) + .await + .map(tonic::Response::new) + // This Status::from mapping takes care of hiding internal errors. + .map_err(Into::into) + } + + async fn submit_proven_batch( + &self, + request: tonic::Request, + ) -> Result, Status> { + self.submit_proven_batch(request.into_inner()) + .await + .map(tonic::Response::new) + // This Status::from mapping takes care of hiding internal errors. 
+ .map_err(Into::into) + } + + #[instrument( + target = COMPONENT, + name = "block_producer.server.status", + skip_all, + err + )] + async fn status( + &self, + _request: tonic::Request<()>, + ) -> Result, Status> { + let mempool_stats = *self.cached_mempool_stats.read().await; + + Ok(tonic::Response::new(proto::rpc::BlockProducerStatus { + version: env!("CARGO_PKG_VERSION").to_string(), + status: "connected".to_string(), + chain_tip: mempool_stats.chain_tip.as_u32(), + mempool_stats: Some(mempool_stats.into()), + })) + } + + async fn mempool_subscription( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + let chain_tip = BlockNumber::from(request.into_inner().chain_tip); + + let subscription = + self.mempool + .lock() + .await + .lock() + .await + .subscribe(chain_tip) + .map_err(|mempool_tip| { + tonic::Status::invalid_argument(format!( + "Mempool's chain tip {mempool_tip} does not match request's {chain_tip}" + )) + })?; + let subscription = ReceiverStream::new(subscription); + + Ok(tonic::Response::new(MempoolEventSubscription { inner: subscription })) + } +} + +// MEMPOOL SUBSCRIPTION +// ================================================================================================ + +struct MempoolEventSubscription { + inner: ReceiverStream, +} + +impl tokio_stream::Stream for MempoolEventSubscription { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner + .poll_next_unpin(cx) + .map(|x| x.map(proto::block_producer::MempoolEvent::from).map(Result::Ok)) + } +} + +// MEMPOOL STATISTICS +// ================================================================================================ + +/// Mempool statistics that are updated periodically to avoid locking the mempool. +#[derive(Clone, Copy, Default)] +struct MempoolStats { + /// The mempool's current view of the chain tip height. 
+ chain_tip: BlockNumber, + /// Number of transactions currently in the mempool waiting to be batched. + unbatched_transactions: u64, + /// Number of batches currently being proven. + proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + proven_batches: u64, +} + +impl From for proto::rpc::MempoolStats { + fn from(stats: MempoolStats) -> Self { + proto::rpc::MempoolStats { + unbatched_transactions: stats.unbatched_transactions, + proposed_batches: stats.proposed_batches, + proven_batches: stats.proven_batches, + } + } +} From a1f4f6e326ca07ad23b4029fb7061c7b10da61ed Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Mon, 12 Jan 2026 08:46:21 +0200 Subject: [PATCH 076/125] feat(ntx-builder): script cache capacity is now configurable (#1454) --- bin/node/.env | 1 + bin/node/src/commands/bundled.rs | 1 + bin/node/src/commands/mod.rs | 10 ++++++++++ crates/ntx-builder/src/builder.rs | 9 ++------- 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/bin/node/.env b/bin/node/.env index 01e699aff2..fc4c2793e3 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -14,3 +14,4 @@ MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true +MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE= diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index a51c191eba..594959aa40 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -322,6 +322,7 @@ impl BundledCommand { ntx_builder.tx_prover_url, ntx_builder.ticker_interval, checkpoint, + ntx_builder.script_cache_size, ) .run() .await diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index ecfee995fa..7e8fa7e69f 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -35,10 +35,12 @@ const ENV_GENESIS_CONFIG_FILE: &str = "MIDEN_GENESIS_CONFIG_FILE"; const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const 
ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; +const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE"; const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = "MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); +const DEFAULT_NTX_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap(); // Formats a Duration into a human-readable string for display in clap help text. fn duration_to_human_readable_string(duration: Duration) -> String { @@ -65,6 +67,14 @@ pub struct NtxBuilderConfig { value_name = "DURATION" )] pub ticker_interval: Duration, + + #[arg( + long = "ntx-builder.script-cache-size", + env = ENV_NTX_SCRIPT_CACHE_SIZE, + value_name = "NUM", + default_value_t = DEFAULT_NTX_SCRIPT_CACHE_SIZE + )] + pub script_cache_size: NonZeroUsize, } /// Configuration for the Block Producer component diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 34ebdc06fb..3d0a00aabd 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -91,12 +91,6 @@ pub struct NetworkTransactionBuilder { } impl NetworkTransactionBuilder { - /// Default cache size for note scripts. - /// - /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage - /// depends on the complexity of the scripts being cached. - const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap(); - /// Creates a new instance of the network transaction builder. 
pub fn new( store_url: Url, @@ -104,8 +98,9 @@ impl NetworkTransactionBuilder { tx_prover_url: Option, ticker_interval: Duration, bp_checkpoint: Arc, + script_cache_size: NonZeroUsize, ) -> Self { - let script_cache = LruCache::new(Self::DEFAULT_SCRIPT_CACHE_SIZE); + let script_cache = LruCache::new(script_cache_size); let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); Self { store_url, From 67f470ed0600bd72d27ad48822e1687c9b959bbd Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 12 Jan 2026 08:53:54 +0100 Subject: [PATCH 077/125] feat: [2/4] integrate smtforest, avoid ser/de of full account/vault data in database (#1394) --- CHANGELOG.md | 7 +- crates/proto/src/domain/account.rs | 92 ++-- crates/store/src/accounts/tests.rs | 2 +- crates/store/src/db/mod.rs | 6 +- .../store/src/db/models/queries/accounts.rs | 1 - crates/store/src/errors.rs | 5 + crates/store/src/inner_forest/mod.rs | 341 ++++++++++++++ crates/store/src/inner_forest/tests.rs | 421 ++++++++++++++++++ crates/store/src/lib.rs | 1 + crates/store/src/server/api.rs | 2 +- crates/store/src/server/ntx_builder.rs | 3 +- crates/store/src/state.rs | 76 +++- crates/utils/src/limiter.rs | 1 + 13 files changed, 890 insertions(+), 68 deletions(-) create mode 100644 crates/store/src/inner_forest/mod.rs create mode 100644 crates/store/src/inner_forest/tests.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 62b85c4412..f93bb299d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,13 +30,14 @@ - Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). - [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298), [#1436](https://github.com/0xMiden/miden-node/pull/1436)). - Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). 
-- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). -- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)). +- Refactor account table and introduce tracking forest ([#1394](https://github.com/0xMiden/miden-node/pull/1394)). - [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). -- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). +- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). - [BREAKING] Added block signing capabilities to Validator component and updated gensis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)). +- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)). - Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). - Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). +- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). - [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). - [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). - Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). 
diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 4330a82deb..5bc6b4ecc4 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -187,6 +187,52 @@ impl TryFrom for Accoun } } +impl TryFrom + for AccountStorageMapDetails +{ + type Error = ConversionError; + + fn try_from( + value: proto::rpc::account_storage_details::AccountStorageMapDetails, + ) -> Result { + use proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry; + let proto::rpc::account_storage_details::AccountStorageMapDetails { + slot_name, + too_many_entries, + entries, + } = value; + + let slot_name = StorageSlotName::new(slot_name)?; + + let entries = if too_many_entries { + StorageMapEntries::LimitExceeded + } else { + let map_entries = if let Some(entries) = entries { + entries + .entries + .into_iter() + .map(|entry| { + let key = entry + .key + .ok_or(StorageMapEntry::missing_field(stringify!(key)))? + .try_into()?; + let value = entry + .value + .ok_or(StorageMapEntry::missing_field(stringify!(value)))? + .try_into()?; + Ok((key, value)) + }) + .collect::, ConversionError>>()? 
+ } else { + Vec::new() + }; + StorageMapEntries::Entries(map_entries) + }; + + Ok(Self { slot_name, entries }) + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapRequest { pub slot_name: StorageSlotName, @@ -544,52 +590,6 @@ const fn storage_slot_type_to_raw(slot_type: StorageSlotType) -> u32 { } } -impl TryFrom - for AccountStorageMapDetails -{ - type Error = ConversionError; - - fn try_from( - value: proto::rpc::account_storage_details::AccountStorageMapDetails, - ) -> Result { - use proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry; - let proto::rpc::account_storage_details::AccountStorageMapDetails { - slot_name, - too_many_entries, - entries, - } = value; - - let slot_name = StorageSlotName::new(slot_name)?; - - let entries = if too_many_entries { - StorageMapEntries::LimitExceeded - } else { - let map_entries = if let Some(entries) = entries { - entries - .entries - .into_iter() - .map(|entry| { - let key = entry - .key - .ok_or(StorageMapEntry::missing_field(stringify!(key)))? - .try_into()?; - let value = entry - .value - .ok_or(StorageMapEntry::missing_field(stringify!(value)))? - .try_into()?; - Ok((key, value)) - }) - .collect::, ConversionError>>()? 
- } else { - Vec::new() - }; - StorageMapEntries::Entries(map_entries) - }; - - Ok(Self { slot_name, entries }) - } -} - // ACCOUNT PROOF RESPONSE //================================================================================================ diff --git a/crates/store/src/accounts/tests.rs b/crates/store/src/accounts/tests.rs index 5880d39825..f709289469 100644 --- a/crates/store/src/accounts/tests.rs +++ b/crates/store/src/accounts/tests.rs @@ -18,7 +18,7 @@ mod account_tree_with_history_tests { /// Helper function to create an `AccountTree` from entries using the new API fn create_account_tree( entries: impl IntoIterator, - ) -> AccountTree> { + ) -> InMemoryAccountTree { let smt_entries = entries .into_iter() .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 8a5a835a42..3d54a501f2 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -113,8 +113,7 @@ impl TransactionRecord { self, note_records: Vec, ) -> proto::rpc::TransactionRecord { - let output_notes: Vec = - note_records.into_iter().map(Into::into).collect(); + let output_notes = Vec::from_iter(note_records.into_iter().map(Into::into)); proto::rpc::TransactionRecord { header: Some(proto::transaction::TransactionHeader { @@ -324,7 +323,7 @@ impl Db { /// Loads all the nullifiers from the DB. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_nullifiers(&self) -> Result> { + pub(crate) async fn select_all_nullifiers(&self) -> Result> { self.transact("all nullifiers", move |conn| { let nullifiers = queries::select_all_nullifiers(conn)?; Ok(nullifiers) @@ -403,7 +402,6 @@ impl Db { } /// Returns all account IDs that have public state. 
- #[allow(dead_code)] // Will be used by InnerForest in next PR #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_all_public_account_ids(&self) -> Result> { self.transact("read all public account IDs", move |conn| { diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index c1ad88d2bc..3c615c51b4 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -299,7 +299,6 @@ pub(crate) fn select_all_account_commitments( /// ORDER BY /// block_num ASC /// ``` -#[allow(dead_code)] // Will be used by InnerForest in next PR pub(crate) fn select_all_public_account_ids( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 7ac836ed3f..7e0c326a23 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -31,6 +31,7 @@ use tonic::Status; use crate::db::manager::ConnectionManagerError; use crate::db::models::conv::DatabaseTypeConversionError; +use crate::inner_forest::InnerForestError; // DATABASE ERRORS // ================================================================================================= @@ -197,6 +198,8 @@ pub enum StateInitializationError { BlockStoreLoadError(#[source] std::io::Error), #[error("failed to load database")] DatabaseLoadError(#[from] DatabaseSetupError), + #[error("inner forest error")] + InnerForestError(#[from] InnerForestError), } #[derive(Debug, Error)] @@ -274,6 +277,8 @@ pub enum ApplyBlockError { TokioJoinError(#[from] tokio::task::JoinError), #[error("invalid block error")] InvalidBlockError(#[from] InvalidBlockError), + #[error("inner forest error")] + InnerForestError(#[from] InnerForestError), // OTHER ERRORS // --------------------------------------------------------------------------------------------- diff --git a/crates/store/src/inner_forest/mod.rs 
b/crates/store/src/inner_forest/mod.rs new file mode 100644 index 0000000000..d368896f26 --- /dev/null +++ b/crates/store/src/inner_forest/mod.rs @@ -0,0 +1,341 @@ +use std::collections::BTreeMap; + +use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; +use miden_protocol::account::{AccountId, NonFungibleDeltaAction, StorageSlotName}; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::EmptySubtreeRoots; +use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; +use miden_protocol::{EMPTY_WORD, Word}; +use thiserror::Error; + +#[cfg(test)] +mod tests; + +// ERRORS +// ================================================================================================ + +#[derive(Debug, Error)] +pub enum InnerForestError { + #[error( + "balance underflow: account {account_id}, faucet {faucet_id}, \ + previous balance {prev_balance}, delta {delta}" + )] + BalanceUnderflow { + account_id: AccountId, + faucet_id: AccountId, + prev_balance: u64, + delta: i64, + }, +} + +// INNER FOREST +// ================================================================================================ + +/// Container for forest-related state that needs to be updated atomically. +pub(crate) struct InnerForest { + /// `SmtForest` for efficient account storage reconstruction. + /// Populated during block import with storage and vault SMTs. + forest: SmtForest, + + /// Maps (`account_id`, `slot_name`, `block_num`) to SMT root. + /// Populated during block import for all storage map slots. + storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, + + /// Maps (`account_id`, `block_num`) to vault SMT root. + /// Tracks asset vault versions across all blocks with structural sharing. 
+ vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, +} + +impl InnerForest { + pub(crate) fn new() -> Self { + Self { + forest: SmtForest::new(), + storage_map_roots: BTreeMap::new(), + vault_roots: BTreeMap::new(), + } + } + + // HELPERS + // -------------------------------------------------------------------------------------------- + + /// Returns the root of an empty SMT. + const fn empty_smt_root() -> Word { + *EmptySubtreeRoots::entry(SMT_DEPTH, 0) + } + + /// Retrieves the most recent vault SMT root for an account. + /// + /// Returns the latest vault root entry regardless of block number. + /// Used when applying incremental deltas where we always want the previous state. + /// + /// If no vault root is found for the account, returns an empty SMT root. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, returns an empty SMT root (for new accounts or DB + /// reconstruction where delta values are absolute). If `false`, looks up the previous state + /// (for incremental updates where delta values are relative changes). + fn get_latest_vault_root(&self, account_id: AccountId, is_full_state: bool) -> Word { + if is_full_state { + return Self::empty_smt_root(); + } + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..) + .take_while(|((id, _), _)| *id == account_id) + .last() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + /// Retrieves the most recent storage map SMT root for an account slot. + /// + /// Returns the latest storage root entry regardless of block number. + /// Used when applying incremental deltas where we always want the previous state. + /// + /// If no storage root is found for the slot, returns an empty SMT root. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, returns an empty SMT root (for new accounts or DB + /// reconstruction where delta values are absolute). If `false`, looks up the previous state + /// (for incremental updates where delta values are relative changes). 
+ fn get_latest_storage_map_root( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + is_full_state: bool, + ) -> Word { + if is_full_state { + return Self::empty_smt_root(); + } + + self.storage_map_roots + .range((account_id, slot_name.clone(), BlockNumber::GENESIS)..) + .take_while(|((id, name, _), _)| *id == account_id && name == slot_name) + .last() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + /// Retrieves the vault SMT root for an account at or before the given block. + /// + /// Finds the most recent vault root entry for the account, since vault state persists + /// across blocks where no changes occur. + // + // TODO: a fallback to DB lookup is required once pruning lands. + // Currently returns empty root which would be incorrect + #[cfg(test)] + fn get_vault_root(&self, account_id: AccountId, block_num: BlockNumber) -> Word { + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + // PUBLIC INTERFACE + // -------------------------------------------------------------------------------------------- + + /// Applies account updates from a block to the forest. + /// + /// Iterates through account updates and applies each delta to the forest. + /// Private accounts should be filtered out before calling this method. + /// + /// # Arguments + /// + /// * `block_num` - Block number for which these updates apply + /// * `account_updates` - Iterator of `AccountDelta` for public accounts + /// + /// # Errors + /// + /// Returns an error if applying a vault delta results in a negative balance. 
+ pub(crate) fn apply_block_updates( + &mut self, + block_num: BlockNumber, + account_updates: impl IntoIterator, + ) -> Result<(), InnerForestError> { + for delta in account_updates { + self.update_account(block_num, &delta)?; + + tracing::debug!( + target: crate::COMPONENT, + account_id = %delta.id(), + %block_num, + is_full_state = delta.is_full_state(), + "Updated forest with account delta" + ); + } + Ok(()) + } + + /// Updates the forest with account vault and storage changes from a delta. + /// + /// Unified interface for updating all account state in the forest, handling both full-state + /// deltas (new accounts or reconstruction from DB) and partial deltas (incremental updates + /// during block application). + /// + /// Full-state deltas (`delta.is_full_state() == true`) populate the forest from scratch using + /// an empty SMT root. Partial deltas apply changes on top of the previous block's state. + /// + /// # Errors + /// + /// Returns an error if applying a vault delta results in a negative balance. + pub(crate) fn update_account( + &mut self, + block_num: BlockNumber, + delta: &AccountDelta, + ) -> Result<(), InnerForestError> { + let account_id = delta.id(); + let is_full_state = delta.is_full_state(); + + if !delta.vault().is_empty() { + self.update_account_vault(block_num, account_id, delta.vault(), is_full_state)?; + } + + if !delta.storage().is_empty() { + self.update_account_storage(block_num, account_id, delta.storage(), is_full_state); + } + Ok(()) + } + + // PRIVATE METHODS + // -------------------------------------------------------------------------------------------- + + /// Updates the forest with vault changes from a delta. + /// + /// Processes both fungible and non-fungible asset changes, building entries for the vault SMT + /// and tracking the new root. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). 
+ /// If `false`, delta values are relative changes applied to previous state. + /// + /// # Errors + /// + /// Returns an error if applying a delta results in a negative balance. + fn update_account_vault( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + vault_delta: &AccountVaultDelta, + is_full_state: bool, + ) -> Result<(), InnerForestError> { + let prev_root = self.get_latest_vault_root(account_id, is_full_state); + + let mut entries = Vec::new(); + + // Process fungible assets + for (faucet_id, amount_delta) in vault_delta.fungible().iter() { + let key: Word = + FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); + + let new_amount = if is_full_state { + // For full-state deltas, amount is the absolute value + (*amount_delta).try_into().expect("full-state amount should be non-negative") + } else { + // For partial deltas, amount is a change that must be applied to previous balance. + // + // TODO: SmtForest only exposes `fn open()` which computes a full Merkle + // proof. We only need the leaf, so a direct `fn get()` method would be faster. + let prev_amount = self + .forest + .open(prev_root, key) + .ok() + .and_then(|proof| proof.get(&key)) + .and_then(|word| FungibleAsset::try_from(word).ok()) + .map_or(0, |asset| asset.amount()); + + let new_balance = i128::from(prev_amount) + i128::from(*amount_delta); + u64::try_from(new_balance).map_err(|_| InnerForestError::BalanceUnderflow { + account_id, + faucet_id: *faucet_id, + prev_balance: prev_amount, + delta: *amount_delta, + })? 
+ }; + + let value = if new_amount == 0 { + EMPTY_WORD + } else { + let asset: Asset = FungibleAsset::new(*faucet_id, new_amount) + .expect("valid fungible asset") + .into(); + Word::from(asset) + }; + entries.push((key, value)); + } + + // Process non-fungible assets + for (asset, action) in vault_delta.non_fungible().iter() { + let value = match action { + NonFungibleDeltaAction::Add => Word::from(Asset::NonFungible(*asset)), + NonFungibleDeltaAction::Remove => EMPTY_WORD, + }; + entries.push((asset.vault_key().into(), value)); + } + + if entries.is_empty() { + return Ok(()); + } + + let updated_root = self + .forest + .batch_insert(prev_root, entries.iter().copied()) + .expect("forest insertion should succeed"); + + self.vault_roots.insert((account_id, block_num), updated_root); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + vault_entries = entries.len(), + "Updated vault in forest" + ); + Ok(()) + } + + /// Updates the forest with storage map changes from a delta. + /// + /// Processes storage map slot deltas, building SMTs for each modified slot + /// and tracking the new roots. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). + /// If `false`, delta values are relative changes applied to previous state. 
+ fn update_account_storage( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + storage_delta: &AccountStorageDelta, + is_full_state: bool, + ) { + for (slot_name, map_delta) in storage_delta.maps() { + let prev_root = self.get_latest_storage_map_root(account_id, slot_name, is_full_state); + + let entries: Vec<_> = + map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); + + if entries.is_empty() { + continue; + } + + let updated_root = self + .forest + .batch_insert(prev_root, entries.iter().copied()) + .expect("forest insertion should succeed"); + + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), updated_root); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + ?slot_name, + entries = entries.len(), + "Updated storage map in forest" + ); + } + } +} diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs new file mode 100644 index 0000000000..fb6ceb917a --- /dev/null +++ b/crates/store/src/inner_forest/tests.rs @@ -0,0 +1,421 @@ +use miden_protocol::account::AccountCode; +use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; +use miden_protocol::testing::account_id::{ + ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, +}; +use miden_protocol::{Felt, FieldElement}; + +use super::*; + +fn dummy_account() -> AccountId { + AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap() +} + +fn dummy_faucet() -> AccountId { + AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap() +} + +fn dummy_fungible_asset(faucet_id: AccountId, amount: u64) -> Asset { + FungibleAsset::new(faucet_id, amount).unwrap().into() +} + +/// Creates a partial `AccountDelta` (without code) for testing incremental updates. 
+fn dummy_partial_delta( + account_id: AccountId, + vault_delta: AccountVaultDelta, + storage_delta: AccountStorageDelta, +) -> AccountDelta { + // For partial deltas, nonce_delta must be > 0 if there are changes + let nonce_delta = if vault_delta.is_empty() && storage_delta.is_empty() { + Felt::ZERO + } else { + Felt::ONE + }; + AccountDelta::new(account_id, storage_delta, vault_delta, nonce_delta).unwrap() +} + +/// Creates a full-state `AccountDelta` (with code) for testing DB reconstruction. +fn dummy_full_state_delta(account_id: AccountId, assets: &[Asset]) -> AccountDelta { + use miden_protocol::account::{Account, AccountStorage}; + + // Create a minimal account with the given assets + let vault = AssetVault::new(assets).unwrap(); + let storage = AccountStorage::new(vec![]).unwrap(); + let code = AccountCode::mock(); + let nonce = Felt::ONE; + + let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); + + // Convert to delta - this will be a full-state delta because it has code + AccountDelta::try_from(account).unwrap() +} + +#[test] +fn test_empty_smt_root_is_recognized() { + use miden_protocol::crypto::merkle::smt::Smt; + + let empty_root = InnerForest::empty_smt_root(); + + // Verify an empty SMT has the expected root + assert_eq!(Smt::default().root(), empty_root); + + // Test that SmtForest accepts this root in batch_insert + let mut forest = SmtForest::new(); + let entries = vec![(Word::from([1u32, 2, 3, 4]), Word::from([5u32, 6, 7, 8]))]; + + assert!(forest.batch_insert(empty_root, entries).is_ok()); +} + +#[test] +fn test_inner_forest_basic_initialization() { + let forest = InnerForest::new(); + assert!(forest.storage_map_roots.is_empty()); + assert!(forest.vault_roots.is_empty()); +} + +#[test] +fn test_update_account_with_empty_deltas() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let block_num = BlockNumber::GENESIS.child(); + + let delta = dummy_partial_delta( + account_id, + 
AccountVaultDelta::default(), + AccountStorageDelta::default(), + ); + + forest.update_account(block_num, &delta).unwrap(); + + // Empty deltas should not create entries + assert!(!forest.vault_roots.contains_key(&(account_id, block_num))); + assert!(forest.storage_map_roots.is_empty()); +} + +#[test] +fn test_update_vault_with_fungible_asset() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let block_num = BlockNumber::GENESIS.child(); + + let asset = dummy_fungible_asset(faucet_id, 100); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset).unwrap(); + + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + + let vault_root = forest.vault_roots[&(account_id, block_num)]; + assert_ne!(vault_root, EMPTY_WORD); +} + +#[test] +fn test_compare_partial_vs_full_state_delta_vault() { + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let block_num = BlockNumber::GENESIS.child(); + let asset = dummy_fungible_asset(faucet_id, 100); + + // Approach 1: Partial delta (simulates block application) + let mut forest_partial = InnerForest::new(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset).unwrap(); + let partial_delta = + dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest_partial.update_account(block_num, &partial_delta).unwrap(); + + // Approach 2: Full-state delta (simulates DB reconstruction) + let mut forest_full = InnerForest::new(); + let full_delta = dummy_full_state_delta(account_id, &[asset]); + forest_full.update_account(block_num, &full_delta).unwrap(); + + // Both approaches must produce identical vault roots + let root_partial = forest_partial.vault_roots.get(&(account_id, block_num)).unwrap(); + let root_full = forest_full.vault_roots.get(&(account_id, block_num)).unwrap(); + + 
assert_eq!(root_partial, root_full); + assert_ne!(*root_partial, EMPTY_WORD); +} + +#[test] +fn test_incremental_vault_updates() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: 100 tokens + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_1 = forest.vault_roots[&(account_id, block_1)]; + + // Block 2: 150 tokens (update) + let block_2 = block_1.child(); + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); + let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2).unwrap(); + let root_2 = forest.vault_roots[&(account_id, block_2)]; + + assert_ne!(root_1, root_2); +} + +#[test] +fn test_full_state_delta_starts_from_empty_root() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let block_num = BlockNumber::GENESIS.child(); + + // Simulate a pre-existing vault state that should be ignored for full-state deltas + let mut vault_delta_pre = AccountVaultDelta::default(); + vault_delta_pre.add_asset(dummy_fungible_asset(faucet_id, 999)).unwrap(); + let delta_pre = + dummy_partial_delta(account_id, vault_delta_pre, AccountStorageDelta::default()); + forest.update_account(block_num, &delta_pre).unwrap(); + assert!(forest.vault_roots.contains_key(&(account_id, block_num))); + + // Now create a full-state delta at the same block + // A full-state delta should start from an empty root, not from the previous state + let asset = dummy_fungible_asset(faucet_id, 100); + let full_delta = 
dummy_full_state_delta(account_id, &[asset]); + + // Create a fresh forest to compare + let mut fresh_forest = InnerForest::new(); + fresh_forest.update_account(block_num, &full_delta).unwrap(); + let fresh_root = fresh_forest.vault_roots[&(account_id, block_num)]; + + // Update the original forest with the full-state delta + forest.update_account(block_num, &full_delta).unwrap(); + let updated_root = forest.vault_roots[&(account_id, block_num)]; + + // The full-state delta should produce the same root regardless of prior state + assert_eq!(updated_root, fresh_root); +} + +#[test] +fn test_vault_state_persists_across_blocks_without_changes() { + // Regression test for issue #7: vault state should persist across blocks + // where no changes occur, not reset to empty. + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: Add 100 tokens + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; + + // Blocks 2-5: No changes to this account (simulated by not calling update_account) + // This means no entries are added to vault_roots for these blocks. + + // Block 6: Add 50 more tokens + // The previous root lookup should find block_1's root, not return empty. 
+ let block_6 = BlockNumber::from(6); + let mut vault_delta_6 = AccountVaultDelta::default(); + vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); // 100 + 50 = 150 + let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); + forest.update_account(block_6, &delta_6).unwrap(); + + // The root at block 6 should be different from block 1 (we added more tokens) + let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; + assert_ne!(root_after_block_1, root_after_block_6); + + // Verify get_vault_root finds the correct previous root for intermediate blocks + // Block 3 should return block 1's root (most recent before block 3) + let root_at_block_3 = forest.get_vault_root(account_id, BlockNumber::from(3)); + assert_eq!(root_at_block_3, root_after_block_1); + + // Block 5 should also return block 1's root + let root_at_block_5 = forest.get_vault_root(account_id, BlockNumber::from(5)); + assert_eq!(root_at_block_5, root_after_block_1); + + // Block 6 should return block 6's root + let root_at_block_6 = forest.get_vault_root(account_id, block_6); + assert_eq!(root_at_block_6, root_after_block_6); +} + +#[test] +fn test_partial_delta_applies_fungible_changes_correctly() { + // Regression test for issue #8: partial deltas should apply changes to previous balance, + // not treat amounts as absolute values. 
+ let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: Add 100 tokens (partial delta with +100) + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_after_100 = forest.vault_roots[&(account_id, block_1)]; + + // Block 2: Add 50 more tokens (partial delta with +50) + // Result should be 150 tokens, not 50 tokens + let block_2 = block_1.child(); + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); + let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2).unwrap(); + let root_after_150 = forest.vault_roots[&(account_id, block_2)]; + + // Roots should be different (100 tokens vs 150 tokens) + assert_ne!(root_after_100, root_after_150); + + // Block 3: Remove 30 tokens (partial delta with -30) + // Result should be 120 tokens + let block_3 = block_2.child(); + let mut vault_delta_3 = AccountVaultDelta::default(); + vault_delta_3.remove_asset(dummy_fungible_asset(faucet_id, 30)).unwrap(); + let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); + forest.update_account(block_3, &delta_3).unwrap(); + let root_after_120 = forest.vault_roots[&(account_id, block_3)]; + + // Root should change again + assert_ne!(root_after_150, root_after_120); + + // Verify by creating a fresh forest with a full-state delta of 120 tokens + // The roots should match + let mut fresh_forest = InnerForest::new(); + let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 120)]); + fresh_forest.update_account(block_3, 
&full_delta).unwrap(); + let root_full_state_120 = fresh_forest.vault_roots[&(account_id, block_3)]; + + assert_eq!(root_after_120, root_full_state_120); +} + +#[test] +fn test_partial_delta_across_long_block_range() { + // Validation test: partial deltas should work across 101+ blocks. + // + // This test passes now because InnerForest keeps all history. Once pruning is implemented + // (estimated ~50 blocks), this test will fail unless DB fallback is also implemented. + // When that happens, the test should be updated to use DB fallback or converted to an + // integration test that has DB access. + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: Add 1000 tokens + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_after_1000 = forest.vault_roots[&(account_id, block_1)]; + + // Blocks 2-100: No changes to this account (simulating long gap) + + // Block 101: Add 500 more tokens (partial delta with +500) + // This requires looking up block 1's state across a 100-block gap. 
+ let block_101 = BlockNumber::from(101); + let mut vault_delta_101 = AccountVaultDelta::default(); + vault_delta_101.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); + let delta_101 = + dummy_partial_delta(account_id, vault_delta_101, AccountStorageDelta::default()); + forest.update_account(block_101, &delta_101).unwrap(); + let root_after_1500 = forest.vault_roots[&(account_id, block_101)]; + + // Roots should be different (1000 tokens vs 1500 tokens) + assert_ne!(root_after_1000, root_after_1500); + + // Verify the final state matches a fresh forest with 1500 tokens + let mut fresh_forest = InnerForest::new(); + let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 1500)]); + fresh_forest.update_account(block_101, &full_delta).unwrap(); + let root_full_state_1500 = fresh_forest.vault_roots[&(account_id, block_101)]; + + assert_eq!(root_after_1500, root_full_state_1500); +} + +#[test] +fn test_update_storage_map() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let block_num = BlockNumber::GENESIS.child(); + + let slot_name = StorageSlotName::mock(3); + let key = Word::from([1u32, 2, 3, 4]); + let value = Word::from([5u32, 6, 7, 8]); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + // Verify storage root was created + assert!( + forest + .storage_map_roots + .contains_key(&(account_id, slot_name.clone(), block_num)) + ); + let storage_root = forest.storage_map_roots[&(account_id, slot_name, block_num)]; + assert_ne!(storage_root, 
InnerForest::empty_smt_root()); +} + +#[test] +fn test_storage_map_incremental_updates() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + + let slot_name = StorageSlotName::mock(3); + let key1 = Word::from([1u32, 0, 0, 0]); + let key2 = Word::from([2u32, 0, 0, 0]); + let value1 = Word::from([10u32, 0, 0, 0]); + let value2 = Word::from([20u32, 0, 0, 0]); + let value3 = Word::from([30u32, 0, 0, 0]); + + // Block 1: Insert key1 -> value1 + let block_1 = BlockNumber::GENESIS.child(); + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(key1, value1); + let raw_1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + let root_1 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_1)]; + + // Block 2: Insert key2 -> value2 (key1 should persist) + let block_2 = block_1.child(); + let mut map_delta_2 = StorageMapDelta::default(); + map_delta_2.insert(key2, value2); + let raw_2 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_2))]); + let storage_delta_2 = AccountStorageDelta::from_raw(raw_2); + let delta_2 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_2); + forest.update_account(block_2, &delta_2).unwrap(); + let root_2 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_2)]; + + // Block 3: Update key1 -> value3 + let block_3 = block_2.child(); + let mut map_delta_3 = StorageMapDelta::default(); + map_delta_3.insert(key1, value3); + let raw_3 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_3))]); + let storage_delta_3 = AccountStorageDelta::from_raw(raw_3); + let 
delta_3 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_3); + forest.update_account(block_3, &delta_3).unwrap(); + let root_3 = forest.storage_map_roots[&(account_id, slot_name, block_3)]; + + // All roots should be different + assert_ne!(root_1, root_2); + assert_ne!(root_2, root_3); + assert_ne!(root_1, root_3); +} diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 636225da15..5a9dc5ee2f 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -3,6 +3,7 @@ mod blocks; mod db; mod errors; pub mod genesis; +mod inner_forest; mod server; pub mod state; diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 6974b8dcb5..324a9dde3e 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -163,5 +163,5 @@ pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result< #[instrument(level = "debug",target = COMPONENT, skip_all)] pub fn read_block_numbers(block_numbers: &[u32]) -> BTreeSet { - block_numbers.iter().map(|raw_number| BlockNumber::from(*raw_number)).collect() + BTreeSet::from_iter(block_numbers.iter().map(|raw_number| BlockNumber::from(*raw_number))) } diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index ba8e82b4f5..40f1ae5b39 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -181,8 +181,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { let (account_ids, mut last_block_included) = self.state.get_all_network_accounts(block_range).await.map_err(internal_error)?; - let account_ids: Vec = - account_ids.into_iter().map(Into::into).collect(); + let account_ids = Vec::from_iter(account_ids.into_iter().map(Into::into)); if last_block_included > chain_tip { last_block_included = chain_tip; diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 7c72ae8182..43e653dcf6 100644 --- a/crates/store/src/state.rs +++ 
b/crates/store/src/state.rs @@ -23,6 +23,7 @@ use miden_node_proto::domain::account::{ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; +use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{AccountId, StorageSlotContent}; use miden_protocol::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; @@ -42,6 +43,7 @@ use miden_protocol::{AccountError, Word}; use tokio::sync::{Mutex, RwLock, oneshot}; use tracing::{info, info_span, instrument}; +use crate::accounts::{AccountTreeWithHistory, HistoricalError}; use crate::blocks::BlockStore; use crate::db::models::Page; use crate::db::models::queries::StorageMapValuesPage; @@ -65,7 +67,8 @@ use crate::errors::{ StateInitializationError, StateSyncError, }; -use crate::{AccountTreeWithHistory, COMPONENT, DataDirectory}; +use crate::inner_forest::InnerForest; +use crate::{COMPONENT, DataDirectory}; // STRUCTURES // ================================================================================================ @@ -117,6 +120,9 @@ pub struct State { /// The lock is writer-preferring, meaning the writer won't be starved. inner: RwLock, + /// Forest-related state `(SmtForest, storage_map_roots, vault_roots)` with its own lock. + forest: RwLock, + /// To allow readers to access the tree data while an update in being performed, and prevent /// TOCTOU issues, there must be no concurrent writers. This locks to serialize the writers. 
writer: Mutex<()>, @@ -146,13 +152,15 @@ impl State { let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); let account_tree = load_account_tree(&mut db, latest_block_num).await?; let nullifier_tree = load_nullifier_tree(&mut db).await?; + let forest = load_smt_forest(&mut db, latest_block_num).await?; let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); + let forest = RwLock::new(forest); let writer = Mutex::new(()); let db = Arc::new(db); - Ok(Self { db, block_store, inner, writer }) + Ok(Self { db, block_store, inner, forest, writer }) } // STATE MUTATOR @@ -291,10 +299,10 @@ impl State { .map(|update| (update.account_id(), update.final_state_commitment())), ) .map_err(|e| match e { - crate::HistoricalError::AccountTreeError(err) => { + HistoricalError::AccountTreeError(err) => { InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) }, - crate::HistoricalError::MerkleError(_) => { + HistoricalError::MerkleError(_) => { panic!("Unexpected MerkleError during account tree mutation computation") }, })?; @@ -354,6 +362,16 @@ impl State { // Signals the write lock has been acquired, and the transaction can be committed let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); + // Extract public account updates with deltas before block is moved into async task. + // Private accounts are filtered out since they don't expose their state changes. + let account_deltas = + Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { + match update.details() { + AccountUpdateDetails::Delta(delta) => Some(delta.clone()), + AccountUpdateDetails::Private => None, + } + })); + // The DB and in-memory state updates need to be synchronized and are partially // overlapping. Namely, the DB transaction only proceeds after this task acquires the // in-memory write lock. 
This requires the DB update to run concurrently, so a new task is @@ -413,6 +431,8 @@ impl State { inner.blockchain.push(block_commitment); } + self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); Ok(()) @@ -1007,11 +1027,11 @@ impl State { // Validate block exists in the blockchain before querying the database self.validate_block_exists(block_num).await?; - let account_header = self - .db - .select_account_header_at_block(account_id, block_num) - .await? - .ok_or_else(|| DatabaseError::AccountNotPublic(account_id))?; + let account_header = + self.db + .select_account_header_at_block(account_id, block_num) + .await? + .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; let account_code = match code_commitment { Some(commitment) if commitment == account_header.code_commitment() => None, @@ -1121,7 +1141,6 @@ impl State { ) -> Result<(BlockNumber, Vec), DatabaseError> { self.db.get_account_vault_sync(account_id, block_range).await } - /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( @@ -1215,3 +1234,40 @@ async fn load_account_tree( Ok(AccountTreeWithHistory::new(account_tree, block_number)) } + +/// Loads SMT forest with storage map and vault Merkle paths for all public accounts. 
+#[instrument(target = COMPONENT, skip_all, fields(block_num = %block_num))] +async fn load_smt_forest( + db: &mut Db, + block_num: BlockNumber, +) -> Result { + use miden_protocol::account::delta::AccountDelta; + + let public_account_ids = db.select_all_public_account_ids().await?; + + // Acquire write lock once for the entire initialization + let mut forest = InnerForest::new(); + + // Process each account + for account_id in public_account_ids { + // Get the full account from the database + let account_info = db.select_account(account_id).await?; + let account = account_info.details.expect("public accounts always have details in DB"); + + // Convert the full account to a full-state delta + let delta = + AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); + + // Use the unified update method (will recognize it's a full-state delta) + forest.update_account(block_num, &delta)?; + + tracing::debug!( + target: COMPONENT, + %account_id, + %block_num, + "Initialized forest for account from DB" + ); + } + + Ok(forest) +} diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 4e580d302b..03b8aeb54f 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -10,6 +10,7 @@ //! //! Add new limits here so callers share the same values and rationale. +/// Basic request limit. 
pub const GENERAL_REQUEST_LIMIT: usize = 1000; #[allow(missing_docs)] From f64266c4f212b612c407425f5864c805a64b3bdc Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 13 Jan 2026 08:37:42 -0300 Subject: [PATCH 078/125] fix(monito): send tx_inputs and use named storage slots (#1501) --- CHANGELOG.md | 1 + .../src/assets/counter_program.masm | 10 +++++----- bin/network-monitor/src/counter.rs | 17 ++++++++++++----- bin/network-monitor/src/deploy/counter.rs | 4 ++-- bin/network-monitor/src/deploy/mod.rs | 4 +++- 5 files changed, 23 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f93bb299d9..38dd9ed07a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/miden-node/issues/1422)). - Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/miden-node/pull/1461)). - Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)). +- Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/miden-node/pull/1501)). 
## v0.12.6 diff --git a/bin/network-monitor/src/assets/counter_program.masm b/bin/network-monitor/src/assets/counter_program.masm index 175e7e9697..9cd6536f45 100644 --- a/bin/network-monitor/src/assets/counter_program.masm +++ b/bin/network-monitor/src/assets/counter_program.masm @@ -1,7 +1,7 @@ # Counter program for network monitoring with note authentication # Storage layout: -# - Slot 0: counter value (u64) -# - Slot 1: authorized wallet account id as [prefix, suffix, 0, 0] +# - OWNER_SLOT: authorized wallet account id as [prefix, suffix, 0, 0] +# - COUNTER_SLOT: counter value (u64) use miden::core::sys use miden::protocol::active_account @@ -11,14 +11,14 @@ use miden::protocol::account_id use miden::protocol::tx -# The slot in this component's storage layout where the counter is stored. const COUNTER_SLOT = word("miden::monitor::counter_contract::counter") +const OWNER_SLOT = word("miden::monitor::counter_contract::owner") # Increment function with note authentication # => [] pub proc increment - # Ensure the note sender matches the authorized wallet stored in slot 1. - push.1 exec.active_account::get_item + # Ensure the note sender matches the authorized wallet. 
+ push.OWNER_SLOT[0..2] exec.active_account::get_item # => [owner_prefix, owner_suffix, 0, 0] exec.active_note::get_sender diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 4c89c74f83..b3cd8f94d2 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -43,6 +43,7 @@ use tracing::{error, info, instrument, warn}; use crate::COMPONENT; use crate::config::MonitorConfig; +use crate::deploy::counter::COUNTER_SLOT_NAME; use crate::deploy::{MonitorDataStore, create_genesis_aware_rpc_client, get_counter_library}; use crate::status::{ CounterTrackingDetails, @@ -83,7 +84,7 @@ async fn get_genesis_block_header(rpc_client: &mut RpcClient) -> Result = LazyLock::new(|| { +pub static OWNER_SLOT_NAME: LazyLock = LazyLock::new(|| { StorageSlotName::new("miden::monitor::counter_contract::owner") .expect("storage slot name should be valid") }); -static COUNTER_SLOT_NAME: LazyLock = LazyLock::new(|| { +pub static COUNTER_SLOT_NAME: LazyLock = LazyLock::new(|| { StorageSlotName::new("miden::monitor::counter_contract::counter") .expect("storage slot name should be valid") }); diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index 0a6c4ebe5a..235905f139 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -199,13 +199,15 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> .await .context("Failed to execute transaction")?; + let transaction_inputs = executed_tx.tx_inputs().to_bytes(); + let prover = LocalTransactionProver::default(); let proven_tx = prover.prove(executed_tx).context("Failed to prove transaction")?; let request = ProvenTransaction { transaction: proven_tx.to_bytes(), - transaction_inputs: None, + transaction_inputs: Some(transaction_inputs), }; rpc_client From 4fd136eb9c0490d227bbcf68d082d9323018cde5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= 
<4142+huitseeker@users.noreply.github.com> Date: Tue, 13 Jan 2026 14:30:30 +0100 Subject: [PATCH 079/125] chore(tests): fix flakey tests (#1503) Tests using shutdown_background() to restart the store can fail with "database is locked" because it returns immediately without waiting for connections to close. Use shutdown_timeout() via spawn_blocking to properly wait. --- CHANGELOG.md | 1 + crates/block-producer/src/server/tests.rs | 5 ++++- crates/rpc/src/tests.rs | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38dd9ed07a..b0f2063930 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ - [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). - [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). - Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). +- Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). 
### Fixes diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 1e5415925c..cbfd27fe02 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -114,7 +114,10 @@ async fn block_producer_startup_is_robust_to_network_failures() { assert!(response.is_ok()); // kill the store - store_runtime.shutdown_background(); + // Use spawn_blocking because shutdown_timeout blocks and can't run in async context + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); // test: request against block-producer api should fail immediately let response = send_request(block_producer_client.clone(), 1).await; diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index e80083319c..263ef9bfbc 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -137,7 +137,10 @@ async fn rpc_startup_is_robust_to_network_failures() { assert!(response.unwrap().into_inner().block_header.is_some()); // Test: shutdown the store and should fail - store_runtime.shutdown_background(); + // Use spawn_blocking because shutdown_timeout blocks and can't run in async context + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); let response = send_request(&mut rpc_client).await; assert!(response.is_err()); From 9c48876063e2b8c608ef6335d23906d918cbdb89 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 13 Jan 2026 16:09:03 +0100 Subject: [PATCH 080/125] fix/db: improve DB query complexity for vault assets (#1496) --- CHANGELOG.md | 1 + .../db/models/queries/accounts/at_block.rs | 111 ++++++----- .../src/db/models/queries/accounts/tests.rs | 178 ++++++++++++++++++ 3 files changed, 232 insertions(+), 58 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0f2063930..90d4a3b959 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -20,6 +20,7 @@ - Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)). - Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)). - Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). +- Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496)). ### Changes diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs index dc613a9c61..307edd0b79 100644 --- a/crates/store/src/db/models/queries/accounts/at_block.rs +++ b/crates/store/src/db/models/queries/accounts/at_block.rs @@ -1,15 +1,8 @@ use std::collections::BTreeMap; -use diesel::prelude::Queryable; +use diesel::prelude::{Queryable, QueryableByName}; use diesel::query_dsl::methods::SelectDsl; -use diesel::{ - BoolExpressionMethods, - ExpressionMethods, - OptionalExtension, - QueryDsl, - RunQueryDsl, - SqliteConnection, -}; +use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection}; use miden_protocol::account::{ AccountHeader, AccountId, @@ -125,63 +118,64 @@ pub(crate) fn select_account_header_at_block( // ================================================================================================ /// Query vault assets at a specific block by finding the most recent update for each `vault_key`. +/// +/// Uses a single raw SQL query with a subquery join: +/// ```sql +/// SELECT a.asset FROM account_vault_assets a +/// INNER JOIN ( +/// SELECT vault_key, MAX(block_num) as max_block +/// FROM account_vault_assets +/// WHERE account_id = ? AND block_num <= ? +/// GROUP BY vault_key +/// ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block +/// WHERE a.account_id = ?
+/// ``` pub(crate) fn select_account_vault_at_block( conn: &mut SqliteConnection, account_id: AccountId, block_num: BlockNumber, ) -> Result, DatabaseError> { - use schema::account_vault_assets as t; + use diesel::sql_types::{BigInt, Binary}; let account_id_bytes = account_id.to_bytes(); let block_num_sql = block_num.to_raw_sql(); - // Since Diesel doesn't support composite keys in subqueries easily, we use a two-step approach: - // Step 1: Get max block_num for each vault_key - let latest_blocks_per_vault_key = Vec::from_iter( - QueryDsl::select( - t::table - .filter(t::account_id.eq(&account_id_bytes)) - .filter(t::block_num.le(block_num_sql)) - .group_by(t::vault_key), - (t::vault_key, diesel::dsl::max(t::block_num)), - ) - .load::<(Vec, Option)>(conn)? - .into_iter() - .filter_map(|(key, maybe_block)| maybe_block.map(|block| (key, block))), - ); - - if latest_blocks_per_vault_key.is_empty() { - return Ok(Vec::new()); - } - - // Step 2: Fetch the full rows matching (vault_key, block_num) pairs + let entries: Vec>> = diesel::sql_query( + r" + SELECT a.asset FROM account_vault_assets a + INNER JOIN ( + SELECT vault_key, MAX(block_num) as max_block + FROM account_vault_assets + WHERE account_id = ? AND block_num <= ? + GROUP BY vault_key + ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block + WHERE a.account_id = ? + ", + ) + .bind::(&account_id_bytes) + .bind::(block_num_sql) + .bind::(&account_id_bytes) + .load::(conn)? 
+ .into_iter() + .map(|row| row.asset) + .collect(); + + // Convert to assets, filtering out deletions (None values) let mut assets = Vec::new(); - for (vault_key_bytes, max_block) in latest_blocks_per_vault_key { - // TODO we should not make a query per vault key, but query many at once or - // or find an alternative approach - let result: Option>> = QueryDsl::select( - t::table.filter( - t::account_id - .eq(&account_id_bytes) - .and(t::vault_key.eq(&vault_key_bytes)) - .and(t::block_num.eq(max_block)), - ), - t::asset, - ) - .first(conn) - .optional()?; - if let Some(Some(asset_bytes)) = result { - let asset = Asset::read_from_bytes(&asset_bytes)?; - assets.push(asset); - } + for asset_bytes in entries.into_iter().flatten() { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); } - // Sort by vault_key for consistent ordering - assets.sort_by_key(Asset::vault_key); - Ok(assets) } +#[derive(QueryableByName)] +struct AssetRow { + #[diesel(sql_type = diesel::sql_types::Nullable)] + asset: Option>, +} + // ACCOUNT STORAGE // ================================================================================================ @@ -218,18 +212,19 @@ pub(crate) fn select_account_storage_at_block( let header = AccountStorageHeader::read_from_bytes(&blob)?; // Query all map values for this account up to and including this block. - // For each (slot_name, key), we need the latest value at or before block_num. - // First, get all entries up to block_num - let map_values: Vec<(i64, String, Vec, Vec)> = - SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) - .filter(t::account_id.eq(&account_id_bytes).and(t::block_num.le(block_num_sql))) + // Order by (slot_name, key) ascending, then block_num descending so the first entry + // for each (slot_name, key) pair is the latest one. 
+ let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::block_num.le(block_num_sql)) .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc())) .load(conn)?; - // For each (slot_name, key) pair, keep only the latest entry (highest block_num) + // For each (slot_name, key) pair, keep only the latest entry (first one due to ordering) let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); - for (_, slot_name_str, key_bytes, value_bytes) in map_values { + for (slot_name_str, key_bytes, value_bytes) in map_values { let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) })?; diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 67eb24c1ff..a0a23f3b54 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -550,3 +550,181 @@ fn test_upsert_accounts_with_empty_storage() { "Storage header blob should exist even for empty storage" ); } + +// VAULT AT BLOCK HISTORICAL QUERY TESTS +// ================================================================================================ + +/// Tests that querying vault at an older block returns the correct historical state, +/// even when the same `vault_key` has been updated in later blocks. +/// +/// Focuses on deduplication logic that relies on ordering by (`vault_key` ASC and `block_num` +/// DESC). 
+#[test] +fn test_select_account_vault_at_block_historical_with_updates() { + use assert_matches::assert_matches; + use miden_protocol::asset::{AssetVaultKey, FungibleAsset}; + use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET; + + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + // Faucet ID is needed for creating FungibleAssets + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let block_1 = BlockNumber::from_epoch(0); + let block_2 = BlockNumber::from_epoch(1); + let block_3 = BlockNumber::from_epoch(2); + + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + insert_block_header(&mut conn, block_3); + + // Insert account at block 1 + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + // Insert vault asset at block 1: vault_key_1 = 1000 tokens + let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(1), + Felt::new(0), + Felt::new(0), + Felt::new(0), + ])); + let asset_v1 = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); + + insert_account_vault_asset(&mut conn, account_id, block_1, vault_key_1, Some(asset_v1)) + .expect("insert vault asset failed"); + + // Update vault asset at block 2: vault_key_1 = 2000 tokens (updated value) + let asset_v2 = Asset::Fungible(FungibleAsset::new(faucet_id, 2000).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_1, Some(asset_v2)) + .expect("insert vault asset update failed"); + + // Add a second vault_key at block 2 + let vault_key_2 = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(2), + Felt::new(0), + Felt::new(0), + Felt::new(0), + ])); + let asset_key2 = 
Asset::Fungible(FungibleAsset::new(faucet_id, 500).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_2, Some(asset_key2)) + .expect("insert second vault asset failed"); + + // Update vault_key_1 again at block 3: vault_key_1 = 3000 tokens + let asset_v3 = Asset::Fungible(FungibleAsset::new(faucet_id, 3000).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_3, vault_key_1, Some(asset_v3)) + .expect("insert vault asset update 2 failed"); + + // Query at block 1: should only see vault_key_1 with 1000 tokens + let assets_at_block_1 = select_account_vault_at_block(&mut conn, account_id, block_1) + .expect("Query at block 1 should succeed"); + + assert_eq!(assets_at_block_1.len(), 1, "Should have 1 asset at block 1"); + assert_matches!(&assets_at_block_1[0], Asset::Fungible(f) if f.amount() == 1000); + + // Query at block 2: should see vault_key_1 with 2000 tokens AND vault_key_2 with 500 tokens + let assets_at_block_2 = select_account_vault_at_block(&mut conn, account_id, block_2) + .expect("Query at block 2 should succeed"); + + assert_eq!(assets_at_block_2.len(), 2, "Should have 2 assets at block 2"); + + // Find the amounts (order may vary) + let amounts: Vec = assets_at_block_2 + .iter() + .map(|a| assert_matches!(a, Asset::Fungible(f) => f.amount())) + .collect(); + + assert!(amounts.contains(&2000), "Block 2 should have vault_key_1 with 2000 tokens"); + assert!(amounts.contains(&500), "Block 2 should have vault_key_2 with 500 tokens"); + + // Query at block 3: should see vault_key_1 with 3000 tokens AND vault_key_2 with 500 tokens + let assets_at_block_3 = select_account_vault_at_block(&mut conn, account_id, block_3) + .expect("Query at block 3 should succeed"); + + assert_eq!(assets_at_block_3.len(), 2, "Should have 2 assets at block 3"); + + let amounts: Vec = assets_at_block_3 + .iter() + .map(|a| assert_matches!(a, Asset::Fungible(f) => f.amount())) + .collect(); + + assert!(amounts.contains(&3000), "Block 3 
should have vault_key_1 with 3000 tokens"); + assert!(amounts.contains(&500), "Block 3 should have vault_key_2 with 500 tokens"); +} + +/// Tests that deleted vault assets (asset = None) are correctly excluded from results, +/// and that the deduplication handles deletion entries properly. +#[test] +fn test_select_account_vault_at_block_with_deletion() { + use assert_matches::assert_matches; + use miden_protocol::asset::{AssetVaultKey, FungibleAsset}; + use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET; + + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + // Faucet ID is needed for creating FungibleAssets + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let block_1 = BlockNumber::from_epoch(0); + let block_2 = BlockNumber::from_epoch(1); + let block_3 = BlockNumber::from_epoch(2); + + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + insert_block_header(&mut conn, block_3); + + // Insert account at block 1 + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + // Insert vault asset at block 1 + let vault_key = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(1), + Felt::new(0), + Felt::new(0), + Felt::new(0), + ])); + let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); + + insert_account_vault_asset(&mut conn, account_id, block_1, vault_key, Some(asset)) + .expect("insert vault asset failed"); + + // Delete the vault asset at block 2 (insert with asset = None) + insert_account_vault_asset(&mut conn, account_id, block_2, vault_key, None) + .expect("delete vault asset failed"); + + // Re-add the vault asset at block 3 with different 
amount + let asset_v3 = Asset::Fungible(FungibleAsset::new(faucet_id, 2000).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_3, vault_key, Some(asset_v3)) + .expect("re-add vault asset failed"); + + // Query at block 1: should see the asset + let assets_at_block_1 = select_account_vault_at_block(&mut conn, account_id, block_1) + .expect("Query at block 1 should succeed"); + assert_eq!(assets_at_block_1.len(), 1, "Should have 1 asset at block 1"); + + // Query at block 2: should NOT see the asset (it was deleted) + let assets_at_block_2 = select_account_vault_at_block(&mut conn, account_id, block_2) + .expect("Query at block 2 should succeed"); + assert!(assets_at_block_2.is_empty(), "Should have no assets at block 2 (deleted)"); + + // Query at block 3: should see the re-added asset with new amount + let assets_at_block_3 = select_account_vault_at_block(&mut conn, account_id, block_3) + .expect("Query at block 3 should succeed"); + assert_eq!(assets_at_block_3.len(), 1, "Should have 1 asset at block 3"); + assert_matches!(&assets_at_block_3[0], Asset::Fungible(f) if f.amount() == 2000); +} From 50680fe7644ab9a491feb18c70bd9a560c23e796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= <4142+huitseeker@users.noreply.github.com> Date: Tue, 13 Jan 2026 19:40:46 +0100 Subject: [PATCH 081/125] ci: move MSRV check from PR CI to release workflows (#1509) The MSRV check now only runs on push to main/next (in publish-dry-run.yml) and when publishing a release (in publish-main.yml), rather than on every PR. This makes PR CI faster and less resource-intensive. Any MSRV issues will still be caught before publication, and can be fixed locally using scripts/check-msrv.sh. 
Changes: - Delete .github/workflows/msrv.yml - Add cleanup-runner action for freeing disk space - Add MSRV check steps to publish-dry-run.yml - Add MSRV check steps to publish-main.yml See also: - https://github.com/0xMiden/miden-base/pull/2233 - https://github.com/0xMiden/miden-vm/pull/2468 - https://github.com/0xMiden/miden-vm/pull/2460 --- .github/actions/cleanup-runner/action.yml | 12 ++++++++ .github/workflows/msrv.yml | 37 ----------------------- .github/workflows/publish-dry-run.yml | 21 +++++++++++-- .github/workflows/publish-main.yml | 15 ++++++++- 4 files changed, 45 insertions(+), 40 deletions(-) create mode 100644 .github/actions/cleanup-runner/action.yml delete mode 100644 .github/workflows/msrv.yml diff --git a/.github/actions/cleanup-runner/action.yml b/.github/actions/cleanup-runner/action.yml new file mode 100644 index 0000000000..22edac443f --- /dev/null +++ b/.github/actions/cleanup-runner/action.yml @@ -0,0 +1,12 @@ +name: 'Cleanup Runner' +description: 'Remove unused tools in the runner image to free disk space' + +runs: + using: 'composite' + steps: + - name: Remove unused tools in the runner image + shell: bash + run: | + sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL + sudo docker image prune --all --force || true + sudo docker builder prune -a --force || true diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml deleted file mode 100644 index 046ca7663a..0000000000 --- a/.github/workflows/msrv.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Check MSRV - -on: - push: - branches: [next] - pull_request: - types: [opened, reopened, synchronize] - -# Limits workflow concurrency to only the latest commit in the PR. -concurrency: - group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" - cancel-in-progress: true - -permissions: - contents: read - -env: - # Reduce cache usage by removing debug information. 
- CARGO_PROFILE_DEV_DEBUG: 0 - -jobs: - # Check MSRV (aka `rust-version`) in `Cargo.toml` is valid for workspace members - msrv: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install dependencies - run: sudo apt-get update && sudo apt-get install -y jq - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Install cargo-msrv - run: cargo install cargo-msrv - - name: Check MSRV for each workspace member - run: | - ./scripts/check-msrv.sh diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-dry-run.yml index 9679c6d093..2acaab2faa 100644 --- a/.github/workflows/publish-dry-run.yml +++ b/.github/workflows/publish-dry-run.yml @@ -5,7 +5,11 @@ permissions: on: push: - branches: [main] + branches: [main, next] + +concurrency: + group: "${{ github.workflow }} @ ${{ github.ref }}" + cancel-in-progress: true jobs: publish-dry-run: @@ -17,9 +21,22 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + - uses: taiki-e/install-action@v2 + with: + tool: cargo-binstall + - name: Install cargo-msrv + run: cargo binstall --no-confirm --force cargo-msrv + - name: Check MSRV for each workspace member run: | - rustup update --no-self-update + export PATH="$HOME/.cargo/bin:$PATH" + ./scripts/check-msrv.sh - name: Run cargo publish dry-run run: cargo publish --workspace --dry-run env: diff --git a/.github/workflows/publish-main.yml b/.github/workflows/publish-main.yml index dcc12a71bd..25fe4552cf 100644 --- a/.github/workflows/publish-main.yml +++ b/.github/workflows/publish-main.yml @@ -34,9 +34,22 @@ jobs: exit 1 fi echo "Release tag matches 
main HEAD — continuing." + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + - uses: taiki-e/install-action@v2 + with: + tool: cargo-binstall + - name: Install cargo-msrv + run: cargo binstall --no-confirm --force cargo-msrv + - name: Check MSRV for each workspace member run: | - rustup update --no-self-update + export PATH="$HOME/.cargo/bin:$PATH" + ./scripts/check-msrv.sh - name: Run cargo publish run: cargo publish --workspace env: From 8349feba724985ef08ad4b8ff15d6a9659ff6df3 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Tue, 13 Jan 2026 14:42:56 -0800 Subject: [PATCH 082/125] chore: refresh Cargo.lock file --- Cargo.lock | 800 +++++++++++++++++++++++------------------------------ 1 file changed, 342 insertions(+), 458 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3a7a95d24..7676ec4234 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,17 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.16", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -157,9 +146,12 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +dependencies = [ + "rustversion", +] [[package]] name = "arrayref" @@ -188,28 +180,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" -[[package]] -name = "async-stream" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "async-trait" version = "0.1.89" @@ -218,7 +188,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -255,9 +225,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" dependencies = [ "axum-core", "bytes", @@ -288,9 +258,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", @@ -317,7 +287,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -343,9 +313,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.1" +version = "1.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" [[package]] name = "bech32" @@ -361,9 +331,9 @@ checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" [[package]] name = "bigdecimal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934" +checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695" dependencies = [ "autocfg", "libm", @@ -410,15 +380,16 @@ dependencies = [ [[package]] name = "blake3" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", "constant_time_eq", + "cpufeatures", ] [[package]] @@ -453,9 +424,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "bytemuck" @@ -477,9 +448,9 @@ checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" [[package]] name = "camino" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" +checksum = "e629a66d692cb9ff1a1c664e41771b3dcaf961985a9774c0eb0bd1b51cf60a48" dependencies = [ "serde_core", ] @@ -515,9 +486,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name 
= "cc" -version = "1.2.49" +version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ "find-msvc-tools", "jobserver", @@ -593,7 +564,7 @@ dependencies = [ "js-sys", "num-traits", "wasm-bindgen", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -653,9 +624,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.53" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" dependencies = [ "clap_builder", "clap_derive 4.5.49", @@ -663,13 +634,13 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.53" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" dependencies = [ "anstream", "anstyle", - "clap_lex 0.7.6", + "clap_lex 0.7.7", "strsim 0.11.1", ] @@ -695,7 +666,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -709,15 +680,15 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cmake" -version = "0.1.54" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +checksum = 
"75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] @@ -736,9 +707,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" [[package]] name = "core-foundation" @@ -793,7 +764,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.53", + "clap 4.5.54", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -907,7 +878,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -950,7 +921,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -964,7 +935,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -975,7 +946,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -986,7 +957,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1078,7 +1049,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1088,28 +1059,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "derive_more" -version = "2.1.0" +version = "2.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1131,15 +1102,15 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.3.5" +version = "2.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8587cbca3c929fb198e7950d761d31ca72b80aa6e07c1b7bec5879d187720436" +checksum = "c30b2969f923fa1f73744b92bb7df60b858df8832742d9a3aceb79236c0be1d2" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1159,7 +1130,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1188,7 +1159,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1214,7 +1185,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1335,7 +1306,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -1378,9 +1349,9 @@ dependencies = [ 
[[package]] name = "find-msvc-tools" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" [[package]] name = "fixedbitset" @@ -1390,9 +1361,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" dependencies = [ "crc32fast", "libz-ng-sys", @@ -1455,9 +1426,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.2.0" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a" +checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" dependencies = [ "autocfg", ] @@ -1518,7 +1489,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1559,16 +1530,17 @@ dependencies = [ [[package]] name = "generator" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" +checksum = "52f04ae4152da20c76fe800fa48659201d5cf627c5149ca0b707b69d7eef6cf9" dependencies = [ "cc", "cfg-if", "libc", "log", "rustversion", - "windows", + "windows-link", + "windows-result", ] [[package]] @@ -1584,9 +1556,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "js-sys", @@ -1618,7 +1590,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1646,9 +1618,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -1656,7 +1628,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.13.0", "slab", "tokio", "tokio-util", @@ -1679,9 +1651,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -1767,7 +1736,7 @@ checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" dependencies = [ "cfg-if", "libc", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -1927,7 +1896,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core", ] [[package]] @@ -1987,9 +1956,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ "icu_collections", "icu_locale_core", @@ -2001,9 +1970,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.1.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" @@ -2065,9 +2034,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -2096,9 +2065,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" dependencies = [ "memchr", "serde", @@ -2112,7 +2081,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -2147,15 +2116,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jiff" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49cce2b81f2098e7e3efc35bc2e0a6b7abec9d34128283d7a26fa8f32a6dbb35" +checksum = "e67e8da4c49d6d9909fe03361f9b620f58898859f5c7aded68351e85e71ecf50" dependencies = [ "jiff-static", "log", @@ -2166,13 +2135,13 @@ dependencies = [ [[package]] name = 
"jiff-static" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "980af8b43c3ad5d8d349ace167ec8170839f753a42d233ba19e08afe1850fa69" +checksum = "e0c84ee7f197eca9a86c6fd6cb771e55eb991632f15f2bc3ca6ec838929e6e78" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2256,9 +2225,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.178" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libm" @@ -2312,13 +2281,12 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.6" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786c72d9739fc316a7acf9b22d9c2794ac9cb91074e9668feb04304ab7219783" +checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" dependencies = [ "libc", "neli", - "thiserror 2.0.17", "windows-sys 0.61.2", ] @@ -2359,7 +2327,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2395,9 +2363,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96051b46fc183dc9cd4a223960ef37b9af631b55191852a8274bfef064cda20f" +checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" [[package]] name = "matchers" @@ -2437,9 +2405,9 @@ dependencies = [ [[package]] name = "miden-air" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e663337017ed028dff8c18a0ce1db64aad0e850996e3214f137f98317533c2e1" +checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" dependencies = [ "miden-core", "miden-utils-indexing", @@ -2450,9 +2418,9 @@ dependencies = [ [[package]] name = "miden-assembly" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001249195c227624695529c82ebf51c390ec1c28e99a567549ce3a272a2aedf3" +checksum = "24c6a18e29c03141cf9044604390a00691c7342924ec865b4acfdd560ff41ede" dependencies = [ "env_logger", "log", @@ -2465,9 +2433,9 @@ dependencies = [ [[package]] name = "miden-assembly-syntax" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1963cfa667aa6a157c99982df340a7bd42b054652e6f33d5e3513217531eca73" +checksum = "7458ff670f5a514bf972aa84d6e1851a4c4e9afa351f53b71bdc2218b99254b6" dependencies = [ "aho-corasick", "env_logger", @@ -2490,7 +2458,7 @@ dependencies = [ [[package]] name = "miden-block-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" dependencies = [ "miden-protocol", "thiserror 2.0.17", @@ -2498,9 +2466,9 @@ dependencies = [ [[package]] name = "miden-core" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136debf5474190dc584df3252710dac07a0e45315740c9538a7fc0b72c596365" +checksum = "21a5c9c8c3d42ae8381ed49e47ff9ad2d2e345c4726761be36b7d4000ebb40ae" dependencies = [ "derive_more", "itertools 0.14.0", @@ -2520,9 +2488,9 @@ dependencies = [ [[package]] name = "miden-core-lib" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcec9fb9a256d2fae347162d9a94653a1790dd33b4af73ad29686475b63deb34" +checksum 
= "6556494ea5576803730fa15015bee6bd9d1a117450f22e7df0883421e7423674" dependencies = [ "env_logger", "fs-err", @@ -2556,7 +2524,7 @@ dependencies = [ "num-complex", "rand 0.9.2", "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", "rand_hc", "rayon", "sha2", @@ -2576,14 +2544,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83479e7af490784c6f2d2e02cec5210fd6e5bc6ce3d4427734e36a773bca72d2" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "miden-debug-types" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dc25083822c3d582c42ad10aeee0138dec15a130f3017b05495bb91e31fde4a" +checksum = "19123e896f24b575e69921a79a39a0a4babeb98404a8601017feb13b75d653b3" dependencies = [ "memchr", "miden-crypto", @@ -2593,7 +2561,7 @@ dependencies = [ "miden-utils-sync", "paste", "serde", - "serde_spanned 1.0.3", + "serde_spanned 1.0.4", "thiserror 2.0.17", ] @@ -2608,9 +2576,9 @@ dependencies = [ [[package]] name = "miden-mast-package" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da35f2fc1eacbfd0b6b995e888c2b778bd646acebf34dab27f9f7ed9b3effaa2" +checksum = "f0d6a322b91efa1bb71e224395ca1fb9ca00e2614f89427e35d8c42a903868a3" dependencies = [ "derive_more", "miden-assembly-syntax", @@ -2641,7 +2609,7 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.111", + "syn 2.0.114", "terminal_size 0.3.0", "textwrap", "thiserror 2.0.17", @@ -2657,7 +2625,7 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2666,7 +2634,7 @@ version = "0.13.0" dependencies = [ "anyhow", "axum", - "clap 4.5.53", + "clap 4.5.54", "hex", "humantime", "miden-node-proto", @@ -2692,7 +2660,7 @@ name = "miden-node" version = "0.13.0" dependencies = [ 
"anyhow", - "clap 4.5.53", + "clap 4.5.54", "figment", "fs-err", "hex", @@ -2749,7 +2717,7 @@ name = "miden-node-grpc-error-macro" version = "0.13.0" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2758,7 +2726,7 @@ version = "0.13.0" dependencies = [ "anyhow", "futures", - "indexmap 2.12.1", + "indexmap 2.13.0", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", @@ -2854,7 +2822,7 @@ dependencies = [ "diesel_migrations", "fs-err", "hex", - "indexmap 2.12.1", + "indexmap 2.13.0", "miden-node-proto", "miden-node-proto-build", "miden-node-test-macro", @@ -2870,7 +2838,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tokio-stream", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", "tonic", "tonic-reflection", "tower-http", @@ -2881,7 +2849,7 @@ dependencies = [ name = "miden-node-stress-test" version = "0.13.0" dependencies = [ - "clap 4.5.53", + "clap 4.5.54", "fs-err", "futures", "miden-air", @@ -2904,7 +2872,7 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2918,7 +2886,7 @@ dependencies = [ "http", "http-body-util", "itertools 0.14.0", - "lru 0.16.2", + "lru 0.16.3", "miden-protocol", "opentelemetry", "opentelemetry-otlp", @@ -2959,9 +2927,9 @@ dependencies = [ [[package]] name = "miden-processor" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb298dbdda739080497c18eace4d56c58f3e8d257676c9b2f407be441131ecd" +checksum = "4a659fac55de14647e2695f03d96b83ff94fe65fd31e74d81c225ec52af25acf" dependencies = [ "itertools 0.14.0", "miden-air", @@ -2980,7 +2948,7 @@ dependencies = [ [[package]] name = "miden-protocol" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" dependencies = [ "bech32", 
"fs-err", @@ -3002,7 +2970,7 @@ dependencies = [ "semver 1.0.27", "serde", "thiserror 2.0.17", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", "walkdir", "winter-rand-utils", ] @@ -3010,18 +2978,18 @@ dependencies = [ [[package]] name = "miden-protocol-macros" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "miden-prover" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8506c8eb4d980134c0145887af50bd4631df4010eb23d6e454764cb1ee28836c" +checksum = "4e5df61f50f27886f6f777d6e0cdf785f7db87dd881799a84a801e7330c189c8" dependencies = [ "miden-air", "miden-debug-types", @@ -3039,7 +3007,7 @@ dependencies = [ "async-trait", "axum", "bytes", - "clap 4.5.53", + "clap 4.5.54", "http", "humantime", "miden-block-prover", @@ -3100,7 +3068,7 @@ dependencies = [ [[package]] name = "miden-standards" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" dependencies = [ "fs-err", "miden-assembly", @@ -3117,7 +3085,7 @@ dependencies = [ [[package]] name = "miden-testing" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3135,7 +3103,7 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" 
+source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" dependencies = [ "miden-processor", "miden-protocol", @@ -3148,7 +3116,7 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" dependencies = [ "miden-protocol", "miden-tx", @@ -3156,9 +3124,9 @@ dependencies = [ [[package]] name = "miden-utils-core-derive" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0807840c07a4491a292153258cfae27914333e1a7240777a77c22d8ca3b55873" +checksum = "aa207ffd8b26a79d9b5b246a352812f0015c0bb8f75492ec089c5c8e6d5f9e2b" dependencies = [ "proc-macro2", "quote", @@ -3167,9 +3135,9 @@ dependencies = [ [[package]] name = "miden-utils-diagnostics" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b28b1b29e300b471b0f1cbc286997a1326c900814a73b0b28338d5926ce192c" +checksum = "6b2f55477d410542a5d8990ca04856adf5bef91bfa3b54ca3c03a5ff14a6e25c" dependencies = [ "miden-crypto", "miden-debug-types", @@ -3180,18 +3148,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8bd0c1966de07d48a4ed0b2821466919c061f4866296be87afc56970a49716a" +checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" dependencies = [ "thiserror 2.0.17", ] [[package]] name = "miden-utils-sync" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1fa7e37db2fbf2dee6ba6e411b3570ef48d52ec780b9c8125623f9ddca30da3" +checksum = 
"da7fa8f5fd27f122c83f55752f2a964bbfc2b713de419e9c152f7dcc05c194ec" dependencies = [ "lock_api", "loom", @@ -3200,9 +3168,9 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "383c934eed92f89be4c1e3dbc97ccf37b48433a0b33727c92a5abbfa2d45f420" +checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" dependencies = [ "miden-air", "miden-core", @@ -3249,7 +3217,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3259,7 +3227,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36c791ecdf977c99f45f23280405d7723727470f6689a5e6dbf513ac547ae10d" dependencies = [ "serde", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", ] [[package]] @@ -3312,7 +3280,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] @@ -3324,7 +3292,7 @@ dependencies = [ "libc", "log", "openssl", - "openssl-probe", + "openssl-probe 0.1.6", "openssl-sys", "schannel", "security-framework 2.11.1", @@ -3334,9 +3302,9 @@ dependencies = [ [[package]] name = "neli" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fe4204517c0dafc04a1d99ecb577d52c0ffc81e1bbe5cf322769aa8fbd1b05" +checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -3350,15 +3318,15 @@ dependencies = [ [[package]] name = "neli-proc-macros" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e502fe5db321c6e0ae649ccda600675680125a8e8dee327744fe1910b19332" +checksum = 
"05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3385,7 +3353,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3435,7 +3403,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3554,7 +3522,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3563,6 +3531,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" + [[package]] name = "openssl-sys" version = "0.9.111" @@ -3667,7 +3641,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -3696,7 +3670,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3712,7 +3686,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.12.1", + "indexmap 2.13.0", ] [[package]] @@ -3741,7 +3715,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3775,7 +3749,7 @@ version = 
"0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "blake2", "bytes", @@ -3811,7 +3785,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "brotli", "bytes", @@ -3829,7 +3803,7 @@ dependencies = [ "log", "nix", "once_cell", - "openssl-probe", + "openssl-probe 0.1.6", "parking_lot", "percent-encoding", "pingora-error", @@ -3901,7 +3875,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -3933,7 +3907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.12.3", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4058,9 +4032,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" [[package]] name = "portable-atomic-util" @@ -4118,7 +4092,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4127,7 +4101,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - 
"toml_edit 0.23.9", + "toml_edit 0.23.10+spec-1.0.0", ] [[package]] @@ -4173,14 +4147,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] @@ -4193,7 +4167,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "version_check", "yansi", ] @@ -4255,7 +4229,7 @@ checksum = "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4274,8 +4248,8 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ - "heck 0.4.1", - "itertools 0.10.5", + "heck 0.5.0", + "itertools 0.14.0", "log", "multimap", "once_cell", @@ -4286,21 +4260,21 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.111", + "syn 2.0.114", "tempfile", ] [[package]] name = "prost-derive" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4405,9 +4379,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.42" +version = "1.0.43" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] @@ -4436,7 +4410,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4456,7 +4430,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4465,14 +4439,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", ] @@ -4492,7 +4466,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4501,7 +4475,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f703f4665700daf5512dcca5f43afa6af89f09db47fb56be587f80636bda2d41" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4570,9 +4544,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.24" 
+version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64", "bytes", @@ -4626,7 +4600,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -4634,22 +4608,19 @@ dependencies = [ [[package]] name = "rmp" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" +checksum = "4ba8be72d372b2c9b35542551678538b562e7cf86c3315773cae48dfbfe7790c" dependencies = [ - "byteorder", "num-traits", - "paste", ] [[package]] name = "rmp-serde" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db" +checksum = "72f81bee8c8ef9b577d1681a70ebbc962c232461e397b22c208c43c04b67a155" dependencies = [ - "byteorder", "rmp", "serde", ] @@ -4679,7 +4650,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.111", + "syn 2.0.114", "unicode-ident", ] @@ -4727,27 +4698,27 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustix" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags 2.10.0", "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "log", "once_cell", @@ -4760,11 +4731,11 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe", + "openssl-probe 0.2.0", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -4772,9 +4743,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "zeroize", ] @@ -4810,9 +4781,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ -4961,20 +4932,20 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] 
[[package]] @@ -5010,9 +4981,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" dependencies = [ "serde_core", ] @@ -5043,11 +5014,12 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" dependencies = [ - "futures", + "futures-executor", + "futures-util", "log", "once_cell", "parking_lot", @@ -5057,13 +5029,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5073,7 +5045,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" dependencies = [ "base64", - "indexmap 2.12.1", + "indexmap 2.13.0", "rust_decimal", ] @@ -5115,10 +5087,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.7" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] @@ -5134,9 +5107,9 @@ dependencies = [ [[package]] 
name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "siphasher" @@ -5193,17 +5166,15 @@ dependencies = [ [[package]] name = "sqlite-wasm-rs" -version = "0.4.8" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60bdd87fcb4c9764b024805fb2df5f1d659bea6e629fdbdcdcfc4042b9a640d0" +checksum = "05e98301bf8b0540c7de45ecd760539b9c62f5772aed172f08efba597c11cd5d" dependencies = [ + "cc", + "hashbrown 0.16.1", "js-sys", - "once_cell", "thiserror 2.0.17", - "tokio", "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", ] [[package]] @@ -5264,7 +5235,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5284,9 +5255,9 @@ dependencies = [ [[package]] name = "supports-hyperlinks" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804f44ed3c63152de6a9f90acbea1a110441de43006ea51bcce8f436196a288b" +checksum = "e396b6523b11ccb83120b115a0b7366de372751aa6edf19844dfb13a6af97e91" [[package]] name = "supports-unicode" @@ -5307,9 +5278,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -5333,7 +5304,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5365,15 +5336,15 @@ checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" 
[[package]] name = "tempfile" -version = "3.23.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", "getrandom 0.3.4", "once_cell", - "rustix 1.1.2", - "windows-sys 0.52.0", + "rustix 1.1.3", + "windows-sys 0.61.2", ] [[package]] @@ -5382,7 +5353,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5410,7 +5381,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.1.2", + "rustix 1.1.3", "windows-sys 0.60.2", ] @@ -5457,7 +5428,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5468,7 +5439,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5492,9 +5463,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" dependencies = [ "deranged", "itoa", @@ -5502,22 +5473,22 @@ dependencies = [ "num-conv", "num_threads", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" dependencies = [ "num-conv", "time-core", @@ -5545,9 +5516,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", @@ -5568,7 +5539,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5593,9 +5564,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -5605,12 +5576,10 @@ dependencies = [ [[package]] name = "tokio-test" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" +checksum = "3f6d24790a10a7af737693a3e8f1d03faef7e6ca0cc99aae5066f533766de545" dependencies = [ - "async-stream", - "bytes", "futures-core", "tokio", "tokio-stream", @@ -5618,9 +5587,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -5643,14 +5612,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.8" +version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde_core", - "serde_spanned 1.0.3", - "toml_datetime 0.7.3", + "serde_spanned 1.0.4", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", "winnow", @@ -5667,9 +5636,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] @@ -5680,7 +5649,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -5690,21 +5659,21 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.9" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d7cbc3b4b49633d57a0509303158ca50de80ae32c265093b24c414705807832" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.1", - "toml_datetime 0.7.3", + "indexmap 2.13.0", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", ] 
[[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -5717,9 +5686,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" @@ -5761,7 +5730,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5799,7 +5768,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.111", + "syn 2.0.114", "tempfile", "tonic-build", ] @@ -5863,13 +5832,13 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.1", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -5882,9 +5851,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "bitflags 2.10.0", "bytes", @@ -5914,9 +5883,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = 
"tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -5932,14 +5901,14 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -5971,16 +5940,13 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" +checksum = "1ac28f2d093c6c477eaa76b23525478f38de514fa9aeb1285738d4b97a9552fc" dependencies = [ "js-sys", "opentelemetry", - "opentelemetry_sdk", - "rustversion", "smallvec", - "thiserror 2.0.17", "tracing", "tracing-core", "tracing-log", @@ -6057,7 +6023,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", ] [[package]] @@ -6083,9 +6049,9 @@ dependencies = [ [[package]] name = "unicase" -version = "2.8.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" @@ -6135,14 +6101,15 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.7" 
+version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -6323,7 +6290,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "wasm-bindgen-shared", ] @@ -6391,7 +6358,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -6400,41 +6367,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.61.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" -dependencies = [ - "windows-collections", - "windows-core 0.61.2", - "windows-future", - "windows-link 0.1.3", - "windows-numerics", -] - -[[package]] -name = "windows-collections" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" -dependencies = [ - "windows-core 0.61.2", -] - -[[package]] -name = "windows-core" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-link 0.1.3", - "windows-result 0.3.4", - "windows-strings 0.4.2", -] - [[package]] name = "windows-core" version = "0.62.2" @@ -6443,20 +6375,9 @@ checksum = 
"b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link 0.2.1", - "windows-result 0.4.1", - "windows-strings 0.5.1", -] - -[[package]] -name = "windows-future" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", - "windows-threading", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] @@ -6467,7 +6388,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6478,49 +6399,24 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" -[[package]] -name = "windows-numerics" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", -] - [[package]] name = "windows-registry" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" dependencies = [ - "windows-link 0.2.1", - "windows-result 0.4.1", - "windows-strings 0.5.1", -] - -[[package]] -name = "windows-result" -version = "0.3.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] @@ -6529,16 +6425,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", + "windows-link", ] [[package]] @@ -6547,7 +6434,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -6592,7 +6479,7 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -6632,7 +6519,7 @@ version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link 0.2.1", + "windows-link", "windows_aarch64_gnullvm 0.53.1", "windows_aarch64_msvc 0.53.1", "windows_i686_gnu 0.53.1", @@ -6643,15 +6530,6 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] -[[package]] -name = "windows-threading" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" -dependencies = [ - "windows-link 0.1.3", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ 
-6851,7 +6729,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6968,28 +6846,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -7009,7 +6887,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] @@ -7049,9 +6927,15 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] +[[package]] +name = "zmij" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" + [[package]] name = "zstd" version = "0.13.3" From d0df78eaf0f418ee8fd89e86d19ee41d53035859 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 14 Jan 2026 11:30:26 +0100 Subject: [PATCH 083/125] feat: [3/4] partial storage map queries (#1428) --- CHANGELOG.md | 1 + 
Cargo.lock | 1 + crates/proto/Cargo.toml | 3 +- crates/proto/src/domain/account.rs | 246 ++++++++++++------ crates/proto/src/domain/account/tests.rs | 41 +++ crates/proto/src/generated/rpc.rs | 60 ++++- crates/store/src/db/mod.rs | 31 +-- .../store/src/db/models/queries/accounts.rs | 3 +- .../db/models/queries/accounts/at_block.rs | 124 +-------- .../src/db/models/queries/accounts/tests.rs | 152 +++++++---- crates/store/src/errors.rs | 6 + crates/store/src/inner_forest/mod.rs | 141 ++++++++-- crates/store/src/inner_forest/tests.rs | 23 +- crates/store/src/state.rs | 52 ++-- proto/proto/rpc.proto | 33 ++- 15 files changed, 582 insertions(+), 335 deletions(-) create mode 100644 crates/proto/src/domain/account/tests.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 90d4a3b959..5a1f1db02d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - Added pagination to `GetNetworkAccountIds` endpoint ([#1452](https://github.com/0xMiden/miden-node/pull/1452)). - Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)). - Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). +- Add partial storage map queries to RPC ([#1428](https://github.com/0xMiden/miden-node/pull/1428)). - Added validated transactions check to block validation logc in Validator ([#1460](https://github.com/0xMiden/miden-node/pull/1460)). - Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/miden-node/pull/1450)). - Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)). 
diff --git a/Cargo.lock b/Cargo.lock index 7676ec4234..1ad02438bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2749,6 +2749,7 @@ name = "miden-node-proto" version = "0.13.0" dependencies = [ "anyhow", + "assert_matches", "fs-err", "hex", "http", diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 738eade6b4..255b27c9df 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -28,7 +28,8 @@ tonic-prost = { workspace = true } url = { workspace = true } [dev-dependencies] -proptest = { version = "1.7" } +assert_matches = { workspace = true } +proptest = { version = "1.7" } [build-dependencies] fs-err = { workspace = true } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 5bc6b4ecc4..6d736b2431 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -16,6 +16,7 @@ use miden_protocol::asset::{Asset, AssetVault}; use miden_protocol::block::BlockNumber; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::note::{NoteExecutionMode, NoteTag}; use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; use thiserror::Error; @@ -24,6 +25,9 @@ use super::try_convert; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated::{self as proto}; +#[cfg(test)] +mod tests; + // ACCOUNT ID // ================================================================================================ @@ -187,52 +191,6 @@ impl TryFrom for Accoun } } -impl TryFrom - for AccountStorageMapDetails -{ - type Error = ConversionError; - - fn try_from( - value: proto::rpc::account_storage_details::AccountStorageMapDetails, - ) -> Result { - use proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry; - let proto::rpc::account_storage_details::AccountStorageMapDetails { - slot_name, - 
too_many_entries, - entries, - } = value; - - let slot_name = StorageSlotName::new(slot_name)?; - - let entries = if too_many_entries { - StorageMapEntries::LimitExceeded - } else { - let map_entries = if let Some(entries) = entries { - entries - .entries - .into_iter() - .map(|entry| { - let key = entry - .key - .ok_or(StorageMapEntry::missing_field(stringify!(key)))? - .try_into()?; - let value = entry - .value - .ok_or(StorageMapEntry::missing_field(stringify!(value)))? - .try_into()?; - Ok((key, value)) - }) - .collect::, ConversionError>>()? - } else { - Vec::new() - }; - StorageMapEntries::Entries(map_entries) - }; - - Ok(Self { slot_name, entries }) - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapRequest { pub slot_name: StorageSlotName, @@ -259,6 +217,7 @@ impl TryFrom), + /// All storage map entries (key-value pairs) without proofs. + /// Used when all entries are requested for small maps. + AllEntries(Vec<(Word, Word)>), + + /// Specific entries with their SMT proofs for client-side verification. + /// Used when specific keys are requested from the storage map. + EntriesWithProofs(Vec), } impl AccountStorageMapDetails { /// Maximum number of storage map entries that can be returned in a single response. pub const MAX_RETURN_ENTRIES: usize = 1000; - pub fn new(slot_name: StorageSlotName, slot_data: SlotData, storage_map: &StorageMap) -> Self { - match slot_data { - SlotData::All => Self::from_all_entries(slot_name, storage_map), - SlotData::MapKeys(keys) => Self::from_specific_keys(slot_name, &keys[..], storage_map), + /// Creates storage map details with all entries from the storage map. + /// + /// If the storage map has too many entries (> `MAX_RETURN_ENTRIES`), + /// returns `LimitExceeded` variant. 
+ pub fn from_all_entries(slot_name: StorageSlotName, storage_map: &StorageMap) -> Self { + if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES { + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } + } else { + let entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v))); + Self { + slot_name, + entries: StorageMapEntries::AllEntries(entries), + } } } - fn from_all_entries(slot_name: StorageSlotName, storage_map: &StorageMap) -> Self { - if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES { + /// Creates storage map details from forest-queried entries. + /// + /// Returns `LimitExceeded` if too many entries. + pub fn from_forest_entries(slot_name: StorageSlotName, entries: Vec<(Word, Word)>) -> Self { + if entries.len() > Self::MAX_RETURN_ENTRIES { Self { slot_name, entries: StorageMapEntries::LimitExceeded, } } else { - let map_entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v))); Self { slot_name, - entries: StorageMapEntries::Entries(map_entries), + entries: StorageMapEntries::AllEntries(entries), } } } - fn from_specific_keys( - slot_name: StorageSlotName, - keys: &[Word], - storage_map: &StorageMap, - ) -> Self { - if keys.len() > Self::MAX_RETURN_ENTRIES { + /// Creates storage map details from pre-computed SMT proofs. + /// + /// Use this when the caller has already obtained the proofs from an `SmtForest`. + /// Returns `LimitExceeded` if too many proofs are provided. 
+ pub fn from_proofs(slot_name: StorageSlotName, proofs: Vec) -> Self { + if proofs.len() > Self::MAX_RETURN_ENTRIES { Self { slot_name, entries: StorageMapEntries::LimitExceeded, } } else { - // TODO For now, we return all entries instead of specific keys with proofs - Self::from_all_entries(slot_name, storage_map) + Self { + slot_name, + entries: StorageMapEntries::EntriesWithProofs(proofs), + } } } } +impl TryFrom + for AccountStorageMapDetails +{ + type Error = ConversionError; + + fn try_from( + value: proto::rpc::account_storage_details::AccountStorageMapDetails, + ) -> Result { + use proto::rpc::account_storage_details::account_storage_map_details::{ + all_map_entries::StorageMapEntry, + map_entries_with_proofs::StorageMapEntryWithProof, + AllMapEntries, + Entries as ProtoEntries, + MapEntriesWithProofs, + }; + + let proto::rpc::account_storage_details::AccountStorageMapDetails { + slot_name, + too_many_entries, + entries, + } = value; + + let slot_name = StorageSlotName::new(slot_name)?; + + let entries = if too_many_entries { + StorageMapEntries::LimitExceeded + } else { + match entries { + None => { + return Err( + proto::rpc::account_storage_details::AccountStorageMapDetails::missing_field( + stringify!(entries), + ), + ); + }, + Some(ProtoEntries::AllEntries(AllMapEntries { entries })) => { + let entries = entries + .into_iter() + .map(|entry| { + let key = entry + .key + .ok_or(StorageMapEntry::missing_field(stringify!(key)))? + .try_into()?; + let value = entry + .value + .ok_or(StorageMapEntry::missing_field(stringify!(value)))? 
+ .try_into()?; + Ok((key, value)) + }) + .collect::, ConversionError>>()?; + StorageMapEntries::AllEntries(entries) + }, + Some(ProtoEntries::EntriesWithProofs(MapEntriesWithProofs { entries })) => { + let proofs = entries + .into_iter() + .map(|entry| { + let smt_opening = entry.proof.ok_or( + StorageMapEntryWithProof::missing_field(stringify!(proof)), + )?; + SmtProof::try_from(smt_opening) + }) + .collect::, ConversionError>>()?; + StorageMapEntries::EntriesWithProofs(proofs) + }, + } + }; + + Ok(Self { slot_name, entries }) + } +} + impl From for proto::rpc::account_storage_details::AccountStorageMapDetails { fn from(value: AccountStorageMapDetails) -> Self { - use proto::rpc::account_storage_details::account_storage_map_details; + use proto::rpc::account_storage_details::account_storage_map_details::{ + AllMapEntries, + Entries as ProtoEntries, + MapEntriesWithProofs, + }; let AccountStorageMapDetails { slot_name, entries } = value; - match entries { - StorageMapEntries::LimitExceeded => Self { - slot_name: slot_name.to_string(), - too_many_entries: true, - entries: Some(account_storage_map_details::MapEntries { entries: Vec::new() }), + let (too_many_entries, proto_entries) = match entries { + StorageMapEntries::LimitExceeded => (true, None), + StorageMapEntries::AllEntries(entries) => { + let all = AllMapEntries { + entries: Vec::from_iter(entries.into_iter().map(|(key, value)| { + proto::rpc::account_storage_details::account_storage_map_details::all_map_entries::StorageMapEntry { + key: Some(key.into()), + value: Some(value.into()), + } + })), + }; + (false, Some(ProtoEntries::AllEntries(all))) }, - StorageMapEntries::Entries(map_entries) => { - let entries = Some(account_storage_map_details::MapEntries { - entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| { - account_storage_map_details::map_entries::StorageMapEntry { + StorageMapEntries::EntriesWithProofs(proofs) => { + use miden_protocol::crypto::merkle::smt::SmtLeaf; + + let 
with_proofs = MapEntriesWithProofs { + entries: Vec::from_iter(proofs.into_iter().map(|proof| { + // Get key/value from the leaf before consuming the proof + let (key, value) = match proof.leaf() { + SmtLeaf::Empty(_) => { + (miden_protocol::EMPTY_WORD, miden_protocol::EMPTY_WORD) + }, + SmtLeaf::Single((k, v)) => (*k, *v), + SmtLeaf::Multiple(entries) => entries.iter().next().map_or( + (miden_protocol::EMPTY_WORD, miden_protocol::EMPTY_WORD), + |(k, v)| (*k, *v), + ), + }; + let smt_opening = proto::primitives::SmtOpening::from(proof); + proto::rpc::account_storage_details::account_storage_map_details::map_entries_with_proofs::StorageMapEntryWithProof { key: Some(key.into()), value: Some(value.into()), + proof: Some(smt_opening), } })), - }); - - Self { - slot_name: slot_name.to_string(), - too_many_entries: false, - entries, - } + }; + (false, Some(ProtoEntries::EntriesWithProofs(with_proofs))) }, + }; + + Self { + slot_name: slot_name.to_string(), + too_many_entries, + entries: proto_entries, } } } -// ACCOUNT STORAGE DETAILS DETAILS -//================================================================================================ - -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq)] pub struct AccountStorageDetails { pub header: AccountStorageHeader, pub map_details: Vec, diff --git a/crates/proto/src/domain/account/tests.rs b/crates/proto/src/domain/account/tests.rs new file mode 100644 index 0000000000..695813d990 --- /dev/null +++ b/crates/proto/src/domain/account/tests.rs @@ -0,0 +1,41 @@ +use super::*; + +fn word_from_u32(arr: [u32; 4]) -> Word { + Word::from(arr) +} + +fn test_slot_name() -> StorageSlotName { + StorageSlotName::new("miden::test::storage::slot").unwrap() +} + +#[test] +fn account_storage_map_details_from_forest_entries() { + let slot_name = test_slot_name(); + let entries = vec![ + (word_from_u32([1, 2, 3, 4]), word_from_u32([5, 6, 7, 8])), + (word_from_u32([9, 10, 11, 12]), word_from_u32([13, 14, 15, 16])), + ]; + 
+ let details = AccountStorageMapDetails::from_forest_entries(slot_name.clone(), entries.clone()); + + assert_eq!(details.slot_name, slot_name); + assert_eq!(details.entries, StorageMapEntries::AllEntries(entries)); +} + +#[test] +fn account_storage_map_details_from_forest_entries_limit_exceeded() { + let slot_name = test_slot_name(); + // Create more entries than MAX_RETURN_ENTRIES + let entries: Vec<_> = (0..=AccountStorageMapDetails::MAX_RETURN_ENTRIES) + .map(|i| { + let key = word_from_u32([i as u32, 0, 0, 0]); + let value = word_from_u32([0, 0, 0, i as u32]); + (key, value) + }) + .collect(); + + let details = AccountStorageMapDetails::from_forest_entries(slot_name.clone(), entries); + + assert_eq!(details.slot_name, slot_name); + assert_eq!(details.entries, StorageMapEntries::LimitExceeded); +} diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index f9a59e39c4..755009e2c0 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -233,25 +233,53 @@ pub mod account_storage_details { /// Storage slot name. #[prost(string, tag = "1")] pub slot_name: ::prost::alloc::string::String, - /// A flag that is set to `true` if the number of to-be-returned entries in the - /// storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - /// endpoint should be used to get all storage map data. + /// True when the number of entries exceeds the response limit. + /// When set, clients should use the `SyncStorageMaps` endpoint. #[prost(bool, tag = "2")] pub too_many_entries: bool, - /// By default we provide all storage entries. - #[prost(message, optional, tag = "3")] - pub entries: ::core::option::Option, + /// The map entries (with or without proofs). Empty when too_many_entries is true. + #[prost(oneof = "account_storage_map_details::Entries", tags = "3, 4")] + pub entries: ::core::option::Option, } /// Nested message and enum types in `AccountStorageMapDetails`. 
pub mod account_storage_map_details { - /// Wrapper for repeated storage map entries + /// Wrapper for repeated storage map entries including their proofs. + /// Used when specific keys are requested to enable client-side verification. #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapEntries { + pub struct MapEntriesWithProofs { #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, + pub entries: ::prost::alloc::vec::Vec< + map_entries_with_proofs::StorageMapEntryWithProof, + >, } - /// Nested message and enum types in `MapEntries`. - pub mod map_entries { + /// Nested message and enum types in `MapEntriesWithProofs`. + pub mod map_entries_with_proofs { + /// Definition of individual storage entries including a proof. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct StorageMapEntryWithProof { + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + #[prost(message, optional, tag = "3")] + pub proof: ::core::option::Option< + super::super::super::super::primitives::SmtOpening, + >, + } + } + /// Wrapper for repeated storage map entries (without proofs). + /// Used when all entries are requested for small maps. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AllMapEntries { + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec, + } + /// Nested message and enum types in `AllMapEntries`. + pub mod all_map_entries { /// Definition of individual storage entries. #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct StorageMapEntry { @@ -265,6 +293,16 @@ pub mod account_storage_details { >, } } + /// The map entries (with or without proofs). Empty when too_many_entries is true. 
+ #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Entries { + /// All storage entries without proofs (for small maps or full requests). + #[prost(message, tag = "3")] + AllEntries(AllMapEntries), + /// Specific entries with their SMT proofs (for partial requests). + #[prost(message, tag = "4")] + EntriesWithProofs(MapEntriesWithProofs), + } } } /// List of nullifiers to return proofs for. diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 3d54a501f2..a2dacd2358 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -7,7 +7,7 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary, NetworkAccountPrefix}; use miden_node_proto::generated as proto; use miden_protocol::Word; -use miden_protocol::account::{AccountHeader, AccountId, AccountStorage}; +use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; use miden_protocol::asset::{Asset, AssetVaultKey}; use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; use miden_protocol::crypto::merkle::SparseMerklePath; @@ -453,23 +453,6 @@ impl Db { .await } - /// Reconstructs account storage at a specific block from the database - /// - /// This method queries the decomposed storage tables and reconstructs the full - /// `AccountStorage` with SMT backing for Map slots. 
- // TODO split querying the header from the content - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_account_storage_at_block( - &self, - account_id: AccountId, - block_num: BlockNumber, - ) -> Result { - self.transact("Get account storage at block", move |conn| { - queries::select_account_storage_at_block(conn, account_id, block_num) - }) - .await - } - /// Queries vault assets at a specific block #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account_vault_at_block( @@ -496,17 +479,17 @@ impl Db { .await } - /// Queries the account header for a specific account at a specific block number. + /// Queries the account header and storage header for a specific account at a block. /// + /// Returns both in a single query to avoid querying the database twice. /// Returns `None` if the account doesn't exist at that block. - pub async fn select_account_header_at_block( + pub async fn select_account_header_with_storage_header_at_block( &self, account_id: AccountId, block_num: BlockNumber, - ) -> Result> { - self.transact("Get account header at block", move |conn| { - queries::select_account_header_at_block(conn, account_id, block_num) - .map(|opt| opt.map(|(header, _storage_header)| header)) + ) -> Result> { + self.transact("Get account header with storage header at block", move |conn| { + queries::select_account_header_with_storage_header_at_block(conn, account_id, block_num) }) .await } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 3c615c51b4..f517360cd6 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -50,8 +50,7 @@ use crate::errors::DatabaseError; mod at_block; pub(crate) use at_block::{ - select_account_header_at_block, - select_account_storage_at_block, + select_account_header_with_storage_header_at_block, 
select_account_vault_at_block, }; diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs index 307edd0b79..41ec035f3f 100644 --- a/crates/store/src/db/models/queries/accounts/at_block.rs +++ b/crates/store/src/db/models/queries/accounts/at_block.rs @@ -1,18 +1,7 @@ -use std::collections::BTreeMap; - use diesel::prelude::{Queryable, QueryableByName}; use diesel::query_dsl::methods::SelectDsl; use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection}; -use miden_protocol::account::{ - AccountHeader, - AccountId, - AccountStorage, - AccountStorageHeader, - StorageMap, - StorageSlot, - StorageSlotName, - StorageSlotType, -}; +use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; use miden_protocol::asset::Asset; use miden_protocol::block::BlockNumber; use miden_protocol::utils::{Deserializable, Serializable}; @@ -48,10 +37,10 @@ struct AccountHeaderDataRaw { /// /// # Returns /// -/// * `Ok(Some(AccountHeader))` - The account header if found +/// * `Ok(Some((AccountHeader, AccountStorageHeader)))` - The headers if found /// * `Ok(None)` - If account doesn't exist at that block /// * `Err(DatabaseError)` - If there's a database error -pub(crate) fn select_account_header_at_block( +pub(crate) fn select_account_header_with_storage_header_at_block( conn: &mut SqliteConnection, account_id: AccountId, block_num: BlockNumber, @@ -87,15 +76,13 @@ pub(crate) fn select_account_header_at_block( return Ok(None); }; - let (storage_commitment, storage_header) = match storage_header_blob { - Some(blob) => { - let header = AccountStorageHeader::read_from_bytes(&blob)?; - let commitment = header.to_commitment(); - (commitment, header) - }, - None => (Word::default(), AccountStorageHeader::new(Vec::new())?), + let storage_header = match &storage_header_blob { + Some(blob) => AccountStorageHeader::read_from_bytes(blob)?, + None => 
AccountStorageHeader::new(Vec::new())?, }; + let storage_commitment = storage_header.to_commitment(); + let code_commitment = code_commitment_bytes .map(|bytes| Word::read_from_bytes(&bytes)) .transpose()? @@ -108,10 +95,10 @@ pub(crate) fn select_account_header_at_block( .transpose()? .unwrap_or(Word::default()); - Ok(Some(( - AccountHeader::new(account_id, nonce, vault_root, storage_commitment, code_commitment), - storage_header, - ))) + let account_header = + AccountHeader::new(account_id, nonce, vault_root, storage_commitment, code_commitment); + + Ok(Some((account_header, storage_header))) } // ACCOUNT VAULT @@ -175,90 +162,3 @@ struct AssetRow { #[diesel(sql_type = diesel::sql_types::Nullable)] asset: Option>, } - -// ACCOUNT STORAGE -// ================================================================================================ - -/// Returns account storage at a given block by reading from `accounts.storage_header` -/// (which contains the `AccountStorageHeader`) and reconstructing full storage from -/// map values in `account_storage_map_values` table. -pub(crate) fn select_account_storage_at_block( - conn: &mut SqliteConnection, - account_id: AccountId, - block_num: BlockNumber, -) -> Result { - use schema::account_storage_map_values as t; - - let account_id_bytes = account_id.to_bytes(); - let block_num_sql = block_num.to_raw_sql(); - - // Query storage header blob for this account at or before this block - let storage_blob: Option> = - SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) - .filter(schema::accounts::account_id.eq(&account_id_bytes)) - .filter(schema::accounts::block_num.le(block_num_sql)) - .order(schema::accounts::block_num.desc()) - .limit(1) - .first(conn) - .optional()? 
- .flatten(); - - let Some(blob) = storage_blob else { - // No storage means empty storage - return Ok(AccountStorage::new(Vec::new())?); - }; - - // Deserialize the AccountStorageHeader from the blob - let header = AccountStorageHeader::read_from_bytes(&blob)?; - - // Query all map values for this account up to and including this block. - // Order by (slot_name, key) ascending, then block_num descending so the first entry - // for each (slot_name, key) pair is the latest one. - let map_values: Vec<(String, Vec, Vec)> = - SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) - .filter(t::account_id.eq(&account_id_bytes)) - .filter(t::block_num.le(block_num_sql)) - .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc())) - .load(conn)?; - - // For each (slot_name, key) pair, keep only the latest entry (first one due to ordering) - let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); - - for (slot_name_str, key_bytes, value_bytes) in map_values { - let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { - DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) - })?; - let key = Word::read_from_bytes(&key_bytes)?; - let value = Word::read_from_bytes(&value_bytes)?; - - // Only insert if we haven't seen this (slot_name, key) yet - // (since results are ordered by block_num desc, first one is latest) - latest_map_entries.entry((slot_name, key)).or_insert(value); - } - - // Group entries by slot name - let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); - for ((slot_name, key), value) in latest_map_entries { - map_entries_by_slot.entry(slot_name).or_default().push((key, value)); - } - - // Reconstruct StorageSlots from header slots + map entries - let mut slots = Vec::new(); - for slot_header in header.slots() { - let slot = match slot_header.slot_type() { - StorageSlotType::Value => { - // For value slots, the header value IS the slot value - 
StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) - }, - StorageSlotType::Map => { - // For map slots, reconstruct from map entries - let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); - let storage_map = StorageMap::with_entries(entries)?; - StorageSlot::with_map(slot_header.name().clone(), storage_map) - }, - }; - slots.push(slot); - } - - Ok(AccountStorage::new(slots)?) -} diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index a0a23f3b54..2df6309877 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -1,7 +1,16 @@ //! Tests for the `accounts` module, specifically for account storage and historical queries. +use std::collections::BTreeMap; + use diesel::query_dsl::methods::SelectDsl; -use diesel::{Connection, OptionalExtension, QueryDsl, RunQueryDsl}; +use diesel::{ + BoolExpressionMethods, + Connection, + ExpressionMethods, + OptionalExtension, + QueryDsl, + RunQueryDsl, +}; use diesel_migrations::MigrationHarness; use miden_node_utils::fee::test_fee_params; use miden_protocol::account::auth::PublicKeyCommitment; @@ -13,20 +22,27 @@ use miden_protocol::account::{ AccountDelta, AccountId, AccountIdVersion, + AccountStorage, + AccountStorageHeader, AccountStorageMode, AccountType, + StorageMap, StorageSlot, StorageSlotName, + StorageSlotType, }; use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; -use miden_protocol::utils::Serializable; +use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, Word}; use miden_standards::account::auth::AuthRpoFalcon512; use miden_standards::code_builder::CodeBuilder; use super::*; use crate::db::migrations::MIGRATIONS; +use crate::db::models::conv::SqlTypeConvert; +use crate::db::schema; +use 
crate::errors::DatabaseError; fn setup_test_db() -> SqliteConnection { let mut conn = @@ -37,6 +53,80 @@ fn setup_test_db() -> SqliteConnection { conn } +/// Test helper: reconstructs account storage at a given block from DB. +/// +/// Reads `accounts.storage_header` and `account_storage_map_values` to reconstruct +/// the full `AccountStorage` at the specified block. +fn reconstruct_account_storage_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + // Query storage header blob for this account at or before this block + let storage_blob: Option> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::block_num.le(block_num_sql)) + .order(schema::accounts::block_num.desc()) + .limit(1) + .first(conn) + .optional()? + .flatten(); + + let Some(blob) = storage_blob else { + return Ok(AccountStorage::new(Vec::new())?); + }; + + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all map values for this account up to and including this block. 
+ let map_values: Vec<(i64, String, Vec, Vec)> = + SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes).and(t::block_num.le(block_num_sql))) + .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc())) + .load(conn)?; + + // For each (slot_name, key) pair, keep only the latest entry + let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); + for (_, slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + latest_map_entries.entry((slot_name, key)).or_insert(value); + } + + // Group entries by slot name + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + for ((slot_name, key), value) in latest_map_entries { + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) 
+} + fn create_test_account_with_storage() -> (Account, AccountId) { // Create a simple public account with one value storage slot let account_id = AccountId::dummy( @@ -112,8 +202,9 @@ fn test_select_account_header_at_block_returns_none_for_nonexistent() { ); // Query for a non-existent account - let result = select_account_header_at_block(&mut conn, account_id, block_num) - .expect("Query should succeed"); + let result = + select_account_header_with_storage_header_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); assert!(result.is_none(), "Should return None for non-existent account"); } @@ -139,7 +230,7 @@ fn test_select_account_header_at_block_returns_correct_header() { // Query the account header let (header, _storage_header) = - select_account_header_at_block(&mut conn, account_id, block_num) + select_account_header_with_storage_header_at_block(&mut conn, account_id, block_num) .expect("Query should succeed") .expect("Header should exist"); @@ -175,16 +266,18 @@ fn test_select_account_header_at_block_historical_query() { upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); // Query at block 1 - should return the account - let (header_1, _) = select_account_header_at_block(&mut conn, account_id, block_num_1) - .expect("Query should succeed") - .expect("Header should exist at block 1"); + let (header_1, _) = + select_account_header_with_storage_header_at_block(&mut conn, account_id, block_num_1) + .expect("Query should succeed") + .expect("Header should exist at block 1"); assert_eq!(header_1.nonce(), nonce_1, "Nonce at block 1 should match"); // Query at block 2 - should return the same account (most recent before block 2) - let (header_2, _) = select_account_header_at_block(&mut conn, account_id, block_num_2) - .expect("Query should succeed") - .expect("Header should exist at block 2"); + let (header_2, _) = + select_account_header_with_storage_header_at_block(&mut conn, account_id, 
block_num_2) + .expect("Query should succeed") + .expect("Header should exist at block 2"); assert_eq!(header_2.nonce(), nonce_1, "Nonce at block 2 should match block 1"); } @@ -221,38 +314,6 @@ fn test_select_account_vault_at_block_empty() { // ACCOUNT STORAGE AT BLOCK TESTS // ================================================================================================ -#[test] -fn test_select_account_storage_at_block_returns_storage() { - let mut conn = setup_test_db(); - let (account, _) = create_test_account_with_storage(); - let account_id = account.id(); - - let block_num = BlockNumber::from_epoch(0); - insert_block_header(&mut conn, block_num); - - let original_storage_commitment = account.storage().to_commitment(); - - // Insert the account - let delta = AccountDelta::try_from(account.clone()).unwrap(); - let account_update = BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Delta(delta), - ); - - upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); - - // Query storage - let storage = select_account_storage_at_block(&mut conn, account_id, block_num) - .expect("Query should succeed"); - - assert_eq!( - storage.to_commitment(), - original_storage_commitment, - "Storage commitment should match" - ); -} - #[test] fn test_upsert_accounts_inserts_storage_header() { let mut conn = setup_test_db(); @@ -397,8 +458,9 @@ fn test_upsert_accounts_updates_is_latest_flag() { ); // Verify historical query returns first update - let storage_at_block_1 = select_account_storage_at_block(&mut conn, account_id, block_num_1) - .expect("Failed to query storage at block 1"); + let storage_at_block_1 = + reconstruct_account_storage_at_block(&mut conn, account_id, block_num_1) + .expect("Failed to query storage at block 1"); assert_eq!( storage_at_block_1.to_commitment(), diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 7e0c326a23..7ebed5a744 100644 --- 
a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -134,6 +134,12 @@ pub enum DatabaseError { SqlValueConversion(#[from] DatabaseTypeConversionError), #[error("Not implemented: {0}")] NotImplemented(String), + #[error("storage root not found for account {account_id}, slot {slot_name}, block {block_num}")] + StorageRootNotFound { + account_id: AccountId, + slot_name: String, + block_num: BlockNumber, + }, } impl DatabaseError { diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index d368896f26..7a43e40f98 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,11 +1,12 @@ use std::collections::BTreeMap; +use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{AccountId, NonFungibleDeltaAction, StorageSlotName}; use miden_protocol::asset::{Asset, FungibleAsset}; use miden_protocol::block::BlockNumber; -use miden_protocol::crypto::merkle::EmptySubtreeRoots; use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; +use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; @@ -42,6 +43,10 @@ pub(crate) struct InnerForest { /// Populated during block import for all storage map slots. storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, + /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. + /// Accumulated from deltas - each block's entries include all entries up to that point. + storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, + /// Maps (`account_id`, `block_num`) to vault SMT root. /// Tracks asset vault versions across all blocks with structural sharing. 
vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, @@ -52,6 +57,7 @@ impl InnerForest { Self { forest: SmtForest::new(), storage_map_roots: BTreeMap::new(), + storage_entries: BTreeMap::new(), vault_roots: BTreeMap::new(), } } @@ -81,9 +87,8 @@ impl InnerForest { return Self::empty_smt_root(); } self.vault_roots - .range((account_id, BlockNumber::GENESIS)..) - .take_while(|((id, _), _)| *id == account_id) - .last() + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) + .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) } @@ -110,27 +115,97 @@ impl InnerForest { } self.storage_map_roots - .range((account_id, slot_name.clone(), BlockNumber::GENESIS)..) - .take_while(|((id, name, _), _)| *id == account_id && name == slot_name) - .last() + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ) + .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) } /// Retrieves the vault SMT root for an account at or before the given block. + /// Retrieves the storage map SMT root for an account slot at or before the given block. /// - /// Finds the most recent vault root entry for the account, since vault state persists + /// Finds the most recent storage root entry for the slot, since storage state persists /// across blocks where no changes occur. - // - // TODO: a fallback to DB lookup is required once pruning lands. 
- // Currently returns empty root which would be incorrect - #[cfg(test)] - fn get_vault_root(&self, account_id: AccountId, block_num: BlockNumber) -> Word { - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + pub(crate) fn get_storage_root( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + ) -> Word { + self.storage_map_roots + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), block_num), + ) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) } + /// Opens a storage map and returns storage map details with SMT proofs for the given keys. + /// + /// Returns `None` if no storage root is tracked for this account/slot/block combination. + /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. + pub(crate) fn open_storage_map( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + keys: &[Word], + ) -> Option> { + let root = self.get_storage_root(account_id, &slot_name, block_num); + + // Empty root means no storage map exists for this account/slot + if root == Self::empty_smt_root() { + return None; + } + + if keys.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Some(Ok(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::LimitExceeded, + })); + } + + // Collect SMT proofs for each key + let proofs = Result::from_iter(keys.iter().map(|key| self.forest.open(root, *key))); + + Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) + } + + /// Returns all key-value entries for a specific account storage slot at or before a block. + /// + /// Uses range query semantics: finds the most recent entries at or before `block_num`. + /// Returns `None` if no entries exist for this account/slot up to the given block. + /// Returns `LimitExceeded` if there are too many entries to return. 
+ pub(crate) fn storage_map_entries( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + ) -> Option { + // Find the most recent entries at or before block_num + let entries = self + .storage_entries + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), block_num), + ) + .next_back() + .map(|(_, entries)| entries)?; + + if entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Some(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::LimitExceeded, + }); + } + let entries = Vec::from_iter(entries.iter().map(|(k, v)| (*k, *v))); + + Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) + } + // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- @@ -297,7 +372,7 @@ impl InnerForest { /// Updates the forest with storage map changes from a delta. /// /// Processes storage map slot deltas, building SMTs for each modified slot - /// and tracking the new roots. + /// and tracking the new roots and accumulated entries. 
/// /// # Arguments /// @@ -313,27 +388,53 @@ impl InnerForest { for (slot_name, map_delta) in storage_delta.maps() { let prev_root = self.get_latest_storage_map_root(account_id, slot_name, is_full_state); - let entries: Vec<_> = + let delta_entries: Vec<_> = map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); - if entries.is_empty() { + if delta_entries.is_empty() { continue; } let updated_root = self .forest - .batch_insert(prev_root, entries.iter().copied()) + .batch_insert(prev_root, delta_entries.iter().copied()) .expect("forest insertion should succeed"); self.storage_map_roots .insert((account_id, slot_name.clone(), block_num), updated_root); + // Accumulate entries: start from previous block's entries or empty for full state + let mut accumulated_entries = if is_full_state { + BTreeMap::new() + } else { + self.storage_entries + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..(account_id, slot_name.clone(), block_num), + ) + .next_back() + .map(|(_, entries)| entries.clone()) + .unwrap_or_default() + }; + + // Apply delta entries (insert or remove if value is EMPTY_WORD) + for (key, value) in &delta_entries { + if *value == EMPTY_WORD { + accumulated_entries.remove(key); + } else { + accumulated_entries.insert(*key, *value); + } + } + + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), accumulated_entries); + tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, ?slot_name, - entries = entries.len(), + delta_entries = delta_entries.len(), "Updated storage map in forest" ); } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index fb6ceb917a..216ef42061 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -205,6 +205,15 @@ fn test_vault_state_persists_across_blocks_without_changes() { let account_id = dummy_account(); let faucet_id = dummy_faucet(); + // Helper to query vault root at 
or before a block (range query) + let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { + forest + .vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) + }; + // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); @@ -228,18 +237,18 @@ fn test_vault_state_persists_across_blocks_without_changes() { let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; assert_ne!(root_after_block_1, root_after_block_6); - // Verify get_vault_root finds the correct previous root for intermediate blocks + // Verify range query finds the correct previous root for intermediate blocks // Block 3 should return block 1's root (most recent before block 3) - let root_at_block_3 = forest.get_vault_root(account_id, BlockNumber::from(3)); - assert_eq!(root_at_block_3, root_after_block_1); + let root_at_block_3 = get_vault_root(&forest, account_id, BlockNumber::from(3)); + assert_eq!(root_at_block_3, Some(root_after_block_1)); // Block 5 should also return block 1's root - let root_at_block_5 = forest.get_vault_root(account_id, BlockNumber::from(5)); - assert_eq!(root_at_block_5, root_after_block_1); + let root_at_block_5 = get_vault_root(&forest, account_id, BlockNumber::from(5)); + assert_eq!(root_at_block_5, Some(root_after_block_1)); // Block 6 should return block 6's root - let root_at_block_6 = forest.get_vault_root(account_id, block_6); - assert_eq!(root_at_block_6, root_after_block_6); + let root_at_block_6 = get_vault_root(&forest, account_id, block_6); + assert_eq!(root_at_block_6, Some(root_after_block_6)); } #[test] diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 43e653dcf6..e191051d95 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -18,13 +18,15 @@ use miden_node_proto::domain::account::{ AccountStorageMapDetails, AccountVaultDetails, 
NetworkAccountPrefix, + SlotData, StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; +use miden_protocol::Word; +use miden_protocol::account::AccountId; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::account::{AccountId, StorageSlotContent}; use miden_protocol::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; @@ -39,7 +41,6 @@ use miden_protocol::crypto::merkle::smt::{ use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; use miden_protocol::transaction::{OutputNote, PartialBlockchain}; use miden_protocol::utils::Serializable; -use miden_protocol::{AccountError, Word}; use tokio::sync::{Mutex, RwLock, oneshot}; use tracing::{info, info_span, instrument}; @@ -1008,6 +1009,10 @@ impl State { /// /// This method queries the database to fetch the account state and processes the detail /// request to return only the requested information. + /// + /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. + /// Returns an error if the forest doesn't have data for the requested slot. + /// All-entries queries (`SlotData::All`) use the forest to return all entries. async fn fetch_public_account_details( &self, account_id: AccountId, @@ -1027,11 +1032,12 @@ impl State { // Validate block exists in the blockchain before querying the database self.validate_block_exists(block_num).await?; - let account_header = - self.db - .select_account_header_at_block(account_id, block_num) - .await? 
- .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; + // Query account header and storage header together in a single DB call + let (account_header, storage_header) = self + .db + .select_account_header_with_storage_header_at_block(account_id, block_num) + .await? + .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; let account_code = match code_commitment { Some(commitment) if commitment == account_header.code_commitment() => None, @@ -1055,25 +1061,31 @@ impl State { None => AccountVaultDetails::empty(), }; - // TODO: don't load the entire storage at once, load what is required - let storage = self.db.select_account_storage_at_block(account_id, block_num).await?; - let storage_header = storage.to_header(); let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); - for StorageMapRequest { slot_name, slot_data } in storage_requests { - let Some(slot) = storage.slots().iter().find(|s| s.name() == &slot_name) else { - continue; - }; + // Use forest for storage map queries + let forest_guard = self.forest.read().await; - let storage_map = match slot.content() { - StorageSlotContent::Map(map) => map, - StorageSlotContent::Value(_) => { - return Err(AccountError::StorageSlotNotMap(slot_name).into()); - }, + for StorageMapRequest { slot_name, slot_data } in storage_requests { + let details = match &slot_data { + SlotData::MapKeys(keys) => forest_guard + .open_storage_map(account_id, slot_name.clone(), block_num, keys) + .ok_or_else(|| DatabaseError::StorageRootNotFound { + account_id, + slot_name: slot_name.to_string(), + block_num, + })? 
+ .map_err(DatabaseError::MerkleError)?, + SlotData::All => forest_guard + .storage_map_entries(account_id, slot_name.clone(), block_num) + .ok_or_else(|| DatabaseError::StorageRootNotFound { + account_id, + slot_name: slot_name.to_string(), + block_num, + })?, }; - let details = AccountStorageMapDetails::new(slot_name, slot_data, storage_map); storage_map_details.push(details); } diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index dccf440205..2918af848e 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -318,8 +318,22 @@ message AccountVaultDetails { // Account storage details for AccountProofResponse message AccountStorageDetails { message AccountStorageMapDetails { - // Wrapper for repeated storage map entries - message MapEntries { + // Wrapper for repeated storage map entries including their proofs. + // Used when specific keys are requested to enable client-side verification. + message MapEntriesWithProofs { + // Definition of individual storage entries including a proof. + message StorageMapEntryWithProof { + primitives.Digest key = 1; + primitives.Digest value = 2; + primitives.SmtOpening proof = 3; + } + + repeated StorageMapEntryWithProof entries = 1; + } + + // Wrapper for repeated storage map entries (without proofs). + // Used when all entries are requested for small maps. + message AllMapEntries { // Definition of individual storage entries. message StorageMapEntry { primitives.Digest key = 1; @@ -332,13 +346,18 @@ message AccountStorageDetails { // Storage slot name. string slot_name = 1; - // A flag that is set to `true` if the number of to-be-returned entries in the - // storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - // endpoint should be used to get all storage map data. + // True when the number of entries exceeds the response limit. + // When set, clients should use the `SyncStorageMaps` endpoint. bool too_many_entries = 2; - // By default we provide all storage entries. 
- MapEntries entries = 3; + // The map entries (with or without proofs). Empty when too_many_entries is true. + oneof entries { + // All storage entries without proofs (for small maps or full requests). + AllMapEntries all_entries = 3; + + // Specific entries with their SMT proofs (for partial requests). + MapEntriesWithProofs entries_with_proofs = 4; + } } // Account storage header (storage slot info for up to 256 slots) From 4f99654053f683ccf7a910f05012bb4ba5ebebf9 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 14 Jan 2026 21:22:40 +0100 Subject: [PATCH 084/125] refactor: [4/4] unify `get_account_details` and `get_account_proof[s]` into `get_account` (#1385) --- CHANGELOG.md | 1 + bin/network-monitor/src/counter.rs | 221 ++++++++++++++++++++++---- crates/proto/src/domain/account.rs | 80 +++++----- crates/proto/src/generated/account.rs | 6 +- crates/proto/src/generated/rpc.rs | 132 +++------------ crates/proto/src/generated/store.rs | 114 ++----------- crates/rpc/README.md | 13 +- crates/rpc/src/server/api.rs | 36 +---- crates/store/README.md | 13 +- crates/store/src/server/rpc_api.rs | 38 +---- crates/store/src/state.rs | 32 ++-- docs/external/src/rpc.md | 11 +- proto/proto/internal/store.proto | 5 +- proto/proto/rpc.proto | 13 +- proto/proto/types/account.proto | 6 +- 15 files changed, 335 insertions(+), 386 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a1f1db02d..5b69d92605 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ ### Changes +- [BREAKING] Removed `GetAccountDetails` RPC endpoint. Use `GetAccount` instead ([#1185](https://github.com/0xMiden/miden-node/issues/1185)). - [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). - Normalize response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). 
- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index b3cd8f94d2..2819c7395e 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -84,55 +84,220 @@ async fn get_genesis_block_header(rpc_client: &mut RpcClient) -> Result Result> { + let request = build_account_request(account_id, false); + let resp = rpc_client.get_account(request).await?.into_inner(); + + let Some(details) = resp.details else { + return Ok(None); + }; + + let storage_details = details.storage_details.context("missing storage details")?; + let storage_header = storage_details.header.context("missing storage header")?; + + Ok(Some(storage_header)) +} + +/// Fetch the latest nonce of the given account from RPC. async fn fetch_counter_value( rpc_client: &mut RpcClient, account_id: AccountId, ) -> Result> { - let id_bytes: [u8; 15] = account_id.into(); - let req = miden_node_proto::generated::account::AccountId { id: id_bytes.to_vec() }; - let resp = rpc_client.get_account_details(req).await?.into_inner(); - if let Some(raw) = resp.details { - let account = Account::read_from_bytes(&raw) - .map_err(|e| anyhow::anyhow!("failed to deserialize account details: {e}"))?; + let Some(storage_header) = fetch_account_storage_header(rpc_client, account_id).await? else { + return Ok(None); + }; + + let counter_slot = storage_header + .slots + .iter() + .find(|slot| slot.slot_name == COUNTER_SLOT_NAME.as_str()) + .context(format!("counter slot '{}' not found", COUNTER_SLOT_NAME.as_str()))?; + + // The counter value is stored as a Word, with the actual u64 value in the last element + let slot_value: Word = counter_slot + .commitment + .as_ref() + .context("missing storage slot value")? 
+ .try_into() + .context("failed to convert slot value to word")?; - // Access the counter slot by name to avoid index-ordering issues - let word = account - .storage() - .get_item(&COUNTER_SLOT_NAME) - .context("failed to get counter storage slot")?; + let value = slot_value.as_elements().last().expect("Word has 4 elements").as_int(); - let value = word[0].as_int(); + Ok(Some(value)) +} - Ok(Some(value)) +/// Build an account request for the given account ID. +/// +/// If `include_code_and_vault` is true, uses dummy commitments to force the server +/// to return code and vault data (server only returns data when our commitment differs). +fn build_account_request( + account_id: AccountId, + include_code_and_vault: bool, +) -> miden_node_proto::generated::rpc::AccountRequest { + let id_bytes: [u8; 15] = account_id.into(); + let account_id_proto = + miden_node_proto::generated::account::AccountId { id: id_bytes.to_vec() }; + + let (code_commitment, asset_vault_commitment) = if include_code_and_vault { + let dummy: miden_node_proto::generated::primitives::Digest = Word::default().into(); + (Some(dummy), Some(dummy)) } else { - Ok(None) + (None, None) + }; + + miden_node_proto::generated::rpc::AccountRequest { + account_id: Some(account_id_proto), + block_num: None, + details: Some(miden_node_proto::generated::rpc::account_request::AccountDetailRequest { + code_commitment, + asset_vault_commitment, + storage_maps: vec![], + }), } } +/// Fetch an account from RPC and reconstruct the full Account. +/// +/// Uses dummy commitments to force the server to return all data (code, vault, storage header). +/// Only supports accounts with value slots; returns an error if storage maps are present. 
async fn fetch_wallet_account( rpc_client: &mut RpcClient, account_id: AccountId, ) -> Result> { - let id_bytes: [u8; 15] = account_id.into(); - let req = miden_node_proto::generated::account::AccountId { id: id_bytes.to_vec() }; - let resp = rpc_client.get_account_details(req).await; + use miden_protocol::account::AccountCode; + use miden_protocol::asset::AssetVault; - // If the RPC call fails, return None - if resp.is_err() { - return Ok(None); - } + let request = build_account_request(account_id, true); - let Some(account_details) = resp.expect("Previously checked for error").into_inner().details - else { + let response = match rpc_client.get_account(request).await { + Ok(response) => response.into_inner(), + Err(e) => { + warn!(account.id = %account_id, err = %e, "failed to fetch wallet account via RPC"); + return Ok(None); + }, + }; + + let Some(details) = response.details else { + if response.witness.is_some() { + info!( + account.id = %account_id, + "account found on-chain but cannot reconstruct full account from RPC response" + ); + } return Ok(None); }; - let account = Account::read_from_bytes(&account_details) - .map_err(|e| anyhow::anyhow!("failed to deserialize account details: {e}"))?; + let header = details.header.context("missing account header")?; + let nonce: u64 = header.nonce; + + let code = details + .code + .map(|code_bytes| AccountCode::read_from_bytes(&code_bytes)) + .transpose() + .context("failed to deserialize account code")? + .context("server did not return account code")?; + + let vault = match details.vault_details { + Some(vault_details) if vault_details.too_many_assets => { + anyhow::bail!("account {account_id} has too many assets, cannot fetch full account"); + }, + Some(vault_details) => { + let assets: Vec = vault_details + .assets + .into_iter() + .map(TryInto::try_into) + .collect::>() + .context("failed to convert assets")?; + AssetVault::new(&assets).context("failed to create vault")? 
+ }, + None => anyhow::bail!("server did not return asset vault for account {account_id}"), + }; + + let storage_details = details.storage_details.context("missing storage details")?; + let storage = build_account_storage(storage_details)?; + + let account = Account::new(account_id, vault, storage, code, Felt::new(nonce), None) + .context("failed to create account")?; + + // Sanity check: verify reconstructed account matches header commitments + let expected_code_commitment: Word = header + .code_commitment + .context("missing code commitment in header")? + .try_into() + .context("invalid code commitment")?; + let expected_vault_root: Word = header + .vault_root + .context("missing vault root in header")? + .try_into() + .context("invalid vault root")?; + let expected_storage_commitment: Word = header + .storage_commitment + .context("missing storage commitment in header")? + .try_into() + .context("invalid storage commitment")?; + + anyhow::ensure!( + account.code().commitment() == expected_code_commitment, + "code commitment mismatch: rebuilt={:?}, expected={:?}", + account.code().commitment(), + expected_code_commitment + ); + anyhow::ensure!( + account.vault().root() == expected_vault_root, + "vault root mismatch: rebuilt={:?}, expected={:?}", + account.vault().root(), + expected_vault_root + ); + anyhow::ensure!( + account.storage().to_commitment() == expected_storage_commitment, + "storage commitment mismatch: rebuilt={:?}, expected={:?}", + account.storage().to_commitment(), + expected_storage_commitment + ); + + info!(account.id = %account_id, "fetched wallet account from RPC"); Ok(Some(account)) } +/// Build account storage from the storage details returned by the server. +/// +/// This function only supports accounts with value slots. If any storage map slots +/// are encountered, an error is returned since the monitor only uses simple accounts. 
+fn build_account_storage( + storage_details: miden_node_proto::generated::rpc::AccountStorageDetails, +) -> Result { + use miden_protocol::account::{AccountStorage, StorageSlot}; + + let storage_header = storage_details.header.context("missing storage header")?; + + let mut slots = Vec::new(); + for slot in storage_header.slots { + let slot_name = miden_protocol::account::StorageSlotName::new(slot.slot_name.clone()) + .context("invalid slot name")?; + let value: Word = slot + .commitment + .context("missing slot value")? + .try_into() + .context("invalid slot value")?; + + // slot_type: 0 = Value, 1 = Map + anyhow::ensure!( + slot.slot_type == 0, + "storage map slots are not supported for this account" + ); + + slots.push(StorageSlot::with_value(slot_name, value)); + } + + AccountStorage::new(slots).context("failed to create account storage") +} + async fn setup_increment_task( config: MonitorConfig, rpc_client: &mut RpcClient, @@ -649,8 +814,6 @@ async fn create_and_submit_network_note( let final_account = executed_tx.final_account().clone(); - let transaction_inputs = executed_tx.tx_inputs().to_bytes(); - // Prove the transaction let prover = LocalTransactionProver::default(); let proven_tx = prover.prove(executed_tx).context("Failed to prove transaction")?; @@ -658,7 +821,7 @@ async fn create_and_submit_network_note( // Submit the proven transaction let request = ProvenTransaction { transaction: proven_tx.to_bytes(), - transaction_inputs: Some(transaction_inputs), + transaction_inputs: None, }; let block_height: BlockNumber = rpc_client diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 6d736b2431..5ddc790fc7 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -136,27 +136,27 @@ impl TryFrom for AccountStorageHeader { // ================================================================================================ /// Represents a request for an account proof. 
-pub struct AccountProofRequest { +pub struct AccountRequest { pub account_id: AccountId, // If not present, the latest account proof references the latest available pub block_num: Option, pub details: Option, } -impl TryFrom for AccountProofRequest { +impl TryFrom for AccountRequest { type Error = ConversionError; - fn try_from(value: proto::rpc::AccountProofRequest) -> Result { - let proto::rpc::AccountProofRequest { account_id, block_num, details } = value; + fn try_from(value: proto::rpc::AccountRequest) -> Result { + let proto::rpc::AccountRequest { account_id, block_num, details } = value; let account_id = account_id - .ok_or(proto::rpc::AccountProofRequest::missing_field(stringify!(account_id)))? + .ok_or(proto::rpc::AccountRequest::missing_field(stringify!(account_id)))? .try_into()?; let block_num = block_num.map(Into::into); let details = details.map(TryFrom::try_from).transpose()?; - Ok(AccountProofRequest { account_id, block_num, details }) + Ok(AccountRequest { account_id, block_num, details }) } } @@ -167,13 +167,13 @@ pub struct AccountDetailRequest { pub storage_requests: Vec, } -impl TryFrom for AccountDetailRequest { +impl TryFrom for AccountDetailRequest { type Error = ConversionError; fn try_from( - value: proto::rpc::account_proof_request::AccountDetailRequest, + value: proto::rpc::account_request::AccountDetailRequest, ) -> Result { - let proto::rpc::account_proof_request::AccountDetailRequest { + let proto::rpc::account_request::AccountDetailRequest { code_commitment, asset_vault_commitment, storage_maps, @@ -197,21 +197,21 @@ pub struct StorageMapRequest { pub slot_data: SlotData, } -impl TryFrom +impl TryFrom for StorageMapRequest { type Error = ConversionError; fn try_from( - value: proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest, + value: proto::rpc::account_request::account_detail_request::StorageMapDetailRequest, ) -> Result { - let 
proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest { + let proto::rpc::account_request::account_detail_request::StorageMapDetailRequest { slot_name, slot_data, } = value; let slot_name = StorageSlotName::new(slot_name)?; - let slot_data = slot_data.ok_or(proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; + let slot_data = slot_data.ok_or(proto::rpc::account_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; Ok(StorageMapRequest { slot_name, slot_data }) } @@ -224,17 +224,23 @@ pub enum SlotData { MapKeys(Vec), } -impl TryFrom - for SlotData +impl + TryFrom< + proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData, + > for SlotData { type Error = ConversionError; - fn try_from(value: proto::rpc::account_proof_request::account_detail_request::storage_map_detail_request::SlotData) -> Result { - use proto::rpc::account_proof_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; + fn try_from( + value: proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData, + ) -> Result { + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; Ok(match value { ProtoSlotData::AllEntries(true) => SlotData::All, - ProtoSlotData::AllEntries(false) => return Err(ConversionError::EnumDiscriminantOutOfRange), + ProtoSlotData::AllEntries(false) => { + return Err(ConversionError::EnumDiscriminantOutOfRange); + }, ProtoSlotData::MapKeys(keys) => { let keys = try_convert(keys.map_keys).collect::, _>>()?; SlotData::MapKeys(keys) @@ -668,35 +674,35 @@ const fn storage_slot_type_to_raw(slot_type: StorageSlotType) -> u32 { //================================================================================================ /// Represents the response to an account proof 
request. -pub struct AccountProofResponse { +pub struct AccountResponse { pub block_num: BlockNumber, pub witness: AccountWitness, pub details: Option, } -impl TryFrom for AccountProofResponse { +impl TryFrom for AccountResponse { type Error = ConversionError; - fn try_from(value: proto::rpc::AccountProofResponse) -> Result { - let proto::rpc::AccountProofResponse { block_num, witness, details } = value; + fn try_from(value: proto::rpc::AccountResponse) -> Result { + let proto::rpc::AccountResponse { block_num, witness, details } = value; let block_num = block_num - .ok_or(proto::rpc::AccountProofResponse::missing_field(stringify!(block_num)))? + .ok_or(proto::rpc::AccountResponse::missing_field(stringify!(block_num)))? .into(); let witness = witness - .ok_or(proto::rpc::AccountProofResponse::missing_field(stringify!(witness)))? + .ok_or(proto::rpc::AccountResponse::missing_field(stringify!(witness)))? .try_into()?; let details = details.map(TryFrom::try_from).transpose()?; - Ok(AccountProofResponse { block_num, witness, details }) + Ok(AccountResponse { block_num, witness, details }) } } -impl From for proto::rpc::AccountProofResponse { - fn from(value: AccountProofResponse) -> Self { - let AccountProofResponse { block_num, witness, details } = value; +impl From for proto::rpc::AccountResponse { + fn from(value: AccountResponse) -> Self { + let AccountResponse { block_num, witness, details } = value; Self { witness: Some(witness.into()), @@ -717,13 +723,11 @@ pub struct AccountDetails { pub storage_details: AccountStorageDetails, } -impl TryFrom for AccountDetails { +impl TryFrom for AccountDetails { type Error = ConversionError; - fn try_from( - value: proto::rpc::account_proof_response::AccountDetails, - ) -> Result { - let proto::rpc::account_proof_response::AccountDetails { + fn try_from(value: proto::rpc::account_response::AccountDetails) -> Result { + let proto::rpc::account_response::AccountDetails { header, code, vault_details, @@ -731,19 +735,17 @@ impl 
TryFrom for AccountDeta } = value; let account_header = header - .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( - header - )))? + .ok_or(proto::rpc::account_response::AccountDetails::missing_field(stringify!(header)))? .try_into()?; let storage_details = storage_details - .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + .ok_or(proto::rpc::account_response::AccountDetails::missing_field(stringify!( storage_details )))? .try_into()?; let vault_details = vault_details - .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + .ok_or(proto::rpc::account_response::AccountDetails::missing_field(stringify!( vault_details )))? .try_into()?; @@ -758,7 +760,7 @@ impl TryFrom for AccountDeta } } -impl From for proto::rpc::account_proof_response::AccountDetails { +impl From for proto::rpc::account_response::AccountDetails { fn from(value: AccountDetails) -> Self { let AccountDetails { account_header, diff --git a/crates/proto/src/generated/account.rs b/crates/proto/src/generated/account.rs index f93017b30c..6ff6135626 100644 --- a/crates/proto/src/generated/account.rs +++ b/crates/proto/src/generated/account.rs @@ -27,7 +27,7 @@ pub struct AccountSummary { /// Represents the storage header of an account. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountStorageHeader { - /// Storage slots with their types and commitments. + /// Storage slots with their types and data. #[prost(message, repeated, tag = "1")] pub slots: ::prost::alloc::vec::Vec, } @@ -42,7 +42,9 @@ pub mod account_storage_header { /// The type of the storage slot. #[prost(uint32, tag = "2")] pub slot_type: u32, - /// The commitment (Word) for this storage slot. + /// The data (Word) for this storage slot. + /// For value slots (slot_type=0), this is the actual value stored in the slot. + /// For map slots (slot_type=1), this is the root of the storage map. 
#[prost(message, optional, tag = "3")] pub commitment: ::core::option::Option, } diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 755009e2c0..caa8258132 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -95,7 +95,7 @@ pub struct MaybeNoteScript { } /// Returns the latest state proof of the specified account. #[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofRequest { +pub struct AccountRequest { /// ID of the account for which we want to get data #[prost(message, optional, tag = "1")] pub account_id: ::core::option::Option, @@ -106,10 +106,10 @@ pub struct AccountProofRequest { pub block_num: ::core::option::Option, /// Request for additional account details; valid only for public accounts. #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, + pub details: ::core::option::Option, } -/// Nested message and enum types in `AccountProofRequest`. -pub mod account_proof_request { +/// Nested message and enum types in `AccountRequest`. +pub mod account_request { /// Request the details for a public account. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountDetailRequest { @@ -171,7 +171,7 @@ pub mod account_proof_request { } /// Represents the result of getting account proof. #[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofResponse { +pub struct AccountResponse { /// The block number at which the account witness was created and the account details were observed. #[prost(message, optional, tag = "1")] pub block_num: ::core::option::Option, @@ -180,10 +180,10 @@ pub struct AccountProofResponse { pub witness: ::core::option::Option, /// Additional details for public accounts. #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, + pub details: ::core::option::Option, } -/// Nested message and enum types in `AccountProofResponse`. 
-pub mod account_proof_response { +/// Nested message and enum types in `AccountResponse`. +pub mod account_response { #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountDetails { /// Account header. @@ -201,7 +201,7 @@ pub mod account_proof_response { pub vault_details: ::core::option::Option, } } -/// Account vault details for AccountProofResponse +/// Account vault details for AccountResponse #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountVaultDetails { /// A flag that is set to true if the account contains too many assets. This indicates @@ -214,7 +214,7 @@ pub struct AccountVaultDetails { #[prost(message, repeated, tag = "2")] pub assets: ::prost::alloc::vec::Vec, } -/// Account storage details for AccountProofResponse +/// Account storage details for AccountResponse #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountStorageDetails { /// Account storage header (storage slot info for up to 256 slots) @@ -747,36 +747,12 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "CheckNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns the latest state of an account with the specified ID. - pub async fn get_account_details( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/GetAccountDetails", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccountDetails")); - self.inner.unary(req, path, codec).await - } /// Returns the latest state proof of the specified account. 
- pub async fn get_account_proof( + pub async fn get_account( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -788,9 +764,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetAccountProof"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetAccount"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccountProof")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccount")); self.inner.unary(req, path, codec).await } /// Returns raw block data for the specified block number. @@ -1171,22 +1147,11 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns the latest state of an account with the specified ID. - async fn get_account_details( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; /// Returns the latest state proof of the specified account. - async fn get_account_proof( + async fn get_account( &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; /// Returns raw block data for the specified block number. 
async fn get_block_by_number( &self, @@ -1485,68 +1450,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/GetAccountDetails" => { - #[allow(non_camel_case_types)] - struct GetAccountDetailsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetAccountDetailsSvc { - type Response = super::super::account::AccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_details(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountDetailsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetAccountProof" => { + "/rpc.Api/GetAccount" => { #[allow(non_camel_case_types)] - struct GetAccountProofSvc(pub Arc); - impl tonic::server::UnaryService - for GetAccountProofSvc { - type Response = super::AccountProofResponse; + struct GetAccountSvc(pub Arc); + impl tonic::server::UnaryService + for GetAccountSvc { + type Response = super::AccountResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_account_proof(&inner, request).await + 
::get_account(&inner, request).await }; Box::pin(fut) } @@ -1557,7 +1477,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetAccountProofSvc(inner); + let method = GetAccountSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index 451922429f..abbd8b25d5 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -373,37 +373,12 @@ pub mod rpc_client { req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "CheckNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns the latest state of an account with the specified ID. - pub async fn get_account_details( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetAccountDetails", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("store.Rpc", "GetAccountDetails")); - self.inner.unary(req, path, codec).await - } /// Returns the latest state proof of the specified account. 
- pub async fn get_account_proof( + pub async fn get_account( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -415,11 +390,9 @@ pub mod rpc_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/GetAccountProof", - ); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetAccount"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetAccountProof")); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetAccount")); self.inner.unary(req, path, codec).await } /// Returns raw block data for the specified block number. @@ -729,20 +702,12 @@ pub mod rpc_server { tonic::Response, tonic::Status, >; - /// Returns the latest state of an account with the specified ID. - async fn get_account_details( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; /// Returns the latest state proof of the specified account. - async fn get_account_proof( + async fn get_account( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns raw block data for the specified block number. 
@@ -1012,72 +977,25 @@ pub mod rpc_server { }; Box::pin(fut) } - "/store.Rpc/GetAccountDetails" => { - #[allow(non_camel_case_types)] - struct GetAccountDetailsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetAccountDetailsSvc { - type Response = super::super::account::AccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_details(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountDetailsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/store.Rpc/GetAccountProof" => { + "/store.Rpc/GetAccount" => { #[allow(non_camel_case_types)] - struct GetAccountProofSvc(pub Arc); + struct GetAccountSvc(pub Arc); impl< T: Rpc, - > tonic::server::UnaryService - for GetAccountProofSvc { - type Response = super::super::rpc::AccountProofResponse; + > tonic::server::UnaryService + for GetAccountSvc { + type Response = super::super::rpc::AccountResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc::AccountProofRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - 
::get_account_proof(&inner, request).await + ::get_account(&inner, request).await }; Box::pin(fut) } @@ -1088,7 +1006,7 @@ pub mod rpc_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetAccountProofSvc(inner); + let method = GetAccountSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/rpc/README.md b/crates/rpc/README.md index da30b7144f..e3f1a6018d 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -15,8 +15,7 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [CheckNullifiers](#checknullifiers) - [SyncNullifiers](#syncnullifiers) -- [GetAccountDetails](#getaccountdetails) -- [GetAccountProofs](#getaccountproofs) +- [GetAccount](#getaccount) - [GetBlockByNumber](#getblockbynumber) - [GetBlockHeaderByNumber](#getblockheaderbynumber) - [GetLimits](#getlimits) @@ -51,15 +50,13 @@ When nullifier checking fails, detailed error information is provided through gR --- -### GetAccountDetails +### GetAccount -Returns the latest state of an account with the specified ID. +Returns an account witness (Merkle proof of inclusion in the account tree) and optionally account details. ---- - -### GetAccountProofs +The witness proves the account's state commitment in the account tree. If details are requested, the response also includes the account's header, code, vault assets, and storage data. Account details are only available for public accounts. -Returns the latest state proofs of the specified accounts. +If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. 
--- diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 86bb35e591..d1c922d886 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -16,7 +16,6 @@ use miden_node_utils::limiter::{ QueryParamNoteTagLimit, QueryParamNullifierLimit, }; -use miden_protocol::account::AccountId; use miden_protocol::batch::ProvenBatch; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::{Note, NoteRecipient, NoteScript}; @@ -456,31 +455,6 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_batch(request).await } - /// Returns details for public (public) account by id. - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_account_details", - skip_all, - ret(level = "debug"), - err - )] - async fn get_account_details( - &self, - request: Request, - ) -> std::result::Result, Status> { - debug!(target: COMPONENT, request = ?request.get_ref()); - - // Validating account using conversion: - let _account_id: AccountId = request - .get_ref() - .clone() - .try_into() - .map_err(|err| Status::invalid_argument(format!("Invalid account id: {err}")))?; - - self.store.clone().get_account_details(request).await - } - #[instrument( parent = None, target = COMPONENT, @@ -503,20 +477,20 @@ impl api_server::Api for RpcService { #[instrument( parent = None, target = COMPONENT, - name = "rpc.server.get_account_proof", + name = "rpc.server.get_account", skip_all, ret(level = "debug"), err )] - async fn get_account_proof( + async fn get_account( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); debug!(target: COMPONENT, ?request); - self.store.clone().get_account_proof(request).await + self.store.clone().get_account(request).await } #[instrument( diff --git a/crates/store/README.md b/crates/store/README.md index 0b12487c18..e3f9a8dde3 100644 --- a/crates/store/README.md +++ 
b/crates/store/README.md @@ -14,8 +14,7 @@ The full gRPC API can be found [here](../../proto/proto/store.proto). - [ApplyBlock](#applyblock) - [CheckNullifiers](#checknullifiers) -- [GetAccountDetails](#getaccountdetails) -- [GetAccountProofs](#getaccountproofs) +- [GetAccount](#getaccount) - [GetBlockByNumber](#getblockbynumber) - [GetBlockHeaderByNumber](#getblockheaderbynumber) - [GetBlockInputs](#getblockinputs) @@ -55,15 +54,13 @@ When nullifier checking fails, detailed error information is provided through gR --- -### GetAccountDetails +### GetAccount -Returns the latest state of an account with the specified ID. +Returns an account witness (Merkle proof of inclusion in the account tree) and optionally account details. ---- - -### GetAccountProofs +The witness proves the account's state commitment in the account tree. If details are requested, the response also includes the account's header, code, vault assets, and storage data. Account details are only available for public accounts. -Returns the latest state proofs of the specified accounts. +If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. --- diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 5ac0148688..a354a9ed9d 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -1,5 +1,4 @@ use miden_node_proto::convert; -use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; use miden_node_utils::limiter::{ @@ -282,29 +281,6 @@ impl rpc_server::Rpc for StoreApi { Ok(Response::new(proto::note::CommittedNoteList { notes })) } - /// Returns details for public (public) account by id. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_account_details", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] - async fn get_account_details( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - let account_id = read_account_id::(Some(request))?; - let account_info: AccountInfo = self.state.get_account_details(account_id).await?; - - // TODO: revisit this, previous implementation was just returning only the summary, but it - // is weird since the details are not empty. - Ok(Response::new((&account_info).into())) - } - #[instrument( parent = None, target = COMPONENT, @@ -334,23 +310,23 @@ impl rpc_server::Rpc for StoreApi { #[instrument( parent = None, target = COMPONENT, - name = "store.rpc_server.get_account_proof", + name = "store.rpc_server.get_account", skip_all, level = "debug", ret(level = "debug"), err )] - async fn get_account_proof( + async fn get_account( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_proof_request = request.try_into()?; + let account_request = request.try_into()?; - let proof = self.state.get_account_proof(account_proof_request).await?; + let account_data = self.state.get_account(account_request).await?; - Ok(Response::new(proof.into())) + Ok(Response::new(account_data.into())) } #[instrument( diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index e191051d95..d9594a87c4 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -12,8 +12,8 @@ use miden_node_proto::domain::account::{ AccountDetailRequest, AccountDetails, AccountInfo, - AccountProofRequest, - AccountProofResponse, + AccountRequest, + AccountResponse, AccountStorageDetails, AccountStorageMapDetails, AccountVaultDetails, @@ -945,16 +945,20 @@ impl State { 
self.db.select_all_network_account_ids(block_range).await } - /// Returns the respective account proof with optional details, such as asset and storage - /// entries. + /// Returns an account witness and optionally account details at a specific block. /// - /// When `block_num` is provided, this method will return the account state at that specific - /// block using both the historical account tree witness and historical database state. - pub async fn get_account_proof( + /// The witness is a Merkle proof of inclusion in the account tree, proving the account's + /// state commitment. If `details` is requested, the method also returns the account's code, + /// vault assets, and storage data. Account details are only available for public accounts. + /// + /// If `block_num` is provided, returns the state at that historical block; otherwise, returns + /// the latest state. Note that historical states are only available for recent blocks close + /// to the chain tip. + pub async fn get_account( &self, - account_request: AccountProofRequest, - ) -> Result { - let AccountProofRequest { block_num, account_id, details } = account_request; + account_request: AccountRequest, + ) -> Result { + let AccountRequest { block_num, account_id, details } = account_request; if details.is_some() && !account_id.has_public_state() { return Err(DatabaseError::AccountNotPublic(account_id)); @@ -968,13 +972,13 @@ impl State { None }; - Ok(AccountProofResponse { block_num, witness, details }) + Ok(AccountResponse { block_num, witness, details }) } - /// Gets the block witness (account tree proof) for the specified account + /// Returns an account witness (Merkle proof of inclusion in the account tree). /// - /// If `block_num` is provided, returns the witness at that historical block, - /// if not present, returns the witness at the latest block. + /// If `block_num` is provided, returns the witness at that historical block; + /// otherwise, returns the witness at the latest block. 
async fn get_account_witness( &self, block_num: Option, diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index 7aeb9a81bb..47706de3b3 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -12,8 +12,7 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] - [CheckNullifiers](#checknullifiers) -- [GetAccountDetails](#getaccountdetails) -- [GetAccountProofs](#getaccountproofs) +- [GetAccount](#getaccount) - [GetBlockByNumber](#getblockbynumber) - [GetBlockHeaderByNumber](#getblockheaderbynumber) - [GetLimits](#getlimits) @@ -100,13 +99,13 @@ match proof.verify_unset(&nullifier, &nullifier_tree_root) { **Limits:** `nullifier` (1000) -### GetAccountDetails +### GetAccount -Request the latest state of an account. +Request an account witness (Merkle proof of inclusion in the account tree) and optionally account details. -### GetAccountProofs +The witness proves the account's state commitment in the account tree. If details are requested, the response also includes the account's header, code, vault assets, and storage data. Account details are only available for public accounts. -Request state proofs for accounts, including specific storage slots. +If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. ### GetBlockByNumber diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 7fef64b137..86f4aeff0d 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -31,11 +31,8 @@ service Rpc { // Verify proofs against the nullifier tree root in the latest block header. rpc CheckNullifiers(rpc.NullifierList) returns (rpc.CheckNullifiersResponse) {} - // Returns the latest state of an account with the specified ID. - rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} - // Returns the latest state proof of the specified account. 
- rpc GetAccountProof(rpc.AccountProofRequest) returns (rpc.AccountProofResponse) {} + rpc GetAccount(rpc.AccountRequest) returns (rpc.AccountResponse) {} // Returns raw block data for the specified block number. rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index 2918af848e..d32459cf1d 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -30,11 +30,8 @@ service Api { // Verify proofs against the nullifier tree root in the latest block header. rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - // Returns the latest state of an account with the specified ID. - rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} - // Returns the latest state proof of the specified account. - rpc GetAccountProof(AccountProofRequest) returns (AccountProofResponse) {} + rpc GetAccount(AccountRequest) returns (AccountResponse) {} // Returns raw block data for the specified block number. rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} @@ -223,7 +220,7 @@ message MaybeNoteScript { // ================================================================================================ // Returns the latest state proof of the specified account. -message AccountProofRequest { +message AccountRequest { // Request the details for a public account. message AccountDetailRequest { // Represents a storage slot index and the associated map keys. @@ -276,7 +273,7 @@ message AccountProofRequest { } // Represents the result of getting account proof. -message AccountProofResponse { +message AccountResponse { message AccountDetails { // Account header. 
@@ -303,7 +300,7 @@ message AccountProofResponse { optional AccountDetails details = 3; } -// Account vault details for AccountProofResponse +// Account vault details for AccountResponse message AccountVaultDetails { // A flag that is set to true if the account contains too many assets. This indicates // to the user that `SyncAccountVault` endpoint should be used to retrieve the @@ -315,7 +312,7 @@ message AccountVaultDetails { repeated primitives.Asset assets = 2; } -// Account storage details for AccountProofResponse +// Account storage details for AccountResponse message AccountStorageDetails { message AccountStorageMapDetails { // Wrapper for repeated storage map entries including their proofs. diff --git a/proto/proto/types/account.proto b/proto/proto/types/account.proto index 15ae475b3c..e61db64ae2 100644 --- a/proto/proto/types/account.proto +++ b/proto/proto/types/account.proto @@ -38,11 +38,13 @@ message AccountStorageHeader { // The type of the storage slot. uint32 slot_type = 2; - // The commitment (Word) for this storage slot. + // The data (Word) for this storage slot. + // For value slots (slot_type=0), this is the actual value stored in the slot. + // For map slots (slot_type=1), this is the root of the storage map. primitives.Digest commitment = 3; } - // Storage slots with their types and commitments. + // Storage slots with their types and data. repeated StorageSlot slots = 1; } From 243080b18381da305c5c4159ac371ca1f24e6f7c Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 15 Jan 2026 02:56:43 -0300 Subject: [PATCH 085/125] fix(monitor): remove long running spans (#1515) This removes root spans which run essentially forever, rendering them useless. 
--- bin/network-monitor/src/counter.rs | 18 ------------------ bin/network-monitor/src/explorer.rs | 1 - bin/network-monitor/src/faucet.rs | 8 -------- bin/network-monitor/src/frontend.rs | 1 - bin/network-monitor/src/remote_prover.rs | 8 -------- bin/network-monitor/src/status.rs | 16 ---------------- 6 files changed, 52 deletions(-) diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 2819c7395e..b64a85f2b8 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -373,15 +373,6 @@ async fn setup_increment_task( /// # Returns /// /// This function runs indefinitely, only returning on error. -#[instrument( - parent = None, - target = COMPONENT, - name = "network_monitor.counter.run_increment_task", - skip_all, - level = "info", - ret(level = "debug"), - err -)] pub async fn run_increment_task( config: MonitorConfig, tx: watch::Sender, @@ -541,15 +532,6 @@ fn send_status(tx: &watch::Sender, status: ServiceStatus) -> Resu /// # Returns /// /// This function runs indefinitely, only returning on error. -#[instrument( - parent = None, - target = COMPONENT, - name = "network_monitor.counter.run_counter_tracking_task", - skip_all, - level = "info", - ret(level = "debug"), - err -)] pub async fn run_counter_tracking_task( config: MonitorConfig, tx: watch::Sender, diff --git a/bin/network-monitor/src/explorer.rs b/bin/network-monitor/src/explorer.rs index 2053ce22ca..f912a62dff 100644 --- a/bin/network-monitor/src/explorer.rs +++ b/bin/network-monitor/src/explorer.rs @@ -64,7 +64,6 @@ const LATEST_BLOCK_REQUEST: GraphqlRequest = GraphqlRequest { /// /// `Ok(())` if the monitoring task runs and completes successfully, or an error if there are /// connection issues or failures while checking the explorer status. 
-#[instrument(target = COMPONENT, name = "explorer-status-task", skip_all)] pub async fn run_explorer_status_task( explorer_url: Url, name: String, diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 6569a22fa4..84c0b0f3ac 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -88,14 +88,6 @@ pub struct GetMetadataResponse { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. -#[instrument( - parent = None, - target = COMPONENT, - name = "network_monitor.faucet.run_faucet_test_task", - skip_all, - level = "info", - ret(level = "debug") -)] pub async fn run_faucet_test_task( faucet_url: Url, status_sender: watch::Sender, diff --git a/bin/network-monitor/src/frontend.rs b/bin/network-monitor/src/frontend.rs index dd6a8fc5ce..035db669cd 100644 --- a/bin/network-monitor/src/frontend.rs +++ b/bin/network-monitor/src/frontend.rs @@ -36,7 +36,6 @@ pub struct ServerState { /// /// * `server_state` - The server state containing watch receivers for all services. /// * `config` - The configuration of the network. -#[instrument(target = COMPONENT, name = "frontend.serve", skip_all, fields(port = %config.port))] pub async fn serve(server_state: ServerState, config: MonitorConfig) { // build our application with routes let app = Router::new() diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index 4331d80334..791315d3b8 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -87,14 +87,6 @@ pub struct ProverTestDetails { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. 
-#[instrument( - parent = None, - target = COMPONENT, - name = "network_monitor.remote_prover.run_remote_prover_test_task", - skip_all, - level = "info", - ret(level = "debug") -)] pub async fn run_remote_prover_test_task( prover_url: Url, name: &str, diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index 11c77593ef..c6cc762799 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -310,14 +310,6 @@ impl RpcStatusDetails { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. -#[instrument( - parent = None, - target = COMPONENT, - name = "network_monitor.status.run_rpc_status_task", - skip_all, - level = "info", - ret(level = "debug") -)] pub async fn run_rpc_status_task( rpc_url: Url, status_sender: watch::Sender, @@ -422,14 +414,6 @@ pub(crate) async fn check_rpc_status( /// /// `Ok(())` if the monitoring task runs and completes successfully, or an error if there are /// connection issues or failures while checking the remote prover status. 
-#[instrument( - parent = None, - target = COMPONENT, - name = "network_monitor.status.run_remote_prover_status_task", - skip_all, - level = "info", - ret(level = "debug") -)] pub async fn run_remote_prover_status_task( prover_url: Url, name: String, From f0763aaf5cd98623388be36f24fe6bb49a84ffc0 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 15 Jan 2026 08:44:58 -0300 Subject: [PATCH 086/125] feat(monitor): mark the rpc as inhealhy if chain is stale (#1512) --- CHANGELOG.md | 1 + bin/network-monitor/.env | 1 + bin/network-monitor/README.md | 3 + bin/network-monitor/src/config.rs | 13 ++++ bin/network-monitor/src/monitor/tasks.rs | 21 ++++- bin/network-monitor/src/status.rs | 98 +++++++++++++++++++++++- 6 files changed, 130 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b69d92605..f0c115da2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)). - Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). - Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496). +- The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). 
### Changes diff --git a/bin/network-monitor/.env b/bin/network-monitor/.env index 8474b06816..ad861da56d 100644 --- a/bin/network-monitor/.env +++ b/bin/network-monitor/.env @@ -5,6 +5,7 @@ MIDEN_MONITOR_REQUEST_TIMEOUT=10s # rpc checks MIDEN_MONITOR_RPC_URL=https://rpc.devnet.miden.io/ MIDEN_MONITOR_STATUS_CHECK_INTERVAL=30s +MIDEN_MONITOR_STALE_CHAIN_TIP_THRESHOLD=1m # remote prover checks MIDEN_MONITOR_REMOTE_PROVER_URLS=https://tx-prover.devnet.miden.io/,https://batch-prover.devnet.miden.io/ MIDEN_MONITOR_REMOTE_PROVER_TEST_INTERVAL=2m diff --git a/bin/network-monitor/README.md b/bin/network-monitor/README.md index 47063b9238..2affde86de 100644 --- a/bin/network-monitor/README.md +++ b/bin/network-monitor/README.md @@ -36,6 +36,7 @@ miden-network-monitor start --faucet-url http://localhost:8080 --enable-otel - `--faucet-test-interval`: Interval at which to test the faucet services (default: `2m`) - `--status-check-interval`: Interval at which to check the status of the services (default: `3s`) - `--request-timeout`: Timeout for outgoing requests (default: `10s`) +- `--stale-chain-tip-threshold`: Maximum time without a chain tip update before marking RPC as unhealthy (default: `1m`) - `--port, -p`: Web server port (default: `3000`) - `--enable-otel`: Enable OpenTelemetry tracing - `--wallet-filepath`: Path where the wallet account is located (default: `wallet_account.mac`) @@ -58,6 +59,7 @@ If command-line arguments are not provided, the application falls back to enviro - `MIDEN_MONITOR_FAUCET_TEST_INTERVAL`: Interval at which to test the faucet services - `MIDEN_MONITOR_STATUS_CHECK_INTERVAL`: Interval at which to check the status of the services - `MIDEN_MONITOR_REQUEST_TIMEOUT`: Timeout for outgoing requests +- `MIDEN_MONITOR_STALE_CHAIN_TIP_THRESHOLD`: Maximum time without a chain tip update before marking RPC as unhealthy - `MIDEN_MONITOR_PORT`: Web server port - `MIDEN_MONITOR_ENABLE_OTEL`: Enable OpenTelemetry tracing - `MIDEN_MONITOR_WALLET_FILEPATH`: 
Path where the wallet account is located @@ -147,6 +149,7 @@ The monitor application provides real-time status monitoring for the following M ### RPC Service - **Service Health**: Overall RPC service availability and status +- **Stale Chain Tip Detection**: Monitors chain tip progress and marks RPC as unhealthy if the chain tip hasn't changed within the configured threshold (default: 1 minute) - **Version Information**: RPC service version - **Genesis Commitment**: Network genesis commitment (with copy-to-clipboard functionality) - **Store Status**: diff --git a/bin/network-monitor/src/config.rs b/bin/network-monitor/src/config.rs index c30735c02d..7443b759f6 100644 --- a/bin/network-monitor/src/config.rs +++ b/bin/network-monitor/src/config.rs @@ -165,4 +165,17 @@ pub struct MonitorConfig { help = "The URL of the explorer service" )] pub explorer_url: Option, + + /// Maximum time without a chain tip update before marking RPC as unhealthy. + /// + /// If the chain tip does not increment within this duration, the RPC service will be + /// marked as unhealthy with a stale chain tip error. 
+ #[arg( + long = "stale-chain-tip-threshold", + env = "MIDEN_MONITOR_STALE_CHAIN_TIP_THRESHOLD", + default_value = "1m", + value_parser = humantime::parse_duration, + help = "Maximum time without a chain tip update before marking RPC as unhealthy" + )] + pub stale_chain_tip_threshold: Duration, } diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index be3be5f3cf..c5b773dc32 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -26,6 +26,7 @@ use crate::frontend::{ServerState, serve}; use crate::remote_prover::{ProofType, generate_prover_test_payload, run_remote_prover_test_task}; use crate::status::{ ServiceStatus, + StaleChainTracker, check_remote_prover_status, check_rpc_status, run_remote_prover_status_task, @@ -75,18 +76,32 @@ impl Tasks { .connect_lazy::(); let current_time = current_unix_timestamp_secs(); - let initial_rpc_status = - check_rpc_status(&mut rpc, config.rpc_url.to_string(), current_time).await; + let mut stale_tracker = StaleChainTracker::new(config.stale_chain_tip_threshold); + let initial_rpc_status = check_rpc_status( + &mut rpc, + config.rpc_url.to_string(), + current_time, + &mut stale_tracker, + ) + .await; // Spawn the RPC checker let (rpc_tx, rpc_rx) = watch::channel(initial_rpc_status); let rpc_url = config.rpc_url.clone(); let status_check_interval = config.status_check_interval; let request_timeout = config.request_timeout; + let stale_chain_tip_threshold = config.stale_chain_tip_threshold; let id = self .handles .spawn(async move { - run_rpc_status_task(rpc_url, rpc_tx, status_check_interval, request_timeout).await; + run_rpc_status_task( + rpc_url, + rpc_tx, + status_check_interval, + request_timeout, + stale_chain_tip_threshold, + ) + .await; }) .id(); self.names.insert(id, "rpc-checker".to_string()); diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index c6cc762799..759fb0ed96 100644 --- 
a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -22,6 +22,58 @@ use crate::faucet::FaucetTestDetails; use crate::remote_prover::{ProofType, ProverTestDetails}; use crate::{COMPONENT, current_unix_timestamp_secs}; +// STALE CHAIN TIP TRACKER +// ================================================================================================ + +/// Tracks the chain tip and detects when it becomes stale. +/// +/// This struct monitors the chain tip from RPC status responses and determines if the chain +/// has stopped making progress by comparing the time since the last chain tip change against +/// a configurable threshold. +#[derive(Debug)] +pub struct StaleChainTracker { + /// The last observed chain tip from the store. + last_chain_tip: Option, + /// Unix timestamp when the chain tip was last observed to change. + last_chain_tip_update: Option, + /// Maximum time without a chain tip update before marking as stale. + stale_threshold_secs: u64, +} + +impl StaleChainTracker { + /// Creates a new stale chain tracker with the given threshold. + pub fn new(stale_threshold: Duration) -> Self { + Self { + last_chain_tip: None, + last_chain_tip_update: None, + stale_threshold_secs: stale_threshold.as_secs(), + } + } + + /// Updates the tracker with a new chain tip observation and returns whether the chain is + /// stale. 
+ /// + /// The chain is considered stale if the tip hasn't changed for longer than the configured + /// threshold + pub fn update(&mut self, chain_tip: u32, current_time: u64) -> Option { + match self.last_chain_tip { + Some(last_tip) if last_tip == chain_tip => { + if let Some(last_update) = self.last_chain_tip_update { + let elapsed = current_time.saturating_sub(last_update); + if elapsed > self.stale_threshold_secs { + return Some(elapsed); + } + } + }, + _ => { + self.last_chain_tip = Some(chain_tip); + self.last_chain_tip_update = Some(current_time); + }, + } + None + } +} + // STATUS // ================================================================================================ @@ -299,13 +351,18 @@ impl RpcStatusDetails { /// Runs a task that continuously checks RPC status and updates a watch channel. /// /// This function spawns a task that periodically checks the RPC service status -/// and sends updates through a watch channel. +/// and sends updates through a watch channel. It also detects stale chain tips +/// and marks the RPC as unhealthy if the chain tip hasn't changed for longer +/// than the configured threshold. /// /// # Arguments /// /// * `rpc_url` - The URL of the RPC service. /// * `status_sender` - The sender for the watch channel. /// * `status_check_interval` - The interval at which to check the status of the services. +/// * `request_timeout` - The timeout for outgoing requests. +/// * `stale_chain_tip_threshold` - Maximum time without a chain tip update before marking as +/// unhealthy. 
/// /// # Returns /// @@ -315,6 +372,7 @@ pub async fn run_rpc_status_task( status_sender: watch::Sender, status_check_interval: Duration, request_timeout: Duration, + stale_chain_tip_threshold: Duration, ) { let url_str = rpc_url.to_string(); let mut rpc = ClientBuilder::new(rpc_url) @@ -329,12 +387,15 @@ pub async fn run_rpc_status_task( let mut interval = tokio::time::interval(status_check_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + let mut stale_tracker = StaleChainTracker::new(stale_chain_tip_threshold); + loop { interval.tick().await; let current_time = current_unix_timestamp_secs(); - let status = check_rpc_status(&mut rpc, url_str.clone(), current_time).await; + let status = + check_rpc_status(&mut rpc, url_str.clone(), current_time, &mut stale_tracker).await; // Send the status update; exit if no receivers (shutdown signal) if status_sender.send(status).is_err() { @@ -346,13 +407,16 @@ pub async fn run_rpc_status_task( /// Checks the status of the RPC service. /// -/// This function checks the status of the RPC service. +/// This function checks the status of the RPC service and detects stale chain tips. +/// If the chain tip hasn't changed for longer than the configured threshold, the RPC +/// is marked as unhealthy. /// /// # Arguments /// /// * `rpc` - The RPC client. /// * `url` - The URL of the RPC service. /// * `current_time` - The current time. +/// * `stale_tracker` - Tracker for detecting stale chain tips. 
/// /// # Returns /// @@ -369,17 +433,43 @@ pub(crate) async fn check_rpc_status( rpc: &mut miden_node_proto::clients::RpcClient, url: String, current_time: u64, + stale_tracker: &mut StaleChainTracker, ) -> ServiceStatus { match rpc.status(()).await { Ok(response) => { let status = response.into_inner(); + let rpc_details = RpcStatusDetails::from_rpc_status(status, url); + + // Check for stale chain tip using the store's chain tip + if let Some(store_status) = &rpc_details.store_status { + if let Some(stale_duration) = + stale_tracker.update(store_status.chain_tip, current_time) + { + debug!( + target: COMPONENT, + chain_tip = store_status.chain_tip, + stale_duration_secs = stale_duration, + "Chain tip is stale" + ); + return ServiceStatus { + name: "RPC".to_string(), + status: Status::Unhealthy, + last_checked: current_time, + error: Some(format!( + "Chain tip {} has not changed for {} seconds", + store_status.chain_tip, stale_duration + )), + details: ServiceDetails::RpcStatus(rpc_details), + }; + } + } ServiceStatus { name: "RPC".to_string(), status: Status::Healthy, last_checked: current_time, error: None, - details: ServiceDetails::RpcStatus(RpcStatusDetails::from_rpc_status(status, url)), + details: ServiceDetails::RpcStatus(rpc_details), } }, Err(e) => { From 8cbce7c573a34282d8938edd6d7397b1922b0780 Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Fri, 16 Jan 2026 13:54:18 +1300 Subject: [PATCH 087/125] feat: Integrate ntx-builder with validator (#1453) --- CHANGELOG.md | 1 + bin/node/src/commands/bundled.rs | 3 + crates/ntx-builder/src/actor/execute.rs | 64 +++++++++++++++---- crates/ntx-builder/src/actor/mod.rs | 15 +++++ crates/ntx-builder/src/block_producer.rs | 6 +- crates/ntx-builder/src/builder.rs | 5 ++ .../src/remote_prover/tx_prover.rs | 6 +- 7 files changed, 84 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0c115da2f..d3efc9791b 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -12,6 +12,7 @@ - Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). - The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). - Renamed card's names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). +- Integrated NTX Builder with validator via `SubmitProvenTransaction` RPC ([#1453](https://github.com/0xMiden/miden-node/pull/1453)). - Added pagination to `GetNetworkAccountIds` endpoint ([#1452](https://github.com/0xMiden/miden-node/pull/1452)). - Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)). - Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 594959aa40..1a864a3817 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -311,6 +311,8 @@ impl BundledCommand { .context("Failed to parse URL")?; if should_start_ntx_builder { + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; let id = join_set .spawn(async move { let block_producer_url = @@ -319,6 +321,7 @@ impl BundledCommand { NetworkTransactionBuilder::new( store_ntx_builder_url, block_producer_url, + validator_url, ntx_builder.tx_prover_url, ntx_builder.ticker_interval, checkpoint, diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 83c1d09c92..cb38dc89a8 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -1,5 +1,7 @@ use std::collections::BTreeSet; +use miden_node_proto::clients::ValidatorClient; +use miden_node_proto::generated::{self as proto}; use miden_node_utils::lru_cache::LruCache; use 
miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::account::{ @@ -27,6 +29,7 @@ use miden_protocol::vm::FutureMaybeSend; use miden_protocol::{TransactionInputError, Word}; use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; use miden_tx::auth::UnreachableAuth; +use miden_tx::utils::Serializable; use miden_tx::{ DataStore, DataStoreError, @@ -75,8 +78,12 @@ type NtxResult = Result; /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { + /// TODO(sergerad): Remove block producer client when block proving moved to store. block_producer: BlockProducerClient, + /// Client for validating transactions via the Validator. + validator: ValidatorClient, + /// The prover to delegate proofs to. /// /// Defaults to local proving if unset. This should be avoided in production as this is @@ -94,12 +101,14 @@ impl NtxContext { /// Creates a new [`NtxContext`] instance. pub fn new( block_producer: BlockProducerClient, + validator: ValidatorClient, prover: Option, store: StoreClient, script_cache: LruCache, ) -> Self { Self { block_producer, + validator, prover, store, script_cache, @@ -147,7 +156,7 @@ impl NtxContext { .set_attribute("reference_block.number", chain_tip_header.block_num()); async move { - async move { + Box::pin(async move { let data_store = NtxDataStore::new( account, chain_tip_header, @@ -156,14 +165,26 @@ impl NtxContext { self.script_cache.clone(), ); + // Filter notes. let notes = notes.into_iter().map(Note::from).collect::>(); - let (successful, failed) = self.filter_notes(&data_store, notes).await?; - let executed = Box::pin(self.execute(&data_store, successful)).await?; - let proven = Box::pin(self.prove(executed.into())).await?; - let tx_id = proven.id(); - self.submit(proven).await?; - Ok((tx_id, failed)) - } + let (successful_notes, failed_notes) = + self.filter_notes(&data_store, notes).await?; + + // Execute transaction. 
+ let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; + + // Prove transaction. + let tx_inputs: TransactionInputs = executed_tx.into(); + let proven_tx = Box::pin(self.prove(&tx_inputs)).await?; + + // Validate proven transaction. + self.validate(&proven_tx, &tx_inputs).await?; + + // Submit transaction to block producer. + self.submit(&proven_tx).await?; + + Ok((proven_tx.id(), failed_notes)) + }) .in_current_span() .await .inspect_err(|err| tracing::Span::current().set_error(err)) @@ -245,10 +266,12 @@ impl NtxContext { /// Delegates the transaction proof to the remote prover if configured, otherwise performs the /// proof locally. #[instrument(target = COMPONENT, name = "ntx.execute_transaction.prove", skip_all, err)] - async fn prove(&self, tx_inputs: TransactionInputs) -> NtxResult { + async fn prove(&self, tx_inputs: &TransactionInputs) -> NtxResult { if let Some(remote) = &self.prover { remote.prove(tx_inputs).await } else { + // Only perform tx inputs clone for local proving. + let tx_inputs = tx_inputs.clone(); tokio::task::spawn_blocking(move || LocalTransactionProver::default().prove(tx_inputs)) .await .map_err(NtxError::Panic)? @@ -258,12 +281,31 @@ impl NtxContext { /// Submits the transaction to the block producer. #[instrument(target = COMPONENT, name = "ntx.execute_transaction.submit", skip_all, err)] - async fn submit(&self, tx: ProvenTransaction) -> NtxResult<()> { + async fn submit(&self, proven_tx: &ProvenTransaction) -> NtxResult<()> { self.block_producer - .submit_proven_transaction(tx) + .submit_proven_transaction(proven_tx) .await .map_err(NtxError::Submission) } + + /// Validates the transaction against the Validator. 
+ #[instrument(target = COMPONENT, name = "ntx.execute_transaction.validate", skip_all, err)] + async fn validate( + &self, + proven_tx: &ProvenTransaction, + tx_inputs: &TransactionInputs, + ) -> NtxResult<()> { + let request = proto::transaction::ProvenTransaction { + transaction: proven_tx.to_bytes(), + transaction_inputs: Some(tx_inputs.to_bytes()), + }; + self.validator + .clone() + .submit_proven_transaction(request) + .await + .map_err(NtxError::Submission)?; + Ok(()) + } } // NETWORK TRANSACTION DATA STORE diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index 60e7df54c6..9a595a9d35 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -4,10 +4,12 @@ mod inflight_note; mod note_state; use std::sync::Arc; +use std::time::Duration; use account_state::{NetworkAccountState, TransactionCandidate}; use execute::NtxError; use futures::FutureExt; +use miden_node_proto::clients::{Builder, ValidatorClient}; use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_utils::ErrorReport; @@ -54,6 +56,8 @@ pub struct AccountActorContext { pub store: StoreClient, /// Address of the block producer gRPC server. pub block_producer_url: Url, + /// Address of the Validator server. + pub validator_url: Url, /// Address of the remote prover. If `None`, transactions will be proven locally, which is // undesirable due to the performance impact. pub tx_prover_url: Option, @@ -153,7 +157,9 @@ pub struct AccountActor { mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, + // TODO(sergerad): Remove block producer when block proving moved to store. 
block_producer: BlockProducerClient, + validator: ValidatorClient, prover: Option, chain_state: Arc>, script_cache: LruCache, @@ -169,6 +175,13 @@ impl AccountActor { cancel_token: CancellationToken, ) -> Self { let block_producer = BlockProducerClient::new(actor_context.block_producer_url.clone()); + let validator = Builder::new(actor_context.validator_url.clone()) + .without_tls() + .with_timeout(Duration::from_secs(10)) + .without_metadata_version() + .without_metadata_genesis() + .with_otel_context_injection() + .connect_lazy::(); let prover = actor_context.tx_prover_url.clone().map(RemoteTransactionProver::new); Self { origin, @@ -177,6 +190,7 @@ impl AccountActor { event_rx, cancel_token, block_producer, + validator, prover, chain_state: actor_context.chain_state.clone(), script_cache: actor_context.script_cache.clone(), @@ -276,6 +290,7 @@ impl AccountActor { // Execute the selected transaction. let context = execute::NtxContext::new( self.block_producer.clone(), + self.validator.clone(), self.prover.clone(), self.store.clone(), self.script_cache.clone(), diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index 7c1af9d8f4..87d3da7e6f 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -41,14 +41,16 @@ impl BlockProducerClient { Self { client: block_producer } } + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.submit_proven_transaction", skip_all, err)] pub async fn submit_proven_transaction( &self, - proven_tx: ProvenTransaction, + proven_tx: &ProvenTransaction, ) -> Result<(), Status> { let request = proto::transaction::ProvenTransaction { transaction: proven_tx.to_bytes(), - transaction_inputs: None, + transaction_inputs: None, /* Transaction inputs are only required for Validator + * transaction re-execution. 
*/ }; self.client.clone().submit_proven_transaction(request).await?; diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 3d0a00aabd..5a1b091a69 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -73,6 +73,8 @@ pub struct NetworkTransactionBuilder { store_url: Url, /// Address of the block producer gRPC server. block_producer_url: Url, + /// Address of the Validator server. + validator_url: Url, /// Address of the remote prover. If `None`, transactions will be proven locally, which is /// undesirable due to the performance impact. tx_prover_url: Option, @@ -95,6 +97,7 @@ impl NetworkTransactionBuilder { pub fn new( store_url: Url, block_producer_url: Url, + validator_url: Url, tx_prover_url: Option, ticker_interval: Duration, bp_checkpoint: Arc, @@ -105,6 +108,7 @@ impl NetworkTransactionBuilder { Self { store_url, block_producer_url, + validator_url, tx_prover_url, ticker_interval, bp_checkpoint, @@ -141,6 +145,7 @@ impl NetworkTransactionBuilder { let actor_context = AccountActorContext { block_producer_url: self.block_producer_url.clone(), + validator_url: self.validator_url.clone(), tx_prover_url: self.tx_prover_url.clone(), chain_state: chain_state.clone(), store: store.clone(), diff --git a/crates/remote-prover-client/src/remote_prover/tx_prover.rs b/crates/remote-prover-client/src/remote_prover/tx_prover.rs index aea58aa11d..3bee6199fa 100644 --- a/crates/remote-prover-client/src/remote_prover/tx_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/tx_prover.rs @@ -104,7 +104,7 @@ impl RemoteTransactionProver { impl RemoteTransactionProver { pub fn prove( &self, - tx_inputs: TransactionInputs, + tx_inputs: &TransactionInputs, ) -> impl FutureMaybeSend> { async move { use miden_protocol::utils::Serializable; @@ -153,8 +153,8 @@ impl TryFrom for ProvenTransaction { } } -impl From for proto::ProofRequest { - fn from(tx_inputs: TransactionInputs) -> Self { +impl 
From<&TransactionInputs> for proto::ProofRequest { + fn from(tx_inputs: &TransactionInputs) -> Self { proto::ProofRequest { proof_type: proto::ProofType::Transaction.into(), payload: tx_inputs.to_bytes(), From ec27b95767f63f00d74500c6500b88585830c2f4 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 16 Jan 2026 10:11:38 +0200 Subject: [PATCH 088/125] feat(block-producer): detect and abort on chain desync (#1520) --- CHANGELOG.md | 1 + .../block-producer/src/block_builder/mod.rs | 45 ++++++++++++++----- crates/block-producer/src/errors.rs | 15 +++++-- crates/block-producer/src/server/mod.rs | 9 ++-- 4 files changed, 50 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d3efc9791b..e8354c749c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ - Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). - Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496). - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). +- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). 
### Changes diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index e63bc81846..a3a36ec4f0 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,8 +1,8 @@ use std::ops::{Deref, Range}; use std::sync::Arc; +use anyhow::Context; use futures::FutureExt; -use futures::never::Never; use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; @@ -79,13 +79,15 @@ impl BlockBuilder { } /// Starts the [`BlockBuilder`], infinitely producing blocks at the configured interval. /// + /// Returns only if there was a fatal, unrecoverable error. + /// /// Block production is sequential and consists of /// /// 1. Pulling the next set of batches from the mempool /// 2. Compiling these batches into the next block /// 3. Proving the block (this is simulated using random sleeps) /// 4. Committing the block to the store - pub async fn run(self, mempool: SharedMempool) { + pub async fn run(self, mempool: SharedMempool) -> anyhow::Result<()> { assert!( self.failure_rate < 1.0 && self.failure_rate.is_sign_positive(), "Failure rate must be a percentage" @@ -100,8 +102,16 @@ impl BlockBuilder { loop { interval.tick().await; - // Errors are handled internally by the block building process. - self.build_block(&mempool).await; + // Exit if a fatal error occurred. + // + // No need for error logging since this is handled inside the function. + if let err @ Err(BuildBlockError::Desync { local_chain_tip, .. }) = + self.build_block(&mempool).await + { + return err.with_context(|| { + format!("fatal error while building block {}", local_chain_tip.child()) + }); + } } } @@ -117,7 +127,7 @@ impl BlockBuilder { /// - A failed stage will emit an error event, and both its own span and the root span will be /// marked as errors. 
#[instrument(parent = None, target = COMPONENT, name = "block_builder.build_block", skip_all)] - async fn build_block(&self, mempool: &SharedMempool) { + async fn build_block(&self, mempool: &SharedMempool) -> Result<(), BuildBlockError> { use futures::TryFutureExt; let selected = Self::select_block(mempool).inspect(SelectedBlock::inject_telemetry).await; @@ -138,10 +148,10 @@ impl BlockBuilder { .and_then(|proven_block| self.commit_block(mempool, proven_block)) // Handle errors by propagating the error to the root span and rolling back the block. .inspect_err(|err| Span::current().set_error(err)) - .or_else(|_err| self.rollback_block(mempool, block_num).never_error()) - // All errors were handled and discarded above, so this is just type juggling - // to drop the result. - .unwrap_or_else(|_: Never| ()) + .or_else(|err| async { + self.rollback_block(mempool, block_num).await; + Err(err) + }) .await } @@ -172,7 +182,7 @@ impl BlockBuilder { &self, selected_block: SelectedBlock, ) -> Result { - let SelectedBlock { block_number: _, batches } = selected_block; + let SelectedBlock { block_number, batches } = selected_block; let batch_iter = batches.iter(); @@ -207,6 +217,21 @@ impl BlockBuilder { .await .map_err(BuildBlockError::GetBlockInputsFailed)?; + // Check that the latest committed block in the store matches our expectations. + // + // Desync can occur since the mempool and store are separate components. One example is if + // the block-producer's apply_block gRPC request times out, rolling back the block locally, + // but the store still committed the block on its end. 
+ let store_chain_tip = inputs.prev_block_header().block_num(); + if store_chain_tip.child() != block_number { + return Err(BuildBlockError::Desync { + local_chain_tip: block_number + .parent() + .expect("block being built always has a parent"), + store_chain_tip, + }); + } + Ok(BlockBatchesAndInputs { batches, inputs }) } diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 8c0dc58661..38ac067163 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -20,15 +20,15 @@ use crate::validator::ValidatorError; pub enum BlockProducerError { /// A block-producer task completed although it should have ran indefinitely. #[error("task {task} completed unexpectedly")] - TaskFailedSuccessfully { task: &'static str }, + UnexpectedTaskCompletion { task: &'static str }, /// A block-producer task panic'd. - #[error("error joining {task} task")] + #[error("task {task} panic'd")] JoinError { task: &'static str, source: JoinError }, /// A block-producer task reported a transport error. 
- #[error("task {task} had a transport error")] - TonicTransportError { + #[error("task {task} failed")] + TaskError { task: &'static str, source: anyhow::Error, }, @@ -209,6 +209,13 @@ pub enum BuildBlockError { StoreApplyBlockFailed(#[source] StoreError), #[error("failed to get block inputs from store")] GetBlockInputsFailed(#[source] StoreError), + #[error( + "Desync detected between block-producer's chain tip {local_chain_tip} and the store's {store_chain_tip}" + )] + Desync { + local_chain_tip: BlockNumber, + store_chain_tip: BlockNumber, + }, #[error("failed to propose block")] ProposeBlockFailed(#[source] ProposedBlockError), #[error("failed to validate block")] diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index e4f8edfccb..68fe37b3f2 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -185,10 +185,7 @@ impl BlockProducer { let block_builder_id = tasks .spawn({ let mempool = mempool.clone(); - async { - block_builder.run(mempool).await; - Ok(()) - } + async { block_builder.run(mempool).await } }) .id(); @@ -214,8 +211,8 @@ impl BlockProducer { task_result .map_err(|source| BlockProducerError::JoinError { task, source }) .map(|(_, result)| match result { - Ok(_) => Err(BlockProducerError::TaskFailedSuccessfully { task }), - Err(source) => Err(BlockProducerError::TonicTransportError { task, source }), + Ok(_) => Err(BlockProducerError::UnexpectedTaskCompletion { task }), + Err(source) => Err(BlockProducerError::TaskError { task, source }), }) .and_then(|x| x)? 
} From 5ab03a78b45a93c36caf7c7b625c10dbb24f6e3a Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Fri, 16 Jan 2026 16:35:06 +0100 Subject: [PATCH 089/125] fix/ci: pin all tool versions and container tag (#1523) --- .github/workflows/book.yml | 4 ++-- .github/workflows/build-docs.yml | 2 +- .github/workflows/changelog.yml | 2 +- .github/workflows/lint.yml | 20 ++++++++++---------- .github/workflows/network-monitor.yml | 2 +- .github/workflows/publish-debian-all.yml | 8 ++++---- .github/workflows/publish-debian.yml | 2 +- .github/workflows/publish-docker.yml | 2 +- .github/workflows/publish-dry-run.yml | 2 +- .github/workflows/publish-main.yml | 2 +- .github/workflows/stress-test-check.yml | 6 ++++-- .github/workflows/test-beta.yml | 6 ++++-- .github/workflows/test.yml | 4 +++- .github/workflows/trigger-deploy-docs.yml | 2 +- CHANGELOG.md | 1 + 15 files changed, 36 insertions(+), 29 deletions(-) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 2806838def..10e48c5f1d 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -34,7 +34,7 @@ jobs: # The documentation is uploaded as a github artifact IFF it is required for deployment i.e. on push into next. 
build: name: Build documentation - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main @@ -66,7 +66,7 @@ jobs: environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: build if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} steps: diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml index 72d4c28d32..56cc7795f4 100644 --- a/.github/workflows/build-docs.yml +++ b/.github/workflows/build-docs.yml @@ -24,7 +24,7 @@ permissions: jobs: build-docs: name: Build Documentation - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Checkout repository diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index abe1488b17..be2667efd0 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -12,7 +12,7 @@ permissions: jobs: changelog: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@main diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8cebcbe973..09500e58f7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -28,12 +28,12 @@ jobs: - uses: actions/checkout@v4 - uses: taiki-e/install-action@v2 with: - tool: typos + tool: typos@1.42.0 - run: make typos-check rustfmt: name: rustfmt - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main - name: Rustup @@ -48,7 +48,7 @@ jobs: clippy: name: clippy - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main - name: Rustup @@ -62,29 +62,29 @@ jobs: run: make clippy toml: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - uses: actions/checkout@v4 - uses: taiki-e/install-action@v2 with: - tool: taplo-cli + tool: taplo-cli@0.10.0 - run: make toml-check workspace-lints: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 5 
steps: - uses: actions/checkout@v4 - uses: taiki-e/install-action@v2 with: - tool: cargo-workspace-lints + tool: cargo-workspace-lints@0.1.4 - run: | make workspace-check doc: name: doc - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main - name: Rustup @@ -97,7 +97,7 @@ jobs: unused_deps: name: check for unused dependencies - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main - name: machete @@ -105,7 +105,7 @@ jobs: proto: name: proto check - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main - name: Rustup diff --git a/.github/workflows/network-monitor.yml b/.github/workflows/network-monitor.yml index 1a69216171..ca89a4df9d 100644 --- a/.github/workflows/network-monitor.yml +++ b/.github/workflows/network-monitor.yml @@ -23,7 +23,7 @@ env: jobs: check: name: check - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 15 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index 1539d7b1f7..6a8a8f1384 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -25,7 +25,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main @@ -48,7 +48,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main @@ -71,7 +71,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - 
name: Checkout repo uses: actions/checkout@main @@ -94,7 +94,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index 1079bfddbf..f115f5eefd 100644 --- a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -53,7 +53,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml index f4bfbe0088..990ef1d94c 100644 --- a/.github/workflows/publish-docker.yml +++ b/.github/workflows/publish-docker.yml @@ -24,7 +24,7 @@ permissions: jobs: publish: runs-on: - labels: "ubuntu-latest" + labels: "ubuntu-24.04" strategy: matrix: component: [node] diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-dry-run.yml index 2acaab2faa..7b059d518f 100644 --- a/.github/workflows/publish-dry-run.yml +++ b/.github/workflows/publish-dry-run.yml @@ -30,7 +30,7 @@ jobs: - uses: Swatinem/rust-cache@v2 - uses: taiki-e/install-action@v2 with: - tool: cargo-binstall + tool: cargo-binstall@1.16.6 - name: Install cargo-msrv run: cargo binstall --no-confirm --force cargo-msrv - name: Check MSRV for each workspace member diff --git a/.github/workflows/publish-main.yml b/.github/workflows/publish-main.yml index 25fe4552cf..748f2291ab 100644 --- a/.github/workflows/publish-main.yml +++ b/.github/workflows/publish-main.yml @@ -43,7 +43,7 @@ jobs: - uses: Swatinem/rust-cache@v2 - uses: taiki-e/install-action@v2 with: - tool: cargo-binstall + tool: 
cargo-binstall@1.16.6 - name: Install cargo-msrv run: cargo binstall --no-confirm --force cargo-msrv - name: Check MSRV for each workspace member diff --git a/.github/workflows/stress-test-check.yml b/.github/workflows/stress-test-check.yml index 47182f8f9f..605cf7b5be 100644 --- a/.github/workflows/stress-test-check.yml +++ b/.github/workflows/stress-test-check.yml @@ -23,7 +23,7 @@ env: jobs: stress-test-check: name: stress-test-check - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 10 steps: - uses: actions/checkout@main @@ -32,7 +32,9 @@ jobs: - uses: Swatinem/rust-cache@v2 with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - uses: taiki-e/install-action@nextest + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 - name: Install stress test run: make install-stress-test - name: Create directory for stress test store diff --git a/.github/workflows/test-beta.yml b/.github/workflows/test-beta.yml index 042d50be28..63aae6775d 100644 --- a/.github/workflows/test-beta.yml +++ b/.github/workflows/test-beta.yml @@ -10,7 +10,7 @@ permissions: jobs: test: name: test - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 30 steps: - uses: actions/checkout@v4 @@ -18,6 +18,8 @@ jobs: ref: 'next' - name: Rustup run: rustup install beta && rustup default beta - - uses: taiki-e/install-action@nextest + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 - name: Run tests run: make test diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index cfee5fc3cd..c86c0f25fd 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,7 +33,9 @@ jobs: - uses: Swatinem/rust-cache@v2 with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - uses: taiki-e/install-action@nextest + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 - name: Run tests run: make test - name: Doc tests diff --git 
a/.github/workflows/trigger-deploy-docs.yml b/.github/workflows/trigger-deploy-docs.yml index 6de20e9784..ca54a442d2 100644 --- a/.github/workflows/trigger-deploy-docs.yml +++ b/.github/workflows/trigger-deploy-docs.yml @@ -8,7 +8,7 @@ on: jobs: notify: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 permissions: contents: read diff --git a/CHANGELOG.md b/CHANGELOG.md index e8354c749c..abcd9359a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ - Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496). - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). +- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). ### Changes From cc56cd1df8b99ca613e05e7156e29dbe99310e73 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Fri, 16 Jan 2026 17:43:38 +0100 Subject: [PATCH 090/125] feat: rocksdb storage for `AccountTree` and `NullifierTree` by default (#1326) --- .github/actions/install-rocksdb/action.yml | 14 ++ .github/workflows/lint.yml | 8 + .github/workflows/publish-debian-all.yml | 2 + .github/workflows/publish-debian.yml | 3 + .github/workflows/publish-dry-run.yml | 2 + .github/workflows/publish-main.yml | 2 + .github/workflows/stress-test-check.yml | 8 +- .github/workflows/test-beta.yml | 4 + .github/workflows/test.yml | 4 + CHANGELOG.md | 1 + Cargo.lock | 127 +++++++++++ Cargo.toml | 3 +- bin/node/Dockerfile | 3 +- crates/block-producer/src/server/tests.rs | 36 ++- crates/rpc/src/tests.rs | 75 ++++--- crates/store/Cargo.toml | 16 +- crates/store/README.md | 29 +++ ...unt_tree_historical.rs => account_tree.rs} | 56 +++-- crates/store/src/accounts/mod.rs | 4 + crates/store/src/lib.rs | 2 + crates/store/src/state.rs | 210 
+++++++++++++----- 21 files changed, 493 insertions(+), 116 deletions(-) create mode 100644 .github/actions/install-rocksdb/action.yml rename crates/store/benches/{account_tree_historical.rs => account_tree.rs} (81%) diff --git a/.github/actions/install-rocksdb/action.yml b/.github/actions/install-rocksdb/action.yml new file mode 100644 index 0000000000..c42cb98250 --- /dev/null +++ b/.github/actions/install-rocksdb/action.yml @@ -0,0 +1,14 @@ +name: "Install RocksDB dependencies" +description: "Install dependencies for RocksDB compilation" + +runs: + using: "composite" + steps: + - name: Install LLVM/Clang for RocksDB + shell: bash + run: | + set -eux + sudo apt-get update + # Install clang/llvm for bindgen (needed for FFI bindings). + # RocksDB is compiled from source by librocksdb-sys. + sudo apt-get install -y clang llvm-dev libclang-dev diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 09500e58f7..d2dea8a72d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -51,6 +51,10 @@ jobs: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: | rustup update --no-self-update @@ -87,6 +91,10 @@ jobs: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index 6a8a8f1384..a6d63d5035 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -31,6 +31,8 @@ jobs: uses: actions/checkout@main with: fetch-depth: 0 + - name: Install RocksDB + uses: 
./.github/actions/install-rocksdb - name: Build and Publish Node uses: ./.github/actions/debian with: diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index f115f5eefd..81e8d74475 100644 --- a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -60,6 +60,9 @@ jobs: with: fetch-depth: 0 + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb + - name: Build and Publish Packages uses: ./.github/actions/debian with: diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-dry-run.yml index 7b059d518f..fe6b15e879 100644 --- a/.github/workflows/publish-dry-run.yml +++ b/.github/workflows/publish-dry-run.yml @@ -23,6 +23,8 @@ jobs: fetch-depth: 0 - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Install dependencies run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain diff --git a/.github/workflows/publish-main.yml b/.github/workflows/publish-main.yml index 748f2291ab..fcaab36a86 100644 --- a/.github/workflows/publish-main.yml +++ b/.github/workflows/publish-main.yml @@ -18,6 +18,8 @@ jobs: with: fetch-depth: 0 ref: main + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb # Ensure the release tag refers to the latest commit on main. # Compare the commit SHA that triggered the workflow with the HEAD of the branch we just # checked out (main). 
diff --git a/.github/workflows/stress-test-check.yml b/.github/workflows/stress-test-check.yml index 605cf7b5be..383440b9ee 100644 --- a/.github/workflows/stress-test-check.yml +++ b/.github/workflows/stress-test-check.yml @@ -23,10 +23,14 @@ env: jobs: stress-test-check: name: stress-test-check - runs-on: ubuntu-24.04 - timeout-minutes: 10 + runs-on: Linux-ARM64-Runner + timeout-minutes: 20 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/test-beta.yml b/.github/workflows/test-beta.yml index 63aae6775d..07b9705fdf 100644 --- a/.github/workflows/test-beta.yml +++ b/.github/workflows/test-beta.yml @@ -16,6 +16,10 @@ jobs: - uses: actions/checkout@v4 with: ref: 'next' + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup install beta && rustup default beta - uses: taiki-e/install-action@v2 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c86c0f25fd..7760225a67 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -28,6 +28,10 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 diff --git a/CHANGELOG.md b/CHANGELOG.md index abcd9359a0..bbd5407e71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -115,6 +115,7 @@ - Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution (#[1278](https://github.com/0xMiden/miden-node/pull/1278)). 
- Added `validator` crate with initial protobuf, gRPC server, and sub-command (#[1293](https://github.com/0xMiden/miden-node/pull/1293)). - [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/miden-node/pull/1292)). +- [BREAKING] Added `rocksdb` feature to enable rocksdb backends of `LargeSmt` ([#1326](https://github.com/0xMiden/miden-node/pull/1326)). - [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/miden-node/pull/1333)). - Implement `DataStore::get_note_script()` for `NtxDataStore` (#[1332](https://github.com/0xMiden/miden-node/pull/1332)). - Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/miden-node/pull/1338)). diff --git a/Cargo.lock b/Cargo.lock index 1ad02438bf..aa8a55777e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -342,6 +342,24 @@ dependencies = [ "num-traits", ] +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.10.0", + "cexpr", + "clang-sys", + "itertools 0.10.5", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.114", +] + [[package]] name = "bit-set" version = "0.8.0" @@ -446,6 +464,16 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +[[package]] +name = "bzip2-sys" +version = "0.1.13+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +dependencies = [ + "cc", + "pkg-config", +] + [[package]] name = "camino" version = "1.2.2" @@ -496,6 +524,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cf-rustracing" version = "1.2.1" @@ -605,6 +642,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "3.2.25" @@ -2229,12 +2277,36 @@ version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + [[package]] name = "libm" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +[[package]] +name = "librocksdb-sys" +version = "0.17.3+10.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "libc", + "libz-sys", + "lz4-sys", +] + [[package]] name = "libsqlite3-sys" version = "0.35.0" @@ -2255,6 +2327,17 @@ dependencies = [ "libc", ] +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2367,6 +2450,16 @@ version = "0.16.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "matchers" version = "0.2.0" @@ -2527,6 +2620,7 @@ dependencies = [ "rand_core 0.9.5", "rand_hc", "rayon", + "rocksdb", "sha2", "sha3", "subtle", @@ -2824,6 +2918,7 @@ dependencies = [ "fs-err", "hex", "indexmap 2.13.0", + "miden-crypto", "miden-node-proto", "miden-node-proto-build", "miden-node-test-macro", @@ -3248,6 +3343,12 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -3348,6 +3449,16 @@ dependencies = [ "memoffset", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -4626,6 +4737,16 @@ dependencies = [ "serde", ] +[[package]] +name = "rocksdb" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "rstest" version = "0.26.1" @@ -4671,6 +4792,12 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + [[package]] name = "rustc_version" version = "0.2.3" diff --git a/Cargo.toml b/Cargo.toml index 53e5182bba..6acc89250f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,7 +57,8 @@ miden-tx = { branch = "next", default-features = false, git = "http miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } # Other miden dependencies. These should align with those expected by miden-base. -miden-air = { features = ["std", "testing"], version = "0.20" } +miden-air = { features = ["std", "testing"], version = "0.20" } +miden-crypto = { default-features = false, version = "0.19" } # External dependencies anyhow = { version = "1.0" } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 3becd3ded9..832b0bb8d2 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,8 +1,9 @@ FROM rust:1.90-slim-bullseye AS builder +# Install build dependencies. RocksDB is compiled from source by librocksdb-sys. 
RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y llvm clang bindgen pkg-config libssl-dev libsqlite3-dev ca-certificates && \ + apt-get install -y llvm clang libclang-dev pkg-config libssl-dev libsqlite3-dev ca-certificates && \ rm -rf /var/lib/apt/lists/* WORKDIR /app diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index cbfd27fe02..91de51ddcf 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -114,16 +114,34 @@ async fn block_producer_startup_is_robust_to_network_failures() { assert!(response.is_ok()); // kill the store - // Use spawn_blocking because shutdown_timeout blocks and can't run in async context - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) - .await - .expect("shutdown should complete"); + shutdown_store(store_runtime).await; // test: request against block-producer api should fail immediately let response = send_request(block_producer_client.clone(), 1).await; assert!(response.is_err()); // test: restart the store and request should succeed + let store_runtime = restart_store(store_addr, data_directory.path()).await; + let response = send_request(block_producer_client.clone(), 2).await; + assert!(response.is_ok()); + + // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly + shutdown_store(store_runtime).await; +} + +/// Shuts down the store runtime properly to allow RocksDB to flush before the temp directory is +/// deleted. +async fn shutdown_store(store_runtime: runtime::Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); +} + +/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. 
+async fn restart_store( + store_addr: std::net::SocketAddr, + data_directory: &std::path::Path, +) -> runtime::Runtime { let rpc_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") @@ -132,19 +150,21 @@ async fn block_producer_startup_is_robust_to_network_failures() { let block_producer_listener = TcpListener::bind(store_addr) .await .expect("store should bind the block-producer port"); - task::spawn(async move { + let dir = data_directory.to_path_buf(); + let store_runtime = + runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); + store_runtime.spawn(async move { Store { rpc_listener, ntx_builder_listener, block_producer_listener, - data_directory: data_directory.path().to_path_buf(), + data_directory: dir, } .serve() .await .expect("store should start serving"); }); - let response = send_request(block_producer_client.clone(), 2).await; - assert!(response.is_ok()); + store_runtime } /// Creates a dummy transaction and submits it to the block producer. diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 263ef9bfbc..e88ee40968 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -64,7 +64,7 @@ async fn rpc_server_accepts_requests_without_accept_header() { assert!(response.is_ok()); // Shutdown to avoid runtime drop error. - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } #[tokio::test] @@ -80,7 +80,7 @@ async fn rpc_server_accepts_requests_with_accept_header() { assert!(response.is_ok()); // Shutdown to avoid runtime drop error. - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } #[tokio::test] @@ -113,7 +113,7 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { assert!(response.as_ref().err().unwrap().message().contains("server does not support"),); // Shutdown to avoid runtime drop error. 
- store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } } @@ -137,34 +137,17 @@ async fn rpc_startup_is_robust_to_network_failures() { assert!(response.unwrap().into_inner().block_header.is_some()); // Test: shutdown the store and should fail - // Use spawn_blocking because shutdown_timeout blocks and can't run in async context - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) - .await - .expect("shutdown should complete"); + shutdown_store(store_runtime).await; let response = send_request(&mut rpc_client).await; assert!(response.is_err()); // Test: restart the store and request should succeed - let rpc_listener = TcpListener::bind(store_addr).await.expect("Failed to bind store"); - let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind store ntx-builder gRPC endpoint"); - let block_producer_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); - task::spawn(async move { - Store { - rpc_listener, - ntx_builder_listener, - block_producer_listener, - data_directory: data_directory.path().to_path_buf(), - grpc_timeout: Duration::from_secs(10), - } - .serve() - .await - .expect("store should start serving"); - }); + let store_runtime = restart_store(store_addr, data_directory.path()).await; let response = send_request(&mut rpc_client).await; assert_eq!(response.unwrap().into_inner().block_header.unwrap().block_num, 0); + + // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly + shutdown_store(store_runtime).await; } #[tokio::test] @@ -207,7 +190,7 @@ async fn rpc_server_has_web_support() { assert!(headers.get("access-control-allow-credentials").is_some()); assert!(headers.get("access-control-expose-headers").is_some()); assert!(headers.get("vary").is_some()); - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } #[tokio::test] @@ -293,7 +276,7 @@ async fn 
rpc_server_rejects_proven_transactions_with_invalid_commitment() { ); // Shutdown to avoid runtime drop error. - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } #[tokio::test] @@ -366,7 +349,7 @@ async fn rpc_server_rejects_tx_submissions_without_genesis() { ); // Shutdown to avoid runtime drop error. - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } /// Sends an arbitrary / irrelevant request to the RPC. @@ -472,6 +455,40 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { ) } +/// Shuts down the store runtime properly to allow `RocksDB` to flush before the temp directory is +/// deleted. +async fn shutdown_store(store_runtime: Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); +} + +/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. +async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) -> Runtime { + let rpc_listener = TcpListener::bind(store_addr).await.expect("Failed to bind store"); + let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("Failed to bind store ntx-builder gRPC endpoint"); + let block_producer_listener = + TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); + let dir = data_directory.to_path_buf(); + let store_runtime = + runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); + store_runtime.spawn(async move { + Store { + rpc_listener, + ntx_builder_listener, + block_producer_listener, + data_directory: dir, + grpc_timeout: Duration::from_secs(10), + } + .serve() + .await + .expect("store should start serving"); + }); + store_runtime +} + #[tokio::test] async fn get_limits_endpoint() { // Start the RPC and store @@ -524,5 +541,5 @@ async fn get_limits_endpoint() { ); // Shutdown to avoid runtime drop error. 
- store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 22037e4b9e..062c4dde3c 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -24,6 +24,7 @@ diesel_migrations = { features = ["sqlite"], version = "2.3" } fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { workspace = true } @@ -55,6 +56,17 @@ rand = { workspace = true } regex = { version = "1.11" } termtree = { version = "0.5" } +[features] +default = ["rocksdb"] +rocksdb = ["miden-crypto/rocksdb"] + [[bench]] -harness = false -name = "account_tree_historical" +harness = false +name = "account_tree" +required-features = ["rocksdb"] + +[package.metadata.cargo-machete] +# This is an indirect dependency for which we need to enable optimisations +# via feature flags. Because we don't use it directly in code, machete +# identifies it as unused. +ignored = ["miden-crypto"] diff --git a/crates/store/README.md b/crates/store/README.md index e3f9a8dde3..57c002fe56 100644 --- a/crates/store/README.md +++ b/crates/store/README.md @@ -7,6 +7,35 @@ operator must take care that the store's API endpoint is **only** exposed to the For more information on the installation and operation of this component, please see the [node's readme](/README.md). +## RocksDB Feature + +The `rocksdb` feature (enabled by default) provides disk-backed storage via RocksDB for `LargeSmt`. Building _requires_ LLVM/Clang for `bindgen`. 
+ +### Using System Libraries + +To avoid compiling RocksDB from source and save yourself some time, use system libraries: + +```bash +# Install system RocksDB +# (Ubuntu/Debian) +#sudo apt-get install librocksdb-dev clang llvm-dev libclang-dev +# (Fedora) +#sudo dnf install rocksdb rocksdb-devel llvm19 clang19 + +# Set environment variables to use system library +export ROCKSDB_LIB_DIR=/usr/lib +export ROCKSDB_INCLUDE_DIR=/usr/include +# export ROCKSDB_STATIC=1 (optional) +# (Ubuntu/Debian) +#export LIBCLANG_PATH=/usr/lib/llvm-14/lib +# (Fedora) +#export LIBCLANG_PATH=/usr/lib64/llvm19/lib +``` + +### Building from Source + +Without the environment variables above, `librocksdb-sys` compiles RocksDB from source, which requires a C/C++ toolchain. + ## API overview The full gRPC API can be found [here](../../proto/proto/store.proto). diff --git a/crates/store/benches/account_tree_historical.rs b/crates/store/benches/account_tree.rs similarity index 81% rename from crates/store/benches/account_tree_historical.rs rename to crates/store/benches/account_tree.rs index ba7a5c2cc3..8c3f1009ec 100644 --- a/crates/store/benches/account_tree_historical.rs +++ b/crates/store/benches/account_tree.rs @@ -1,22 +1,44 @@ use std::hint::black_box; +use std::path::Path; +use std::sync::atomic::{AtomicUsize, Ordering}; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; +use miden_crypto::merkle::smt::{RocksDbConfig, RocksDbStorage}; use miden_node_store::AccountTreeWithHistory; use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::block::BlockNumber; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::crypto::hash::rpo::Rpo256; -use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage}; +use miden_protocol::crypto::merkle::smt::LargeSmt; use miden_protocol::testing::account_id::AccountIdBuilder; +/// Counter for creating unique `RocksDB` directories during benchmarking. 
+static DB_COUNTER: AtomicUsize = AtomicUsize::new(0); + // HELPER FUNCTIONS // ================================================================================================ -/// Creates a storage backend for a `LargeSmt`. -fn setup_storage() -> MemoryStorage { - // TODO migrate to RocksDB for persistence to gain meaningful numbers - MemoryStorage::default() +/// Returns the default base path for `RocksDB` benchmark storage. +fn default_storage_path() -> std::path::PathBuf { + std::path::PathBuf::from("target/bench_rocksdb") +} + +/// Creates a `RocksDB` storage instance for benchmarking. +/// +/// # Arguments +/// * `base_path` - Base directory for `RocksDB` storage. Each call creates a unique subdirectory. +fn setup_storage(base_path: &Path) -> RocksDbStorage { + let counter = DB_COUNTER.fetch_add(1, Ordering::SeqCst); + let db_path = base_path.join(format!("bench_rocksdb_{counter}")); + + // Clean up the directory if it exists + if db_path.exists() { + fs_err::remove_dir_all(&db_path).ok(); + } + fs_err::create_dir_all(&db_path).expect("Failed to create storage directory"); + + RocksDbStorage::open(RocksDbConfig::new(db_path)).expect("RocksDB failed to open file") } /// Generates a deterministic word from a seed. @@ -47,7 +69,8 @@ fn generate_account_id(seed: &mut [u8; 32]) -> AccountId { /// Sets up a vanilla `AccountTree` with specified number of accounts. 
fn setup_vanilla_account_tree( num_accounts: usize, -) -> (AccountTree>, Vec) { + base_path: &Path, +) -> (AccountTree>, Vec) { let mut seed = [0u8; 32]; let mut account_ids = Vec::new(); let mut entries = Vec::new(); @@ -59,7 +82,7 @@ fn setup_vanilla_account_tree( entries.push((account_id_to_smt_key(account_id), commitment)); } - let storage = setup_storage(); + let storage = setup_storage(base_path); let smt = LargeSmt::with_entries(storage, entries).expect("Failed to create LargeSmt from entries"); let tree = AccountTree::new(smt).expect("Failed to create AccountTree"); @@ -70,9 +93,10 @@ fn setup_vanilla_account_tree( fn setup_account_tree_with_history( num_accounts: usize, num_blocks: usize, -) -> (AccountTreeWithHistory, Vec) { + base_path: &Path, +) -> (AccountTreeWithHistory, Vec) { let mut seed = [0u8; 32]; - let storage = setup_storage(); + let storage = setup_storage(base_path); let smt = LargeSmt::with_entries(storage, std::iter::empty()) .expect("Failed to create empty LargeSmt"); let account_tree = AccountTree::new(smt).expect("Failed to create AccountTree"); @@ -104,11 +128,12 @@ fn setup_account_tree_with_history( /// This provides a baseline for comparison with historical access operations. fn bench_vanilla_access(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_vanilla_access"); + let base_path = default_storage_path(); let account_counts = [1, 10, 50, 100, 500, 1000]; for &num_accounts in &account_counts { - let (tree, account_ids) = setup_vanilla_account_tree(num_accounts); + let (tree, account_ids) = setup_vanilla_account_tree(num_accounts, &base_path); group.bench_function(BenchmarkId::new("vanilla", num_accounts), |b| { let test_account = *account_ids.first().unwrap(); @@ -125,6 +150,7 @@ fn bench_vanilla_access(c: &mut Criterion) { /// This provides a baseline for comparison with history-tracking insertion. 
fn bench_vanilla_insertion(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_insertion"); + let base_path = default_storage_path(); let account_counts = [1, 10, 50, 100, 500]; @@ -132,7 +158,7 @@ fn bench_vanilla_insertion(c: &mut Criterion) { group.bench_function(BenchmarkId::new("vanilla", num_accounts), |b| { b.iter(|| { let mut seed = [0u8; 32]; - let storage = setup_storage(); + let storage = setup_storage(&base_path); let smt = LargeSmt::with_entries(storage, std::iter::empty()) .expect("Failed to create empty LargeSmt"); let mut tree = AccountTree::new(smt).expect("Failed to create AccountTree"); @@ -158,18 +184,19 @@ fn bench_vanilla_insertion(c: &mut Criterion) { /// Benchmarks historical access at different depths and account counts. fn bench_historical_access(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_historical_access"); + let base_path = default_storage_path(); let account_counts = [10, 100, 500, 2500]; let block_depths = [0, 5, 10, 20, 32]; for &num_accounts in &account_counts { for &block_depth in &block_depths { - if block_depth > AccountTreeWithHistory::::MAX_HISTORY { + if block_depth > AccountTreeWithHistory::::MAX_HISTORY { continue; } let (tree_hist, account_ids) = - setup_account_tree_with_history(num_accounts, block_depth + 1); + setup_account_tree_with_history(num_accounts, block_depth + 1, &base_path); let current_block = tree_hist.block_number_latest(); let target_block = current_block .checked_sub(u32::try_from(block_depth).unwrap()) @@ -197,6 +224,7 @@ fn bench_historical_access(c: &mut Criterion) { /// Benchmarks insertion performance with history tracking at different account counts. 
fn bench_insertion_with_history(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_insertion"); + let base_path = default_storage_path(); let account_counts = [1, 10, 50, 100, 500, 2500]; @@ -204,7 +232,7 @@ fn bench_insertion_with_history(c: &mut Criterion) { group.bench_function(BenchmarkId::new("with_history", num_accounts), |b| { b.iter(|| { let mut seed = [0u8; 32]; - let storage = setup_storage(); + let storage = setup_storage(&base_path); let smt = LargeSmt::with_entries(storage, std::iter::empty()) .expect("Failed to create empty LargeSmt"); let account_tree = AccountTree::new(smt).expect("Failed to create AccountTree"); diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index c0a37be323..7fa5be8e79 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -29,6 +29,10 @@ mod tests; /// Convenience for an in-memory-only account tree. pub type InMemoryAccountTree = AccountTree>; +#[cfg(feature = "rocksdb")] +/// Convenience for a persistent account tree. 
+pub type PersistentAccountTree = AccountTree>; + // HISTORICAL ERROR TYPES // ================================================================================================ diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 5a9dc5ee2f..1d345dcf01 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -7,6 +7,8 @@ mod inner_forest; mod server; pub mod state; +#[cfg(feature = "rocksdb")] +pub use accounts::PersistentAccountTree; pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; pub use genesis::GenesisState; pub use server::{DataDirectory, Store}; diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index d9594a87c4..2ff1f887b6 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -31,18 +31,19 @@ use miden_protocol::block::account_tree::{AccountTree, AccountWitness, account_i use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; -use miden_protocol::crypto::merkle::smt::{ - LargeSmt, - LargeSmtError, - MemoryStorage, - SmtProof, - SmtStorage, -}; +#[cfg(not(feature = "rocksdb"))] +use miden_protocol::crypto::merkle::smt::MemoryStorage; +use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtProof, SmtStorage}; use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; use miden_protocol::transaction::{OutputNote, PartialBlockchain}; use miden_protocol::utils::Serializable; use tokio::sync::{Mutex, RwLock, oneshot}; use tracing::{info, info_span, instrument}; +#[cfg(feature = "rocksdb")] +use { + miden_crypto::merkle::smt::RocksDbStorage, + miden_protocol::crypto::merkle::smt::RocksDbConfig, +}; use crate::accounts::{AccountTreeWithHistory, HistoricalError}; use crate::blocks::BlockStore; @@ -82,8 +83,137 @@ pub struct 
TransactionInputs { pub new_account_id_prefix_is_unique: Option, } +/// The storage backend for trees. +#[cfg(feature = "rocksdb")] +pub type TreeStorage = RocksDbStorage; +#[cfg(not(feature = "rocksdb"))] +pub type TreeStorage = MemoryStorage; + +/// Converts a `LargeSmtError` into a `StateInitializationError`. +fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInitializationError { + match e { + LargeSmtError::Merkle(merkle_error) => { + StateInitializationError::DatabaseError(DatabaseError::MerkleError(merkle_error)) + }, + LargeSmtError::Storage(err) => { + StateInitializationError::AccountTreeIoError(err.as_report()) + }, + } +} + +/// Loads an SMT from persistent storage. +#[cfg(feature = "rocksdb")] +fn load_smt(storage: S) -> Result, StateInitializationError> { + LargeSmt::new(storage).map_err(account_tree_large_smt_error_to_init_error) +} + +/// Trait for loading trees from storage. +/// +/// For `MemoryStorage`, the tree is rebuilt from database entries on each startup. +/// For `RocksDbStorage`, the tree is loaded directly from disk (much faster for large trees). +// TODO handle on disk rocksdb storage file being missing and/or corrupted. +trait StorageLoader: SmtStorage + Sized { + /// Creates a storage backend for the given domain. + fn create(data_dir: &Path, domain: &'static str) -> Result; + + /// Loads an account tree, either from persistent storage or by rebuilding from DB. + fn load_account_tree( + self, + db: &mut Db, + ) -> impl std::future::Future, StateInitializationError>> + Send; + + /// Loads a nullifier tree, either from persistent storage or by rebuilding from DB. 
+ fn load_nullifier_tree( + self, + db: &mut Db, + ) -> impl std::future::Future< + Output = Result>, StateInitializationError>, + > + Send; +} + +#[cfg(not(feature = "rocksdb"))] +impl StorageLoader for MemoryStorage { + fn create(_data_dir: &Path, _domain: &'static str) -> Result { + Ok(MemoryStorage::default()) + } + + async fn load_account_tree( + self, + db: &mut Db, + ) -> Result, StateInitializationError> { + let account_data = db.select_all_account_commitments().await?; + let smt_entries = account_data + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error) + } + + async fn load_nullifier_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + let nullifiers = db.select_all_nullifiers().await?; + let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); + NullifierTree::with_storage_from_entries(self, entries) + .map_err(StateInitializationError::FailedToCreateNullifierTree) + } +} + +#[cfg(feature = "rocksdb")] +impl StorageLoader for RocksDbStorage { + fn create(data_dir: &Path, domain: &'static str) -> Result { + let storage_path = data_dir.join(domain); + fs_err::create_dir_all(&storage_path) + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; + RocksDbStorage::open(RocksDbConfig::new(storage_path)) + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) + } + + async fn load_account_tree( + self, + db: &mut Db, + ) -> Result, StateInitializationError> { + // If RocksDB storage has data, load from it directly + let has_data = self + .has_leaves() + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; + if has_data { + return load_smt(self); + } + + info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); + let account_data = db.select_all_account_commitments().await?; + let smt_entries 
= account_data + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error) + } + + async fn load_nullifier_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + // If RocksDB storage has data, load from it directly + let has_data = self + .has_leaves() + .map_err(|e| StateInitializationError::NullifierTreeIoError(e.to_string()))?; + if has_data { + let smt = load_smt(self)?; + return Ok(NullifierTree::new_unchecked(smt)); + } + + info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); + let nullifiers = db.select_all_nullifiers().await?; + let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); + NullifierTree::with_storage_from_entries(self, entries) + .map_err(StateInitializationError::FailedToCreateNullifierTree) + } +} + /// Container for state that needs to be updated atomically. -struct InnerState +struct InnerState where S: SmtStorage, { @@ -92,10 +222,7 @@ where account_tree: AccountTreeWithHistory, } -impl InnerState -where - S: SmtStorage, -{ +impl InnerState { /// Returns the latest block number. fn latest_block_num(&self) -> BlockNumber { self.blockchain @@ -119,7 +246,7 @@ pub struct State { /// Read-write lock used to prevent writing to a structure while it is being used. /// /// The lock is writer-preferring, meaning the writer won't be starved. - inner: RwLock, + inner: RwLock>, /// Forest-related state `(SmtForest, storage_map_roots, vault_roots)` with its own lock. forest: RwLock, @@ -133,7 +260,7 @@ impl State { // CONSTRUCTOR // -------------------------------------------------------------------------------------------- - /// Loads the state from the `db`. + /// Loads the state from the data directory. 
#[instrument(target = COMPONENT, skip_all)] pub async fn load(data_path: &Path) -> Result { let data_directory = DataDirectory::load(data_path.to_path_buf()) @@ -151,8 +278,16 @@ impl State { let blockchain = load_mmr(&mut db).await?; let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); - let account_tree = load_account_tree(&mut db, latest_block_num).await?; - let nullifier_tree = load_nullifier_tree(&mut db).await?; + + let account_storage = TreeStorage::create(data_path, "accounttree")?; + let smt = account_storage.load_account_tree(&mut db).await?; + let account_tree = + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree)?; + let account_tree = AccountTreeWithHistory::new(account_tree, latest_block_num); + + let nullifier_storage = TreeStorage::create(data_path, "nullifiertree")?; + let nullifier_tree = nullifier_storage.load_nullifier_tree(&mut db).await?; + let forest = load_smt_forest(&mut db, latest_block_num).await?; let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); @@ -1208,49 +1343,6 @@ async fn load_mmr(db: &mut Db) -> Result { Ok(chain_mmr) } -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_nullifier_tree( - db: &mut Db, -) -> Result>, StateInitializationError> { - let nullifiers = db.select_all_nullifiers().await?; - - // Convert nullifier data to entries for NullifierTree - // The nullifier value format is: block_num - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - - NullifierTree::with_storage_from_entries(MemoryStorage::default(), entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) -} - -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_account_tree( - db: &mut Db, - block_number: BlockNumber, -) -> Result, StateInitializationError> { - let account_data = Vec::from_iter(db.select_all_account_commitments().await?); - - let smt_entries = account_data 
- .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - - let smt = - LargeSmt::with_entries(MemoryStorage::default(), smt_entries).map_err(|e| match e { - LargeSmtError::Merkle(merkle_error) => { - StateInitializationError::DatabaseError(DatabaseError::MerkleError(merkle_error)) - }, - LargeSmtError::Storage(err) => { - // large_smt::StorageError is not `Sync` and hence `context` cannot be called - // which we want to and do - StateInitializationError::AccountTreeIoError(err.as_report()) - }, - })?; - - let account_tree = - AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree)?; - - Ok(AccountTreeWithHistory::new(account_tree, block_number)) -} - /// Loads SMT forest with storage map and vault Merkle paths for all public accounts. #[instrument(target = COMPONENT, skip_all, fields(block_num = %block_num))] async fn load_smt_forest( From dc920c709f658c8b54ef7e9f2196165fe6990b13 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 16 Jan 2026 18:45:00 +0200 Subject: [PATCH 091/125] ci: spell check does not need a massive runner (#1524) Co-authored-by: Bernhard Schuster --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d2dea8a72d..37bb1d1f21 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -22,7 +22,7 @@ env: jobs: typos: - runs-on: Linux-ARM64-Runner + runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - uses: actions/checkout@v4 From 0cefaf6de54d82cd2c6bbecc123123249b6f2848 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Fri, 16 Jan 2026 18:05:24 +0100 Subject: [PATCH 092/125] store: limit number of requested storage map keys / smt proofs returned (#1517) --- CHANGELOG.md | 1 + crates/proto/src/domain/account.rs | 8 ++++++- crates/store/src/inner_forest/mod.rs | 5 +++- crates/store/src/inner_forest/tests.rs | 33 
++++++++++++++++++++++++++ 4 files changed, 45 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbd5407e71..fee80111cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ - Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)). - Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). - Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496). +- Limit number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). - Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 5ddc790fc7..269737c888 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -435,6 +435,12 @@ impl AccountStorageMapDetails { /// Maximum number of storage map entries that can be returned in a single response. pub const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of SMT proofs that can be returned in a single response. + /// + /// This limit is more restrictive than [`Self::MAX_RETURN_ENTRIES`] because SMT proofs + /// are larger (up to 64 inner nodes each) and more CPU-intensive to generate. + pub const MAX_SMT_PROOF_ENTRIES: usize = 16; + /// Creates storage map details with all entries from the storage map. 
/// /// If the storage map has too many entries (> `MAX_RETURN_ENTRIES`), @@ -476,7 +482,7 @@ impl AccountStorageMapDetails { /// Use this when the caller has already obtained the proofs from an `SmtForest`. /// Returns `LimitExceeded` if too many proofs are provided. pub fn from_proofs(slot_name: StorageSlotName, proofs: Vec) -> Self { - if proofs.len() > Self::MAX_RETURN_ENTRIES { + if proofs.len() > Self::MAX_SMT_PROOF_ENTRIES { Self { slot_name, entries: StorageMapEntries::LimitExceeded, diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 7a43e40f98..a5d47ac515 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -147,6 +147,9 @@ impl InnerForest { /// /// Returns `None` if no storage root is tracked for this account/slot/block combination. /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. + /// + /// If the number of requested keys exceeds [`AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES`], + /// returns `LimitExceeded`. 
pub(crate) fn open_storage_map( &self, account_id: AccountId, @@ -161,7 +164,7 @@ impl InnerForest { return None; } - if keys.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + if keys.len() > AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES { return Some(Ok(AccountStorageMapDetails { slot_name, entries: StorageMapEntries::LimitExceeded, diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 216ef42061..da311db463 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -428,3 +428,36 @@ fn test_storage_map_incremental_updates() { assert_ne!(root_2, root_3); assert_ne!(root_1, root_3); } + +#[test] +fn test_open_storage_map_returns_limit_exceeded_for_too_many_keys() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(3); + let block_num = BlockNumber::GENESIS.child(); + + // Create a storage map with a few entries + let mut map_delta = StorageMapDelta::default(); + for i in 0..20u32 { + let key = Word::from([i, 0, 0, 0]); + let value = Word::from([0, 0, 0, i]); + map_delta.insert(key, value); + } + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + // Request proofs for more than MAX_SMT_PROOF_ENTRIES (16) keys. + // Should return LimitExceeded. 
+ let keys: Vec = (0..20u32).map(|i| Word::from([i, 0, 0, 0])).collect(); + let result = forest.open_storage_map(account_id, slot_name.clone(), block_num, &keys); + + let details = result.expect("Should return Some").expect("Should not error"); + assert_matches!(details.entries, StorageMapEntries::LimitExceeded); +} From a64cc317c862698d87dc615e36173718180df013 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 19 Jan 2026 10:44:24 +0100 Subject: [PATCH 093/125] feat/makefile: allow container runtime override with env `CONTAINER_RUNTIME` (#1525) --- Makefile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 5522c2d63b..72bdbce492 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,7 @@ help: WARNINGS=RUSTDOCFLAGS="-D warnings" BUILD_PROTO=BUILD_PROTO=1 +CONTAINER_RUNTIME ?= docker # -- linting -------------------------------------------------------------------------------------- @@ -114,20 +115,20 @@ install-network-monitor: ## Installs network monitor binary # --- docker -------------------------------------------------------------------------------------- .PHONY: docker-build-node -docker-build-node: ## Builds the Miden node using Docker +docker-build-node: ## Builds the Miden node using Docker (override with CONTAINER_RUNTIME=podman) @CREATED=$$(date) && \ VERSION=$$(cat bin/node/Cargo.toml | grep -m 1 '^version' | cut -d '"' -f 2) && \ COMMIT=$$(git rev-parse HEAD) && \ - docker build --build-arg CREATED="$$CREATED" \ + $(CONTAINER_RUNTIME) build --build-arg CREATED="$$CREATED" \ --build-arg VERSION="$$VERSION" \ --build-arg COMMIT="$$COMMIT" \ -f bin/node/Dockerfile \ -t miden-node-image . 
.PHONY: docker-run-node -docker-run-node: ## Runs the Miden node as a Docker container - docker volume create miden-db - docker run --name miden-node \ +docker-run-node: ## Runs the Miden node as a Docker container (override with CONTAINER_RUNTIME=podman) + $(CONTAINER_RUNTIME) volume create miden-db + $(CONTAINER_RUNTIME) run --name miden-node \ -p 57291:57291 \ -v miden-db:/db \ -d miden-node-image From b0e453451221c3369b5c98bda11ca0969d653235 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 19 Jan 2026 14:39:35 +0200 Subject: [PATCH 094/125] Port network note must always fail fix --- crates/ntx-builder/src/actor/mod.rs | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index 9a595a9d35..602dde11b2 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -7,7 +7,6 @@ use std::sync::Arc; use std::time::Duration; use account_state::{NetworkAccountState, TransactionCandidate}; -use execute::NtxError; use futures::FutureExt; use miden_node_proto::clients::{Builder, ValidatorClient}; use miden_node_proto::domain::account::NetworkAccountPrefix; @@ -296,6 +295,7 @@ impl AccountActor { self.script_cache.clone(), ); + let notes = tx_candidate.notes.clone(); let execution_result = context.execute_transaction(tx_candidate).await; match execution_result { // Execution completed without failed notes. @@ -311,21 +311,10 @@ impl AccountActor { // Transaction execution failed. 
Err(err) => { tracing::error!(err = err.as_report(), "network transaction failed"); - match err { - NtxError::AllNotesFailed(failed) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(notes.as_slice(), block_num); - self.mode = ActorMode::NoViableNotes; - }, - NtxError::InputNotes(_) - | NtxError::NoteFilter(_) - | NtxError::Execution(_) - | NtxError::Proving(_) - | NtxError::Submission(_) - | NtxError::Panic(_) => { - self.mode = ActorMode::NoViableNotes; - }, - } + self.mode = ActorMode::NoViableNotes; + let notes = + notes.into_iter().map(|note| note.into_inner().into()).collect::>(); + state.notes_failed(notes.as_slice(), block_num); }, } } From eb1d9c0656b1670448b2bfbad0c05f07dc4f3d42 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 19 Jan 2026 14:41:30 +0200 Subject: [PATCH 095/125] Port note limit fix --- crates/ntx-builder/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index b0d89f94c8..62088ce6cc 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -14,7 +14,8 @@ pub use builder::NetworkTransactionBuilder; const COMPONENT: &str = "miden-ntx-builder"; /// Maximum number of network notes a network transaction is allowed to consume. -const MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(50).unwrap(); +const MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).unwrap(); +const _: () = assert!(MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); /// Maximum number of network transactions which should be in progress concurrently. 
/// From 66f7410c5b71fab78b08f4599f242932cb95c45f Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 19 Jan 2026 15:31:44 +0100 Subject: [PATCH 096/125] feat: filter excessive numbers of storage map key queries earlier (#1530) --- crates/proto/src/domain/account.rs | 6 +++++- crates/rpc/src/server/api.rs | 17 +++++++++++++++++ crates/utils/src/limiter.rs | 10 ++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 269737c888..ac66808933 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -1,6 +1,7 @@ use std::fmt::{Debug, Display, Formatter}; use miden_node_utils::formatting::format_opt; +use miden_node_utils::limiter::{QueryParamLimiter, QueryParamStorageMapKeyLimit}; use miden_protocol::Word; use miden_protocol::account::{ Account, @@ -439,7 +440,10 @@ impl AccountStorageMapDetails { /// /// This limit is more restrictive than [`Self::MAX_RETURN_ENTRIES`] because SMT proofs /// are larger (up to 64 inner nodes each) and more CPU-intensive to generate. - pub const MAX_SMT_PROOF_ENTRIES: usize = 16; + /// + /// This is defined by [`QueryParamStorageMapKeyLimit::LIMIT`] and used both in RPC validation + /// and store-level enforcement to ensure consistent limits. + pub const MAX_SMT_PROOF_ENTRIES: usize = QueryParamStorageMapKeyLimit::LIMIT; /// Creates storage map details with all entries from the storage map. 
/// diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index d1c922d886..0d12797354 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -15,6 +15,7 @@ use miden_node_utils::limiter::{ QueryParamNoteIdLimit, QueryParamNoteTagLimit, QueryParamNullifierLimit, + QueryParamStorageMapKeyLimit, }; use miden_protocol::batch::ProvenBatch; use miden_protocol::block::{BlockHeader, BlockNumber}; @@ -486,10 +487,21 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData::MapKeys as ProtoMapKeys; + let request = request.into_inner(); debug!(target: COMPONENT, ?request); + // Validate storage map key limits before forwarding to store + if let Some(details) = &request.details { + for storage_map in &details.storage_maps { + if let Some(ProtoMapKeys(keys)) = &storage_map.slot_data { + check::(keys.map_keys.len())?; + } + } + } + self.store.clone().get_account(request).await } @@ -617,6 +629,7 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { QueryParamNoteIdLimit as NoteId, QueryParamNoteTagLimit as NoteTag, QueryParamNullifierLimit as Nullifier, + QueryParamStorageMapKeyLimit as StorageMapKey, }; proto::rpc::RpcLimits { @@ -638,6 +651,10 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { ), ("SyncNotes".into(), endpoint_limits(&[(NoteTag::PARAM_NAME, NoteTag::LIMIT)])), ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), + ( + "GetAccount".into(), + endpoint_limits(&[(StorageMapKey::PARAM_NAME, StorageMapKey::LIMIT)]), + ), ]), } }); diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 03b8aeb54f..391a2d5389 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -120,3 +120,13 @@ impl QueryParamLimiter for QueryParamBlockLimit { const PARAM_NAME: &str = "block_header"; const LIMIT: usize = GENERAL_REQUEST_LIMIT; } + +/// Used 
for the following RPC endpoints +/// * `get_account` +/// +/// Capped at 16 storage map keys per slot to limit the number of SMT proofs returned. +pub struct QueryParamStorageMapKeyLimit; +impl QueryParamLimiter for QueryParamStorageMapKeyLimit { + const PARAM_NAME: &str = "storage_map_key"; + const LIMIT: usize = 16; +} From a2de1546a965ce36fd652d648a8352d067d370c9 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 19 Jan 2026 16:11:40 +0100 Subject: [PATCH 097/125] be more conservative with shutdown timeouts, give some time post shutdown for all locks to be released (#1533) --- crates/rpc/src/tests.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index e88ee40968..b35fe8b6dc 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -458,9 +458,11 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { /// Shuts down the store runtime properly to allow `RocksDB` to flush before the temp directory is /// deleted. async fn shutdown_store(store_runtime: Runtime) { - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_secs(3))) .await .expect("shutdown should complete"); + // Give RocksDB time to release its lock file after the runtime shutdown + tokio::time::sleep(Duration::from_millis(200)).await; } /// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. 
From 3b76a41eeec6bfb5977b4681a1167fb8fa088709 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 20 Jan 2026 11:26:45 +0200 Subject: [PATCH 098/125] Fix diesel version regression --- crates/store/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 496a860988..dd06567ea5 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -19,8 +19,8 @@ anyhow = { workspace = true } deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } deadpool-diesel = { features = ["sqlite"], version = "0.6" } deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.2" } -diesel_migrations = { features = ["sqlite"], version = "2.2" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } From 2cc9742dcdedb2a6f5a5cf572be42f1836f77542 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 20 Jan 2026 06:42:12 -0300 Subject: [PATCH 099/125] fix(ntx): notify actor about account deltas (#1547) --- crates/ntx-builder/src/actor/account_state.rs | 2 +- crates/ntx-builder/src/coordinator.rs | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index cff9334db8..bfc5a41b71 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -214,10 +214,10 @@ impl NetworkAccountState { match update { NetworkAccountEffect::Updated(account_delta) => { self.account.add_delta(&account_delta); + tx_impact.account_delta = Some(account_prefix); }, 
NetworkAccountEffect::Created(_) => {}, } - tx_impact.account_delta = Some(account_prefix); } } for note in network_notes { diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 3806b7c8f0..7b5a588a9c 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -6,6 +6,7 @@ use indexmap::IndexMap; use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::domain::note::NetworkNote; +use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::transaction::TransactionId; use tokio::sync::mpsc::error::SendError; use tokio::sync::{Semaphore, mpsc}; @@ -229,7 +230,22 @@ impl Coordinator { event: &Arc, ) -> Result<(), SendError>> { let mut target_actors = HashMap::new(); - if let MempoolEvent::TransactionAdded { id, network_notes, .. } = event.as_ref() { + if let MempoolEvent::TransactionAdded { id, network_notes, account_delta, .. } = + event.as_ref() + { + // We need to inform the account if it was updated. This lets it know that its own + // transaction has been applied, and in the future also resolves race conditions with + // external network transactions (once these are allowed). + if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { + let account_id = delta.id(); + if account_id.is_network() { + let prefix = account_id.try_into().expect("account is network account"); + if let Some(actor) = self.actor_registry.get(&prefix) { + target_actors.insert(prefix, actor); + } + } + } + // Determine target actors for each note. 
for note in network_notes { if let NetworkNote::SingleTarget(note) = note { From 0cec12f004adc802cad9059d17169c80b4e7ee10 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 20 Jan 2026 14:53:20 +0100 Subject: [PATCH 100/125] fix/query: `GetAccount` should limit on totals, not per keyset (#1551) --- crates/proto/src/domain/account.rs | 49 +++++++++++++++++++++++--- crates/rpc/src/server/api.rs | 28 +++++++++------ crates/store/src/inner_forest/tests.rs | 9 ++--- crates/store/src/state.rs | 19 ++++++++++ crates/utils/src/limiter.rs | 9 ++--- 5 files changed, 92 insertions(+), 22 deletions(-) diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index ac66808933..57e9a4de84 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -1,7 +1,7 @@ use std::fmt::{Debug, Display, Formatter}; use miden_node_utils::formatting::format_opt; -use miden_node_utils::limiter::{QueryParamLimiter, QueryParamStorageMapKeyLimit}; +use miden_node_utils::limiter::{QueryParamLimiter, QueryParamStorageMapKeyTotalLimit}; use miden_protocol::Word; use miden_protocol::account::{ Account, @@ -441,9 +441,9 @@ impl AccountStorageMapDetails { /// This limit is more restrictive than [`Self::MAX_RETURN_ENTRIES`] because SMT proofs /// are larger (up to 64 inner nodes each) and more CPU-intensive to generate. /// - /// This is defined by [`QueryParamStorageMapKeyLimit::LIMIT`] and used both in RPC validation - /// and store-level enforcement to ensure consistent limits. - pub const MAX_SMT_PROOF_ENTRIES: usize = QueryParamStorageMapKeyLimit::LIMIT; + /// This is defined by [`QueryParamStorageMapKeyTotalLimit::LIMIT`] and used both in RPC + /// validation and store-level enforcement to ensure consistent limits. + pub const MAX_SMT_PROOF_ENTRIES: usize = QueryParamStorageMapKeyTotalLimit::LIMIT; /// Creates storage map details with all entries from the storage map. 
/// @@ -498,6 +498,14 @@ impl AccountStorageMapDetails { } } } + + /// Creates storage map details indicating the limit was exceeded. + pub fn limit_exceeded(slot_name: StorageSlotName) -> Self { + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } + } } impl TryFrom @@ -638,6 +646,21 @@ pub struct AccountStorageDetails { pub map_details: Vec, } +impl AccountStorageDetails { + /// Creates storage details where all map slots indicate limit exceeded. + pub fn all_limits_exceeded( + header: AccountStorageHeader, + slot_names: impl IntoIterator, + ) -> Self { + Self { + header, + map_details: Vec::from_iter( + slot_names.into_iter().map(AccountStorageMapDetails::limit_exceeded), + ), + } + } +} + impl TryFrom for AccountStorageDetails { type Error = ConversionError; @@ -733,6 +756,24 @@ pub struct AccountDetails { pub storage_details: AccountStorageDetails, } +impl AccountDetails { + /// Creates account details where all storage map slots indicate limit exceeded. + pub fn with_storage_limits_exceeded( + account_header: AccountHeader, + account_code: Option>, + vault_details: AccountVaultDetails, + storage_header: AccountStorageHeader, + slot_names: impl IntoIterator, + ) -> Self { + Self { + account_header, + account_code, + vault_details, + storage_details: AccountStorageDetails::all_limits_exceeded(storage_header, slot_names), + } + } +} + impl TryFrom for AccountDetails { type Error = ConversionError; diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 0d12797354..42889ef633 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -15,7 +15,7 @@ use miden_node_utils::limiter::{ QueryParamNoteIdLimit, QueryParamNoteTagLimit, QueryParamNullifierLimit, - QueryParamStorageMapKeyLimit, + QueryParamStorageMapKeyTotalLimit, }; use miden_protocol::batch::ProvenBatch; use miden_protocol::block::{BlockHeader, BlockNumber}; @@ -487,19 +487,27 @@ impl api_server::Api for RpcService { &self, request: Request, ) 
-> Result, Status> { - use proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData::MapKeys as ProtoMapKeys; + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ + SlotData::MapKeys as ProtoMapKeys, + SlotData::AllEntries as ProtoMapAllEntries + }; let request = request.into_inner(); debug!(target: COMPONENT, ?request); - // Validate storage map key limits before forwarding to store + // Validate total storage map key limit before forwarding to store if let Some(details) = &request.details { - for storage_map in &details.storage_maps { - if let Some(ProtoMapKeys(keys)) = &storage_map.slot_data { - check::(keys.map_keys.len())?; - } - } + let total_keys: usize = details + .storage_maps + .iter() + .filter_map(|m| m.slot_data.as_ref()) + .filter_map(|d| match d { + ProtoMapKeys(keys) => Some(keys.map_keys.len()), + ProtoMapAllEntries(_) => None, + }) + .sum(); + check::(total_keys)?; } self.store.clone().get_account(request).await @@ -629,7 +637,7 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { QueryParamNoteIdLimit as NoteId, QueryParamNoteTagLimit as NoteTag, QueryParamNullifierLimit as Nullifier, - QueryParamStorageMapKeyLimit as StorageMapKey, + QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal, }; proto::rpc::RpcLimits { @@ -653,7 +661,7 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), ( "GetAccount".into(), - endpoint_limits(&[(StorageMapKey::PARAM_NAME, StorageMapKey::LIMIT)]), + endpoint_limits(&[(StorageMapKeyTotal::PARAM_NAME, StorageMapKeyTotal::LIMIT)]), ), ]), } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index da311db463..fd60f535c4 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -441,9 +441,10 @@ fn test_open_storage_map_returns_limit_exceeded_for_too_many_keys() { let slot_name = 
StorageSlotName::mock(3); let block_num = BlockNumber::GENESIS.child(); - // Create a storage map with a few entries + // Create a storage map with entries + let num_entries = AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES + 5; let mut map_delta = StorageMapDelta::default(); - for i in 0..20u32 { + for i in 0..num_entries as u32 { let key = Word::from([i, 0, 0, 0]); let value = Word::from([0, 0, 0, i]); map_delta.insert(key, value); @@ -453,9 +454,9 @@ fn test_open_storage_map_returns_limit_exceeded_for_too_many_keys() { let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); forest.update_account(block_num, &delta).unwrap(); - // Request proofs for more than MAX_SMT_PROOF_ENTRIES (16) keys. + // Request proofs for more than MAX_SMT_PROOF_ENTRIES keys. // Should return LimitExceeded. - let keys: Vec = (0..20u32).map(|i| Word::from([i, 0, 0, 0])).collect(); + let keys: Vec = (0..num_entries as u32).map(|i| Word::from([i, 0, 0, 0])).collect(); let result = forest.open_storage_map(account_id, slot_name.clone(), block_num, &keys); let details = result.expect("Should return Some").expect("Should not error"); diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 1b19649aa5..34aebfffb9 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -1205,6 +1205,25 @@ impl State { None => AccountVaultDetails::empty(), }; + // Check total keys limit upfront before expensive open operations + let total_keys: usize = storage_requests + .iter() + .filter_map(|req| match &req.slot_data { + SlotData::MapKeys(keys) => Some(keys.len()), + SlotData::All => None, + }) + .sum(); + + if total_keys > AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES { + return Ok(AccountDetails::with_storage_limits_exceeded( + account_header, + account_code, + vault_details, + storage_header, + storage_requests.into_iter().map(|req| req.slot_name), + )); + } + let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); diff 
--git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 391a2d5389..1adf5be411 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -124,9 +124,10 @@ impl QueryParamLimiter for QueryParamBlockLimit { /// Used for the following RPC endpoints /// * `get_account` /// -/// Capped at 16 storage map keys per slot to limit the number of SMT proofs returned. -pub struct QueryParamStorageMapKeyLimit; -impl QueryParamLimiter for QueryParamStorageMapKeyLimit { +/// Capped at 64 total storage map keys across all slots to limit the number of SMT proofs +/// returned. +pub struct QueryParamStorageMapKeyTotalLimit; +impl QueryParamLimiter for QueryParamStorageMapKeyTotalLimit { const PARAM_NAME: &str = "storage_map_key"; - const LIMIT: usize = 16; + const LIMIT: usize = 64; } From 0fa7208f3e87c5e0e73f7f6426d018afa48bf240 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 20 Jan 2026 15:09:20 +0100 Subject: [PATCH 101/125] feat: add a sanity check for sync state of rocksdb vs sqlite, minor code re-org (#1532) --- CHANGELOG.md | 1 + crates/block-producer/src/server/tests.rs | 2 + crates/store/src/db/mod.rs | 3 +- crates/store/src/errors.rs | 15 + crates/store/src/server/mod.rs | 3 +- crates/store/src/state/loader.rs | 295 ++++++++++++++++++++ crates/store/src/{state.rs => state/mod.rs} | 224 ++------------- 7 files changed, 341 insertions(+), 202 deletions(-) create mode 100644 crates/store/src/state/loader.rs rename crates/store/src/{state.rs => state/mod.rs} (86%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0328be9514..ff897a298c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). 
- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). +- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). ### Changes diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 91de51ddcf..453512597b 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -94,6 +94,7 @@ async fn block_producer_startup_is_robust_to_network_failures() { ntx_builder_listener, block_producer_listener, data_directory: dir, + grpc_timeout: std::time::Duration::from_secs(30), } .serve() .await @@ -159,6 +160,7 @@ async fn restart_store( ntx_builder_listener, block_producer_listener, data_directory: dir, + grpc_timeout: std::time::Duration::from_secs(30), } .serve() .await diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 4bffb0af76..fc96212b56 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -3,8 +3,7 @@ use std::ops::RangeInclusive; use std::path::PathBuf; use anyhow::Context; -use diesel::prelude::QueryableByName; -use diesel::{Connection, RunQueryDsl, SqliteConnection}; +use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary, NetworkAccountPrefix}; use miden_node_proto::generated as proto; use miden_node_utils::tracing::OpenTelemetrySpanExt; diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index d0eb2142a8..32c345a985 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -206,6 +206,21 @@ pub enum StateInitializationError { DatabaseLoadError(#[from] DatabaseSetupError), #[error("inner forest error")] InnerForestError(#[from] InnerForestError), + #[error( + "{tree_name} SMT root ({tree_root:?}) does not match expected root from block {block_num} \ + ({block_root:?}). 
Delete the tree storage directories and restart the node to rebuild \ + from the database." + )] + TreeStorageDiverged { + tree_name: &'static str, + block_num: BlockNumber, + tree_root: Word, + block_root: Word, + }, + #[error("public account {0} is missing details in database")] + PublicAccountMissingDetails(AccountId), + #[error("failed to convert account to delta: {0}")] + AccountToDeltaConversionFailed(String), } #[derive(Debug, Error)] diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index f38f737fc9..420ec4921b 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -88,7 +88,8 @@ impl Store { let ntx_builder_address = self.ntx_builder_listener.local_addr()?; let block_producer_address = self.block_producer_listener.local_addr()?; info!(target: COMPONENT, rpc_endpoint=?rpc_address, ntx_builder_endpoint=?ntx_builder_address, - block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_timeout, "Loading database"); + block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_timeout, + "Loading database"); let state = Arc::new(State::load(&self.data_directory).await.context("failed to load state")?); diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs new file mode 100644 index 0000000000..4aa8e2590b --- /dev/null +++ b/crates/store/src/state/loader.rs @@ -0,0 +1,295 @@ +//! Tree loading logic for the store state. +//! +//! This module handles loading and initializing the Merkle trees (account tree, nullifier tree, +//! and SMT forest) from storage backends. It supports different loading modes: +//! +//! - **Memory mode** (`rocksdb` feature disabled): Trees are rebuilt from the database on each +//! startup. +//! - **Persistent mode** (`rocksdb` feature enabled): Trees are loaded from persistent storage if +//! data exists, otherwise rebuilt from the database and persisted. 
+ +use std::path::Path; + +use miden_protocol::Word; +use miden_protocol::block::account_tree::account_id_to_smt_key; +use miden_protocol::block::nullifier_tree::NullifierTree; +use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; +#[cfg(not(feature = "rocksdb"))] +use miden_protocol::crypto::merkle::smt::MemoryStorage; +use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; +#[cfg(feature = "rocksdb")] +use tracing::info; +use tracing::instrument; +#[cfg(feature = "rocksdb")] +use { + miden_crypto::merkle::smt::RocksDbStorage, + miden_protocol::crypto::merkle::smt::RocksDbConfig, +}; + +use crate::COMPONENT; +use crate::db::Db; +use crate::errors::{DatabaseError, StateInitializationError}; +use crate::inner_forest::InnerForest; + +// CONSTANTS +// ================================================================================================ + +/// Directory name for the account tree storage within the data directory. +pub const ACCOUNT_TREE_STORAGE_DIR: &str = "accounttree"; + +/// Directory name for the nullifier tree storage within the data directory. +pub const NULLIFIER_TREE_STORAGE_DIR: &str = "nullifiertree"; + +// STORAGE TYPE ALIAS +// ================================================================================================ + +/// The storage backend for trees. +#[cfg(feature = "rocksdb")] +pub type TreeStorage = RocksDbStorage; +#[cfg(not(feature = "rocksdb"))] +pub type TreeStorage = MemoryStorage; + +// ERROR CONVERSION +// ================================================================================================ + +/// Converts a `LargeSmtError` into a `StateInitializationError`. 
+pub fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInitializationError { + use miden_node_utils::ErrorReport; + match e { + LargeSmtError::Merkle(merkle_error) => { + StateInitializationError::DatabaseError(DatabaseError::MerkleError(merkle_error)) + }, + LargeSmtError::Storage(err) => { + StateInitializationError::AccountTreeIoError(err.as_report()) + }, + } +} + +// STORAGE LOADER TRAIT +// ================================================================================================ + +/// Trait for loading trees from storage. +/// +/// For `MemoryStorage`, the tree is rebuilt from database entries on each startup. +/// For `RocksDbStorage`, the tree is loaded directly from disk (much faster for large trees). +/// +/// Missing or corrupted storage is handled by the `verify_tree_consistency` check after loading, +/// which detects divergence between persistent storage and the database. If divergence is detected, +/// the user should manually delete the tree storage directories and restart the node. +pub trait StorageLoader: SmtStorage + Sized { + /// Creates a storage backend for the given domain. + fn create(data_dir: &Path, domain: &'static str) -> Result; + + /// Loads an account tree, either from persistent storage or by rebuilding from DB. + fn load_account_tree( + self, + db: &mut Db, + ) -> impl std::future::Future, StateInitializationError>> + Send; + + /// Loads a nullifier tree, either from persistent storage or by rebuilding from DB. 
+ fn load_nullifier_tree( + self, + db: &mut Db, + ) -> impl std::future::Future< + Output = Result>, StateInitializationError>, + > + Send; +} + +// MEMORY STORAGE IMPLEMENTATION +// ================================================================================================ + +#[cfg(not(feature = "rocksdb"))] +impl StorageLoader for MemoryStorage { + fn create(_data_dir: &Path, _domain: &'static str) -> Result { + Ok(MemoryStorage::default()) + } + + async fn load_account_tree( + self, + db: &mut Db, + ) -> Result, StateInitializationError> { + let account_data = db.select_all_account_commitments().await?; + let smt_entries = account_data + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error) + } + + async fn load_nullifier_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + let nullifiers = db.select_all_nullifiers().await?; + let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); + NullifierTree::with_storage_from_entries(self, entries) + .map_err(StateInitializationError::FailedToCreateNullifierTree) + } +} + +// ROCKSDB STORAGE IMPLEMENTATION +// ================================================================================================ + +#[cfg(feature = "rocksdb")] +impl StorageLoader for RocksDbStorage { + fn create(data_dir: &Path, domain: &'static str) -> Result { + let storage_path = data_dir.join(domain); + + fs_err::create_dir_all(&storage_path) + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; + RocksDbStorage::open(RocksDbConfig::new(storage_path)) + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) + } + + async fn load_account_tree( + self, + db: &mut Db, + ) -> Result, StateInitializationError> { + // If RocksDB storage has data, load from it directly + let has_data = self + .has_leaves() + .map_err(|e| 
StateInitializationError::AccountTreeIoError(e.to_string()))?; + if has_data { + return load_smt(self); + } + + info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); + let account_data = db.select_all_account_commitments().await?; + let smt_entries = account_data + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error) + } + + async fn load_nullifier_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + // If RocksDB storage has data, load from it directly + let has_data = self + .has_leaves() + .map_err(|e| StateInitializationError::NullifierTreeIoError(e.to_string()))?; + if has_data { + let smt = load_smt(self)?; + return Ok(NullifierTree::new_unchecked(smt)); + } + + info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); + let nullifiers = db.select_all_nullifiers().await?; + let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); + NullifierTree::with_storage_from_entries(self, entries) + .map_err(StateInitializationError::FailedToCreateNullifierTree) + } +} + +// HELPER FUNCTIONS +// ================================================================================================ + +/// Loads an SMT from persistent storage. +#[cfg(feature = "rocksdb")] +pub fn load_smt(storage: S) -> Result, StateInitializationError> { + LargeSmt::new(storage).map_err(account_tree_large_smt_error_to_init_error) +} + +// TREE LOADING FUNCTIONS +// ================================================================================================ + +/// Loads the blockchain MMR from all block headers in the database. +#[instrument(target = COMPONENT, skip_all)] +pub async fn load_mmr(db: &mut Db) -> Result { + let block_commitments: Vec = db + .select_all_block_headers() + .await? 
+ .iter() + .map(BlockHeader::commitment) + .collect(); + + // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX + // entries. + let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + + Ok(chain_mmr) +} + +/// Loads SMT forest with storage map and vault Merkle paths for all public accounts. +#[instrument(target = COMPONENT, skip_all, fields(block_num = %block_num))] +pub async fn load_smt_forest( + db: &mut Db, + block_num: BlockNumber, +) -> Result { + use miden_protocol::account::delta::AccountDelta; + + let public_account_ids = db.select_all_public_account_ids().await?; + + // Acquire write lock once for the entire initialization + let mut forest = InnerForest::new(); + + // Process each account + for account_id in public_account_ids { + // Get the full account from the database + let account_info = db.select_account(account_id).await?; + let account = account_info.details.expect("public accounts always have details in DB"); + + // Convert the full account to a full-state delta + let delta = + AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); + + // Use the unified update method (will recognize it's a full-state delta) + forest.update_account(block_num, &delta)?; + } + + Ok(forest) +} + +// CONSISTENCY VERIFICATION +// ================================================================================================ + +/// Verifies that tree roots match the expected roots from the latest block header. +/// +/// This check ensures the database and tree storage (memory or persistent) haven't diverged due to +/// corruption or incomplete shutdown. When trees are rebuilt from the database, they will naturally +/// match; when loaded from persistent storage, this catches any inconsistencies. 
+/// +/// # Arguments +/// * `account_tree_root` - Root of the loaded account tree +/// * `nullifier_tree_root` - Root of the loaded nullifier tree +/// * `db` - Database connection to fetch the latest block header +/// +/// # Errors +/// Returns `StateInitializationError::TreeStorageDiverged` if any root doesn't match. +#[instrument(target = COMPONENT, skip_all)] +pub async fn verify_tree_consistency( + account_tree_root: Word, + nullifier_tree_root: Word, + db: &mut Db, +) -> Result<(), StateInitializationError> { + // Fetch the latest block header to get the expected roots + let latest_header = db.select_block_header_by_block_num(None).await?; + + let (block_num, expected_account_root, expected_nullifier_root) = latest_header + .map(|header| (header.block_num(), header.account_root(), header.nullifier_root())) + .unwrap_or_default(); + + // Verify account tree root + if account_tree_root != expected_account_root { + return Err(StateInitializationError::TreeStorageDiverged { + tree_name: "Account", + block_num, + tree_root: account_tree_root, + block_root: expected_account_root, + }); + } + + // Verify nullifier tree root + if nullifier_tree_root != expected_nullifier_root { + return Err(StateInitializationError::TreeStorageDiverged { + tree_name: "Nullifier", + block_num, + tree_root: nullifier_tree_root, + block_root: expected_nullifier_root, + }); + } + + Ok(()) +} diff --git a/crates/store/src/state.rs b/crates/store/src/state/mod.rs similarity index 86% rename from crates/store/src/state.rs rename to crates/store/src/state/mod.rs index 34aebfffb9..a798defcdd 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state/mod.rs @@ -27,23 +27,16 @@ use miden_node_utils::formatting::format_array; use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; -use 
miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; +use miden_protocol::block::account_tree::{AccountTree, AccountWitness}; +use miden_protocol::block::nullifier_tree::NullifierWitness; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; -#[cfg(not(feature = "rocksdb"))] -use miden_protocol::crypto::merkle::smt::MemoryStorage; -use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtProof, SmtStorage}; +use miden_protocol::crypto::merkle::smt::{SmtProof, SmtStorage}; use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; use miden_protocol::transaction::{OutputNote, PartialBlockchain}; use miden_protocol::utils::Serializable; use tokio::sync::{Mutex, RwLock, oneshot}; use tracing::{Instrument, info, info_span, instrument}; -#[cfg(feature = "rocksdb")] -use { - miden_crypto::merkle::smt::RocksDbStorage, - miden_protocol::crypto::merkle::smt::RocksDbConfig, -}; use crate::accounts::{AccountTreeWithHistory, HistoricalError}; use crate::blocks::BlockStore; @@ -72,6 +65,16 @@ use crate::errors::{ use crate::inner_forest::InnerForest; use crate::{COMPONENT, DataDirectory}; +mod loader; + +pub use loader::{ + ACCOUNT_TREE_STORAGE_DIR, + NULLIFIER_TREE_STORAGE_DIR, + StorageLoader, + TreeStorage, +}; +use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; + // STRUCTURES // ================================================================================================ @@ -83,141 +86,14 @@ pub struct TransactionInputs { pub new_account_id_prefix_is_unique: Option, } -/// The storage backend for trees. -#[cfg(feature = "rocksdb")] -pub type TreeStorage = RocksDbStorage; -#[cfg(not(feature = "rocksdb"))] -pub type TreeStorage = MemoryStorage; - -/// Converts a `LargeSmtError` into a `StateInitializationError`. 
-fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInitializationError { - match e { - LargeSmtError::Merkle(merkle_error) => { - StateInitializationError::DatabaseError(DatabaseError::MerkleError(merkle_error)) - }, - LargeSmtError::Storage(err) => { - StateInitializationError::AccountTreeIoError(err.as_report()) - }, - } -} - -/// Loads an SMT from persistent storage. -#[cfg(feature = "rocksdb")] -fn load_smt(storage: S) -> Result, StateInitializationError> { - LargeSmt::new(storage).map_err(account_tree_large_smt_error_to_init_error) -} - -/// Trait for loading trees from storage. -/// -/// For `MemoryStorage`, the tree is rebuilt from database entries on each startup. -/// For `RocksDbStorage`, the tree is loaded directly from disk (much faster for large trees). -// TODO handle on disk rocksdb storage file being missing and/or corrupted. -trait StorageLoader: SmtStorage + Sized { - /// Creates a storage backend for the given domain. - fn create(data_dir: &Path, domain: &'static str) -> Result; - - /// Loads an account tree, either from persistent storage or by rebuilding from DB. - fn load_account_tree( - self, - db: &mut Db, - ) -> impl std::future::Future, StateInitializationError>> + Send; - - /// Loads a nullifier tree, either from persistent storage or by rebuilding from DB. 
- fn load_nullifier_tree( - self, - db: &mut Db, - ) -> impl std::future::Future< - Output = Result>, StateInitializationError>, - > + Send; -} - -#[cfg(not(feature = "rocksdb"))] -impl StorageLoader for MemoryStorage { - fn create(_data_dir: &Path, _domain: &'static str) -> Result { - Ok(MemoryStorage::default()) - } - - async fn load_account_tree( - self, - db: &mut Db, - ) -> Result, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - LargeSmt::with_entries(self, smt_entries) - .map_err(account_tree_large_smt_error_to_init_error) - } - - async fn load_nullifier_tree( - self, - db: &mut Db, - ) -> Result>, StateInitializationError> { - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) - } -} - -#[cfg(feature = "rocksdb")] -impl StorageLoader for RocksDbStorage { - fn create(data_dir: &Path, domain: &'static str) -> Result { - let storage_path = data_dir.join(domain); - fs_err::create_dir_all(&storage_path) - .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; - RocksDbStorage::open(RocksDbConfig::new(storage_path)) - .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) - } - - async fn load_account_tree( - self, - db: &mut Db, - ) -> Result, StateInitializationError> { - // If RocksDB storage has data, load from it directly - let has_data = self - .has_leaves() - .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; - if has_data { - return load_smt(self); - } - - info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); - let account_data = db.select_all_account_commitments().await?; - let smt_entries 
= account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - LargeSmt::with_entries(self, smt_entries) - .map_err(account_tree_large_smt_error_to_init_error) - } - - async fn load_nullifier_tree( - self, - db: &mut Db, - ) -> Result>, StateInitializationError> { - // If RocksDB storage has data, load from it directly - let has_data = self - .has_leaves() - .map_err(|e| StateInitializationError::NullifierTreeIoError(e.to_string()))?; - if has_data { - let smt = load_smt(self)?; - return Ok(NullifierTree::new_unchecked(smt)); - } - - info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) - } -} - /// Container for state that needs to be updated atomically. 
struct InnerState where S: SmtStorage, { - nullifier_tree: NullifierTree>, + nullifier_tree: miden_protocol::block::nullifier_tree::NullifierTree< + miden_protocol::crypto::merkle::smt::LargeSmt, + >, blockchain: Blockchain, account_tree: AccountTreeWithHistory, } @@ -279,15 +155,21 @@ impl State { let blockchain = load_mmr(&mut db).await?; let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); - let account_storage = TreeStorage::create(data_path, "accounttree")?; + let account_storage = TreeStorage::create(data_path, ACCOUNT_TREE_STORAGE_DIR)?; let smt = account_storage.load_account_tree(&mut db).await?; let account_tree = AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree)?; - let account_tree = AccountTreeWithHistory::new(account_tree, latest_block_num); - let nullifier_storage = TreeStorage::create(data_path, "nullifiertree")?; + let nullifier_storage = TreeStorage::create(data_path, NULLIFIER_TREE_STORAGE_DIR)?; let nullifier_tree = nullifier_storage.load_nullifier_tree(&mut db).await?; + // Verify that tree roots match the expected roots from the database. + // This catches any divergence between persistent storage and the database caused by + // corruption or incomplete shutdown. + verify_tree_consistency(account_tree.root(), nullifier_tree.root(), &mut db).await?; + + let account_tree = AccountTreeWithHistory::new(account_tree, latest_block_num); + let forest = load_smt_forest(&mut db, latest_block_num).await?; let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); @@ -1352,59 +1234,3 @@ impl State { self.db.select_transactions_records(account_ids, block_range).await } } - -// INNER STATE LOADING -// ================================================================================================ - -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_mmr(db: &mut Db) -> Result { - let block_commitments: Vec = db - .select_all_block_headers() - .await? 
- .iter() - .map(BlockHeader::commitment) - .collect(); - - // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX - // entries. - let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); - - Ok(chain_mmr) -} - -/// Loads SMT forest with storage map and vault Merkle paths for all public accounts. -#[instrument(target = COMPONENT, skip_all, fields(block_num = %block_num))] -async fn load_smt_forest( - db: &mut Db, - block_num: BlockNumber, -) -> Result { - use miden_protocol::account::delta::AccountDelta; - - let public_account_ids = db.select_all_public_account_ids().await?; - - // Acquire write lock once for the entire initialization - let mut forest = InnerForest::new(); - - // Process each account - for account_id in public_account_ids { - // Get the full account from the database - let account_info = db.select_account(account_id).await?; - let account = account_info.details.expect("public accounts always have details in DB"); - - // Convert the full account to a full-state delta - let delta = - AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); - - // Use the unified update method (will recognize it's a full-state delta) - forest.update_account(block_num, &delta)?; - - tracing::debug!( - target: COMPONENT, - %account_id, - %block_num, - "Initialized forest for account from DB" - ); - } - - Ok(forest) -} From 5478bad3405986a5e81b6154213a7eaa61324830 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 20 Jan 2026 15:36:24 -0300 Subject: [PATCH 102/125] chore: update with latest miden-base (#1526) * chore: update with latest miden-base * docs: add changelog entry * fix: rollback tx_inputs removal * fix: return empty vec in get_vault_asset_witnesses * review: remove clone * review: add attachment to notes in DB * review: add test for DB with note attachment * review: add is_single_target_network_note field & fix lint * chore: 
update to latest miden-base * review: update query docs to reflect schema changes * review: update assert comment * review: remov async move --- CHANGELOG.md | 3 +- Cargo.lock | 85 ++++-- bin/network-monitor/src/counter.rs | 17 +- bin/network-monitor/src/deploy/wallet.rs | 4 +- bin/stress-test/src/seeding/mod.rs | 11 +- bin/stress-test/src/store/mod.rs | 4 +- .../block-producer/src/domain/transaction.rs | 5 +- crates/block-producer/src/errors.rs | 3 +- .../src/test_utils/proven_tx.rs | 2 +- crates/ntx-builder/src/actor/account_state.rs | 53 ++-- crates/ntx-builder/src/actor/execute.rs | 3 +- crates/ntx-builder/src/actor/inflight_note.rs | 11 +- crates/ntx-builder/src/actor/mod.rs | 24 +- crates/ntx-builder/src/actor/note_state.rs | 16 +- crates/ntx-builder/src/builder.rs | 6 +- crates/ntx-builder/src/coordinator.rs | 25 +- crates/ntx-builder/src/store.rs | 10 +- crates/proto/Cargo.toml | 1 + crates/proto/src/domain/account.rs | 82 +++--- crates/proto/src/domain/note.rs | 128 ++++----- crates/proto/src/errors/mod.rs | 4 +- crates/proto/src/generated/note.rs | 11 +- crates/rpc/src/server/api.rs | 5 +- crates/store/src/accounts/mod.rs | 3 +- .../db/migrations/2025062000000_setup/up.sql | 39 ++- crates/store/src/db/mod.rs | 6 +- crates/store/src/db/models/conv.rs | 66 +---- .../store/src/db/models/queries/accounts.rs | 16 +- .../src/db/models/queries/accounts/tests.rs | 10 +- crates/store/src/db/models/queries/notes.rs | 82 +++--- crates/store/src/db/schema.rs | 5 +- crates/store/src/db/tests.rs | 245 +++++++----------- crates/store/src/errors.rs | 8 +- crates/store/src/genesis/config/errors.rs | 10 +- crates/store/src/genesis/config/mod.rs | 11 +- crates/store/src/server/ntx_builder.rs | 12 +- crates/store/src/state/mod.rs | 7 +- crates/validator/src/block_validation/mod.rs | 2 +- .../validator/src/tx_validation/data_store.rs | 34 +-- proto/proto/types/note.proto | 9 +- 40 files changed, 466 insertions(+), 612 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md 
index ff897a298c..0c8fdd662b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,8 +49,9 @@ - Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). - [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). - [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). -- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). +- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). - Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). +- [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/miden-node/pull/1526)). ### Fixes diff --git a/Cargo.lock b/Cargo.lock index 2d9c82f3b2..15ad3a6f49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -1355,7 +1366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -1700,6 +1711,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] [[package]] name = "hashbrown" @@ 
-2130,7 +2144,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -2497,6 +2511,22 @@ dependencies = [ "autocfg", ] +[[package]] +name = "miden-agglayer" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +dependencies = [ + "fs-err", + "miden-assembly", + "miden-core", + "miden-core-lib", + "miden-protocol", + "miden-standards", + "miden-utils-sync", + "regex", + "walkdir", +] + [[package]] name = "miden-air" version = "0.20.2" @@ -2551,8 +2581,8 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" dependencies = [ "miden-protocol", "thiserror 2.0.17", @@ -2852,6 +2882,7 @@ dependencies = [ "miden-node-proto-build", "miden-node-utils", "miden-protocol", + "miden-standards", "miette", "proptest", "prost", @@ -3044,8 +3075,8 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" dependencies = [ "bech32", "fs-err", @@ -3074,8 +3105,8 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" dependencies = [ "proc-macro2", "quote", @@ -3164,8 +3195,8 @@ dependencies 
= [ [[package]] name = "miden-standards" -version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" dependencies = [ "fs-err", "miden-assembly", @@ -3181,12 +3212,15 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" dependencies = [ "anyhow", "itertools 0.14.0", + "miden-agglayer", + "miden-assembly", "miden-block-prover", + "miden-core-lib", "miden-processor", "miden-protocol", "miden-standards", @@ -3194,13 +3228,14 @@ dependencies = [ "miden-tx-batch-prover", "rand 0.9.2", "rand_chacha 0.9.0", + "thiserror 2.0.17", "winterfell", ] [[package]] name = "miden-tx" -version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" dependencies = [ "miden-processor", "miden-protocol", @@ -3212,8 +3247,8 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.13.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#20014df8b7e64648cf771a56572e47f78911b712" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" dependencies = [ "miden-protocol", "miden-tx", @@ -3466,7 +3501,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] 
[[package]] @@ -3862,7 +3897,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "blake2", "bytes", @@ -3898,7 +3933,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "brotli", "bytes", @@ -3988,7 +4023,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash", + "ahash 0.8.12", ] [[package]] @@ -4020,7 +4055,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.1", + "hashbrown 0.12.3", "parking_lot", "rand 0.8.5", ] @@ -4840,7 +4875,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -5473,7 +5508,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -5482,7 +5517,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -6487,7 +6522,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index b64a85f2b8..86eb7a2f21 100644 --- 
a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -20,7 +20,6 @@ use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; use miden_protocol::note::{ Note, NoteAssets, - NoteExecutionHint, NoteInputs, NoteMetadata, NoteRecipient, @@ -30,7 +29,7 @@ use miden_protocol::note::{ }; use miden_protocol::transaction::{InputNotes, PartialBlockchain, TransactionArgs}; use miden_protocol::utils::Deserializable; -use miden_protocol::{Felt, Word, ZERO}; +use miden_protocol::{Felt, Word}; use miden_standards::account::interface::{AccountInterface, AccountInterfaceExt}; use miden_standards::code_builder::CodeBuilder; use miden_tx::auth::BasicAuthenticator; @@ -318,7 +317,7 @@ async fn setup_increment_task( .await? .unwrap_or(wallet_account_file.account.clone()); - let AuthSecretKey::RpoFalcon512(secret_key) = wallet_account_file + let AuthSecretKey::Falcon512Rpo(secret_key) = wallet_account_file .auth_secret_keys .first() .expect("wallet account file should have one auth secret key") @@ -770,7 +769,7 @@ async fn create_and_submit_network_note( rng: &mut ChaCha20Rng, ) -> Result<(String, AccountHeader, BlockNumber)> { // Create authenticator for transaction signing - let authenticator = BasicAuthenticator::new(&[AuthSecretKey::RpoFalcon512(secret_key.clone())]); + let authenticator = BasicAuthenticator::new(&[AuthSecretKey::Falcon512Rpo(secret_key.clone())]); let account_interface = AccountInterface::from_account(wallet_account); @@ -794,6 +793,8 @@ async fn create_and_submit_network_note( .await .context("Failed to execute transaction")?; + let tx_inputs = executed_tx.tx_inputs().to_bytes(); + let final_account = executed_tx.final_account().clone(); // Prove the transaction @@ -803,7 +804,7 @@ async fn create_and_submit_network_note( // Submit the proven transaction let request = ProvenTransaction { transaction: proven_tx.to_bytes(), - transaction_inputs: None, + transaction_inputs: Some(tx_inputs), }; let block_height: BlockNumber = rpc_client 
@@ -851,10 +852,8 @@ fn create_network_note( let metadata = NoteMetadata::new( wallet_account.id(), NoteType::Public, - NoteTag::from_account_id(counter_account.id()), - NoteExecutionHint::Always, - ZERO, - )?; + NoteTag::with_account_target(counter_account.id()), + ); let serial_num = Word::new([ Felt::new(rng.random()), diff --git a/bin/network-monitor/src/deploy/wallet.rs b/bin/network-monitor/src/deploy/wallet.rs index 89c616c178..de687ab6d4 100644 --- a/bin/network-monitor/src/deploy/wallet.rs +++ b/bin/network-monitor/src/deploy/wallet.rs @@ -22,7 +22,7 @@ use crate::COMPONENT; pub fn create_wallet_account() -> Result<(Account, SecretKey)> { let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() }; + let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; let init_seed: [u8; 32] = rng.random(); let wallet_account = create_basic_wallet( @@ -41,7 +41,7 @@ pub fn save_wallet_account( secret_key: &SecretKey, file_path: &Path, ) -> Result<()> { - let auth_secret_key = AuthSecretKey::RpoFalcon512(secret_key.clone()); + let auth_secret_key = AuthSecretKey::Falcon512Rpo(secret_key.clone()); let account_file = AccountFile::new(account.clone(), vec![auth_secret_key]); account_file.write(file_path)?; Ok(()) diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index a3a258892d..e0fe79338f 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -34,6 +34,7 @@ use miden_protocol::block::{ use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; use miden_protocol::crypto::dsa::falcon512_rpo::{PublicKey, SecretKey}; use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::errors::AssetError; use miden_protocol::note::{Note, NoteHeader, NoteId, NoteInclusionProof}; use 
miden_protocol::transaction::{ InputNote, @@ -45,8 +46,8 @@ use miden_protocol::transaction::{ TransactionHeader, }; use miden_protocol::utils::Serializable; -use miden_protocol::{AssetError, Felt, ONE, Word}; -use miden_standards::account::auth::AuthRpoFalcon512; +use miden_protocol::{Felt, ONE, Word}; +use miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::account::faucets::BasicFungibleFaucet; use miden_standards::account::wallets::BasicWallet; use miden_standards::note::create_p2id_note; @@ -314,7 +315,7 @@ fn create_note(faucet_id: AccountId, target_id: AccountId, rng: &mut RpoRandomCo target_id, vec![asset], miden_protocol::note::NoteType::Public, - Felt::default(), + miden_protocol::note::NoteAttachment::default(), rng, ) .expect("note creation failed") @@ -327,7 +328,7 @@ fn create_account(public_key: PublicKey, index: u64, storage_mode: AccountStorag AccountBuilder::new(init_seed.try_into().unwrap()) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(storage_mode) - .with_auth_component(AuthRpoFalcon512::new(public_key.into())) + .with_auth_component(AuthFalcon512Rpo::new(public_key.into())) .with_component(BasicWallet) .build() .unwrap() @@ -345,7 +346,7 @@ fn create_faucet() -> Account { .account_type(AccountType::FungibleFaucet) .storage_mode(AccountStorageMode::Private) .with_component(BasicFungibleFaucet::new(token_symbol, 2, Felt::new(u64::MAX)).unwrap()) - .with_auth_component(AuthRpoFalcon512::new(key_pair.public_key().into())) + .with_auth_component(AuthFalcon512Rpo::new(key_pair.public_key().into())) .build() .unwrap() } diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index 59e094ba50..e4960bb7e1 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -92,7 +92,7 @@ pub async fn sync_state( ) -> (Duration, proto::rpc::SyncStateResponse) { let note_tags = account_ids .iter() - .map(|id| u32::from(NoteTag::from_account_id(*id))) + .map(|id| 
u32::from(NoteTag::with_account_target(*id))) .collect::>(); let account_ids = account_ids @@ -158,7 +158,7 @@ pub async fn sync_notes( ) -> Duration { let note_tags = account_ids .iter() - .map(|id| u32::from(NoteTag::from_account_id(*id))) + .map(|id| u32::from(NoteTag::with_account_target(*id))) .collect::>(); let sync_request = proto::rpc::SyncNotesRequest { block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 0c819d06fb..5b2ab30b32 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::block::BlockNumber; -use miden_protocol::note::Nullifier; +use miden_protocol::note::{NoteHeader, Nullifier}; use miden_protocol::transaction::{OutputNote, ProvenTransaction, TransactionId, TxAccountUpdate}; use crate::errors::VerifyTxError; @@ -119,8 +119,7 @@ impl AuthenticatedTransaction { pub fn unauthenticated_note_commitments(&self) -> impl Iterator + '_ { self.inner .unauthenticated_notes() - .copied() - .map(|header| header.commitment()) + .map(NoteHeader::commitment) .filter(|commitment| !self.notes_authenticated_by_store.contains(commitment)) } diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 38ac067163..40c74c99f5 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -2,11 +2,12 @@ use core::error::Error as CoreError; use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; +use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::block::BlockNumber; +use miden_protocol::errors::{ProposedBatchError, ProposedBlockError, ProvenBatchError}; use miden_protocol::note::Nullifier; use 
miden_protocol::transaction::TransactionId; -use miden_protocol::{ProposedBatchError, ProposedBlockError, ProvenBatchError, Word}; use miden_remote_prover_client::RemoteProverClientError; use thiserror::Error; use tokio::task::JoinError; diff --git a/crates/block-producer/src/test_utils/proven_tx.rs b/crates/block-producer/src/test_utils/proven_tx.rs index aa6ec310ef..b8d67e9fbe 100644 --- a/crates/block-producer/src/test_utils/proven_tx.rs +++ b/crates/block-producer/src/test_utils/proven_tx.rs @@ -131,7 +131,7 @@ impl MockProvenTxBuilder { .map(|note_index| { let note = Note::mock_noop(Word::from([0, 0, 0, note_index])); - OutputNote::Header(*note.header()) + OutputNote::Header(note.header().clone()) }) .collect(); diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index bfc5a41b71..3a9015a265 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::num::NonZeroUsize; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; use miden_node_utils::tracing::OpenTelemetrySpanExt; @@ -49,8 +49,8 @@ pub struct TransactionCandidate { /// The current state of a network account. #[derive(Clone)] pub struct NetworkAccountState { - /// The network account prefix corresponding to the network account this state represents. - account_prefix: NetworkAccountPrefix, + /// The network account ID corresponding to the network account this state represents. + account_id: NetworkAccountId, /// Component of this state which Contains the committed and inflight account updates as well /// as available and nullified notes. 
@@ -74,26 +74,23 @@ impl NetworkAccountState { #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] pub async fn load( account: Account, - account_prefix: NetworkAccountPrefix, + account_id: NetworkAccountId, store: &StoreClient, block_num: BlockNumber, ) -> Result { - let notes = store.get_unconsumed_network_notes(account_prefix, block_num.as_u32()).await?; + let notes = store.get_unconsumed_network_notes(account_id, block_num.as_u32()).await?; let notes = notes .into_iter() - .filter_map(|note| { - if let NetworkNote::SingleTarget(note) = note { - Some(note) - } else { - None - } + .map(|note| { + let NetworkNote::SingleTarget(note) = note; + note }) .collect::>(); let account = NetworkAccountNoteState::new(account, notes); let state = Self { account, - account_prefix, + account_id, inflight_txs: BTreeMap::default(), nullifier_idx: HashSet::default(), }; @@ -166,7 +163,7 @@ impl NetworkAccountState { } => { // Filter network notes relevant to this account. let network_notes = filter_by_prefix_and_map_to_single_target( - self.account_prefix, + self.account_id, network_notes.clone(), ); self.add_transaction(*id, nullifiers, &network_notes, account_delta.as_ref()); @@ -209,12 +206,12 @@ impl NetworkAccountState { let mut tx_impact = TransactionImpact::default(); if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { - let account_prefix = update.prefix(); - if account_prefix == self.account_prefix { + let account_id = update.network_account_id(); + if account_id == self.account_id { match update { NetworkAccountEffect::Updated(account_delta) => { self.account.add_delta(&account_delta); - tx_impact.account_delta = Some(account_prefix); + tx_impact.account_delta = Some(account_id); }, NetworkAccountEffect::Created(_) => {}, } @@ -222,9 +219,9 @@ impl NetworkAccountState { } for note in network_notes { assert_eq!( - note.account_prefix(), - self.account_prefix, - "transaction note prefix does not match network account 
actor's prefix" + note.account_id(), + self.account_id, + "note's account ID does not match network account actor's account ID" ); tx_impact.notes.insert(note.nullifier()); self.nullifier_idx.insert(note.nullifier()); @@ -253,7 +250,7 @@ impl NetworkAccountState { }; if let Some(prefix) = impact.account_delta { - if prefix == self.account_prefix { + if prefix == self.account_id { self.account.commit_delta(); } } @@ -276,10 +273,10 @@ impl NetworkAccountState { }; // Revert account creation. - if let Some(account_prefix) = impact.account_delta { + if let Some(account_id) = impact.account_delta { // Account creation reverted, actor must stop. - if account_prefix == self.account_prefix && self.account.revert_delta() { - return Some(ActorShutdownReason::AccountReverted(account_prefix)); + if account_id == self.account_id && self.account.revert_delta() { + return Some(ActorShutdownReason::AccountReverted(account_id)); } } @@ -318,7 +315,7 @@ impl NetworkAccountState { #[derive(Clone, Default)] struct TransactionImpact { /// The network account this transaction added an account delta to. - account_delta: Option, + account_delta: Option, /// Network notes this transaction created. notes: BTreeSet, @@ -335,16 +332,14 @@ impl TransactionImpact { /// Filters network notes by prefix and maps them to single target network notes. 
fn filter_by_prefix_and_map_to_single_target( - account_prefix: NetworkAccountPrefix, + account_id: NetworkAccountId, notes: Vec, ) -> Vec { notes .into_iter() .filter_map(|note| match note { - NetworkNote::SingleTarget(note) if note.account_prefix() == account_prefix => { - Some(note) - }, - _ => None, + NetworkNote::SingleTarget(note) if note.account_id() == account_id => Some(note), + NetworkNote::SingleTarget(_) => None, }) .collect::>() } diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index cb38dc89a8..66f22f8c06 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -4,6 +4,7 @@ use miden_node_proto::clients::ValidatorClient; use miden_node_proto::generated::{self as proto}; use miden_node_utils::lru_cache::LruCache; use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_protocol::Word; use miden_protocol::account::{ Account, AccountId, @@ -13,6 +14,7 @@ use miden_protocol::account::{ }; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::errors::TransactionInputError; use miden_protocol::note::{Note, NoteScript}; use miden_protocol::transaction::{ AccountInputs, @@ -26,7 +28,6 @@ use miden_protocol::transaction::{ TransactionInputs, }; use miden_protocol::vm::FutureMaybeSend; -use miden_protocol::{TransactionInputError, Word}; use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; use miden_tx::auth::UnreachableAuth; use miden_tx::utils::Serializable; diff --git a/crates/ntx-builder/src/actor/inflight_note.rs b/crates/ntx-builder/src/actor/inflight_note.rs index 626b474ace..23c7d06d72 100644 --- a/crates/ntx-builder/src/actor/inflight_note.rs +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -46,15 +46,10 @@ impl InflightNetworkNote { /// Checks if the network note is available for execution. 
/// - /// The note is available if it can be consumed and the backoff period has passed. + /// The note is available if the backoff period has passed. pub fn is_available(&self, block_num: BlockNumber) -> bool { - let can_consume = self - .to_inner() - .metadata() - .execution_hint() - .can_be_consumed(block_num) - .unwrap_or(true); - can_consume && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) + self.note.can_be_consumed(block_num).unwrap_or(true) + && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) } /// Registers a failed attempt to execute the network note at the specified block number. diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index 602dde11b2..f743d79085 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -9,7 +9,7 @@ use std::time::Duration; use account_state::{NetworkAccountState, TransactionCandidate}; use futures::FutureExt; use miden_node_proto::clients::{Builder, ValidatorClient}; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_utils::ErrorReport; use miden_node_utils::lru_cache::LruCache; @@ -33,7 +33,7 @@ use crate::store::StoreClient; /// The reason an actor has shut down. pub enum ActorShutdownReason { /// Occurs when the transaction that created the actor is reverted. - AccountReverted(NetworkAccountPrefix), + AccountReverted(NetworkAccountId), /// Occurs when an account actor detects failure in the messaging channel used by the /// coordinator. EventChannelClosed, @@ -42,7 +42,7 @@ pub enum ActorShutdownReason { /// Occurs when an account actor detects its corresponding cancellation token has been triggered /// by the coordinator. Cancellation tokens are triggered by the coordinator to initiate /// graceful shutdown of actors. 
- Cancelled(NetworkAccountPrefix), + Cancelled(NetworkAccountId), } // ACCOUNT ACTOR CONFIG @@ -78,7 +78,7 @@ pub enum AccountOrigin { /// store yet. Transaction(Box), /// Accounts that already exist in the store. - Store(NetworkAccountPrefix), + Store(NetworkAccountId), } impl AccountOrigin { @@ -93,16 +93,16 @@ impl AccountOrigin { } /// Returns an [`AccountOrigin::Store`]. - pub fn store(prefix: NetworkAccountPrefix) -> Self { - AccountOrigin::Store(prefix) + pub fn store(account_id: NetworkAccountId) -> Self { + AccountOrigin::Store(account_id) } - /// Returns the [`NetworkAccountPrefix`] of the account. - pub fn prefix(&self) -> NetworkAccountPrefix { + /// Returns the [`NetworkAccountId`] of the account. + pub fn id(&self) -> NetworkAccountId { match self { - AccountOrigin::Transaction(account) => NetworkAccountPrefix::try_from(account.id()) + AccountOrigin::Transaction(account) => NetworkAccountId::try_from(account.id()) .expect("actor accounts are always network accounts"), - AccountOrigin::Store(prefix) => *prefix, + AccountOrigin::Store(account_id) => *account_id, } } } @@ -213,7 +213,7 @@ impl AccountActor { }; let block_num = self.chain_state.read().await.chain_tip_header.block_num(); let mut state = - NetworkAccountState::load(account, self.origin.prefix(), &self.store, block_num) + NetworkAccountState::load(account, self.origin.id(), &self.store, block_num) .await .expect("actor should be able to load account state"); @@ -229,7 +229,7 @@ impl AccountActor { }; tokio::select! { _ = self.cancel_token.cancelled() => { - return ActorShutdownReason::Cancelled(self.origin.prefix()); + return ActorShutdownReason::Cancelled(self.origin.id()); } // Handle mempool events. 
event = self.event_rx.recv() => { diff --git a/crates/ntx-builder/src/actor/note_state.rs b/crates/ntx-builder/src/actor/note_state.rs index 9de85dd6ad..87b91fc21a 100644 --- a/crates/ntx-builder/src/actor/note_state.rs +++ b/crates/ntx-builder/src/actor/note_state.rs @@ -1,6 +1,6 @@ use std::collections::{HashMap, VecDeque}; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::note::SingleTargetNetworkNote; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{Account, AccountDelta, AccountId}; @@ -33,7 +33,7 @@ pub struct NetworkAccountNoteState { impl NetworkAccountNoteState { /// Creates a new account state from the supplied account and notes. pub fn new(account: Account, notes: Vec) -> Self { - let account_prefix = NetworkAccountPrefix::try_from(account.id()) + let account_id = NetworkAccountId::try_from(account.id()) .expect("only network accounts are used for account state"); let mut state = Self { @@ -46,8 +46,8 @@ impl NetworkAccountNoteState { for note in notes { // Currently only support single target network notes in NTB. assert!( - note.account_prefix() == account_prefix, - "Notes supplied into account state must match expected account prefix" + note.account_id() == account_id, + "Notes supplied into account state must match expected account ID" ); state.add_note(note); } @@ -210,15 +210,15 @@ impl NetworkAccountEffect { AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), }; - update.account_id().is_network().then_some(update) + update.protocol_account_id().is_network().then_some(update) } - pub fn prefix(&self) -> NetworkAccountPrefix { + pub fn network_account_id(&self) -> NetworkAccountId { // SAFETY: This is a network account by construction. 
- self.account_id().try_into().unwrap() + self.protocol_account_id().try_into().unwrap() } - fn account_id(&self) -> AccountId { + fn protocol_account_id(&self) -> AccountId { match self { NetworkAccountEffect::Created(acc) => acc.id(), NetworkAccountEffect::Updated(delta) => delta.id(), diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 5a1b091a69..84c711385f 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -4,7 +4,7 @@ use std::time::Duration; use anyhow::Context; use futures::TryStreamExt; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_utils::lru_cache::LruCache; use miden_protocol::Word; @@ -155,9 +155,9 @@ impl NetworkTransactionBuilder { // Create initial set of actors based on all known network accounts. let account_ids = store.get_network_account_ids().await?; for account_id in account_ids { - if let Ok(account_prefix) = NetworkAccountPrefix::try_from(account_id) { + if let Ok(account_id) = NetworkAccountId::try_from(account_id) { self.coordinator - .spawn_actor(AccountOrigin::store(account_prefix), &actor_context) + .spawn_actor(AccountOrigin::store(account_id), &actor_context) .await?; } } diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 7b5a588a9c..f6c0389114 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use anyhow::Context; use indexmap::IndexMap; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::domain::note::NetworkNote; use miden_protocol::account::delta::AccountUpdateDetails; @@ -69,7 +69,7 @@ pub struct Coordinator { /// When actors are spawned, they register 
their communication channel here. When events need /// to be broadcast, this registry is used to locate the appropriate actors. The registry is /// automatically cleaned up when actors complete their execution. - actor_registry: HashMap, + actor_registry: HashMap, /// Join set for managing actor tasks and monitoring their completion status. /// @@ -89,7 +89,7 @@ pub struct Coordinator { /// Cache of events received from the mempool that predate corresponding network accounts. /// Grouped by account prefix to allow targeted event delivery to actors upon creation. - predating_events: HashMap>>, + predating_events: HashMap>>, } impl Coordinator { @@ -118,7 +118,7 @@ impl Coordinator { origin: AccountOrigin, actor_context: &AccountActorContext, ) -> Result<(), SendError>> { - let account_prefix = origin.prefix(); + let account_prefix = origin.id(); // If an actor already exists for this account prefix, something has gone wrong. if let Some(handle) = self.actor_registry.remove(&account_prefix) { @@ -248,15 +248,14 @@ impl Coordinator { // Determine target actors for each note. for note in network_notes { - if let NetworkNote::SingleTarget(note) = note { - let prefix = note.account_prefix(); - if let Some(actor) = self.actor_registry.get(&prefix) { - // Register actor as target. - target_actors.insert(prefix, actor); - } else { - // Cache event for every note that doesn't have a corresponding actor. - self.predating_events.entry(prefix).or_default().insert(*id, event.clone()); - } + let NetworkNote::SingleTarget(note) = note; + let prefix = note.account_id(); + if let Some(actor) = self.actor_registry.get(&prefix) { + // Register actor as target. + target_actors.insert(prefix, actor); + } else { + // Cache event for every note that doesn't have a corresponding actor. 
+ self.predating_events.entry(prefix).or_default().insert(*id, event.clone()); } } } diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 447571a5a0..784a27101e 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -1,7 +1,7 @@ use std::time::Duration; use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::note::NetworkNote; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::BlockRange; @@ -109,9 +109,9 @@ impl StoreClient { #[instrument(target = COMPONENT, name = "store.client.get_network_account", skip_all, err)] pub async fn get_network_account( &self, - prefix: NetworkAccountPrefix, + account_id: NetworkAccountId, ) -> Result, StoreError> { - let request = proto::store::AccountIdPrefix { account_id_prefix: prefix.inner() }; + let request = proto::store::AccountIdPrefix { account_id_prefix: account_id.prefix() }; let store_response = self .inner @@ -140,7 +140,7 @@ impl StoreClient { #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] pub async fn get_unconsumed_network_notes( &self, - network_account_prefix: NetworkAccountPrefix, + network_account_id: NetworkAccountId, block_num: u32, ) -> Result, StoreError> { // Upper bound of each note is ~10KB. Limit page size to ~10MB. 
@@ -154,7 +154,7 @@ impl StoreClient { let req = proto::store::UnconsumedNetworkNotesRequest { page_token, page_size: PAGE_SIZE, - network_account_id_prefix: network_account_prefix.inner(), + network_account_id_prefix: network_account_id.prefix(), block_num, }; let resp = store_client.get_unconsumed_network_notes(req).await?.into_inner(); diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 255b27c9df..6d3589ca3d 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -21,6 +21,7 @@ http = { workspace = true } miden-node-grpc-error-macro = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { workspace = true } +miden-standards = { workspace = true } prost = { workspace = true } thiserror = { workspace = true } tonic = { default-features = true, workspace = true } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 57e9a4de84..3bd3aa87cc 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -18,8 +18,9 @@ use miden_protocol::block::BlockNumber; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::crypto::merkle::smt::SmtProof; -use miden_protocol::note::{NoteExecutionMode, NoteTag}; +use miden_protocol::note::NoteAttachment; use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use miden_standards::note::NetworkAccountTarget; use thiserror::Error; use super::try_convert; @@ -1022,76 +1023,95 @@ impl From for proto::primitives::Asset { pub type AccountPrefix = u32; -/// Newtype wrapper for network account prefix. +/// Newtype wrapper for network account IDs. +/// /// Provides type safety for accounts that are meant for network execution. +/// This wraps the full `AccountId` of a network account, typically extracted +/// from a `NetworkAccountTarget` attachment. 
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub struct NetworkAccountPrefix(u32); +pub struct NetworkAccountId(AccountId); -impl std::fmt::Display for NetworkAccountPrefix { +impl std::fmt::Display for NetworkAccountId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(&self.0, f) } } -impl NetworkAccountPrefix { - pub fn inner(&self) -> u32 { +impl NetworkAccountId { + /// Returns the inner `AccountId`. + pub fn inner(&self) -> AccountId { self.0 } -} -impl TryFrom for NetworkAccountPrefix { - type Error = NetworkAccountError; - - fn try_from(value: u32) -> Result { - if value >> 30 != 0 { - return Err(NetworkAccountError::InvalidPrefix(value)); - } - Ok(NetworkAccountPrefix(value)) + /// Gets the 30-bit prefix of the account ID used for tag matching. + pub fn prefix(&self) -> AccountPrefix { + get_account_id_tag_prefix(self.0) } } -impl TryFrom for NetworkAccountPrefix { +impl TryFrom for NetworkAccountId { type Error = NetworkAccountError; fn try_from(id: AccountId) -> Result { if !id.is_network() { return Err(NetworkAccountError::NotNetworkAccount(id)); } - let prefix = get_account_id_tag_prefix(id); - Ok(NetworkAccountPrefix(prefix)) + Ok(NetworkAccountId(id)) } } -impl TryFrom for NetworkAccountPrefix { +impl TryFrom<&NoteAttachment> for NetworkAccountId { type Error = NetworkAccountError; - fn try_from(tag: NoteTag) -> Result { - if tag.execution_mode() != NoteExecutionMode::Network || !tag.is_single_target() { - return Err(NetworkAccountError::InvalidExecutionMode(tag)); - } + fn try_from(attachment: &NoteAttachment) -> Result { + let target = NetworkAccountTarget::try_from(attachment) + .map_err(|e| NetworkAccountError::InvalidAttachment(e.to_string()))?; + Ok(NetworkAccountId(target.target_id())) + } +} - let tag_inner: u32 = tag.into(); - assert!(tag_inner >> 30 == 0, "first 2 bits have to be 0"); - Ok(NetworkAccountPrefix(tag_inner)) +impl TryFrom for NetworkAccountId { + type Error = NetworkAccountError; + + fn 
try_from(attachment: NoteAttachment) -> Result { + NetworkAccountId::try_from(&attachment) } } -impl From for u32 { - fn from(value: NetworkAccountPrefix) -> Self { +impl From for AccountId { + fn from(value: NetworkAccountId) -> Self { value.inner() } } +impl From for u32 { + /// Returns the 30-bit prefix of the network account ID. + /// This is used for note tag matching. + fn from(value: NetworkAccountId) -> Self { + value.prefix() + } +} + #[derive(Debug, Error)] pub enum NetworkAccountError { #[error("account ID {0} is not a valid network account ID")] NotNetworkAccount(AccountId), - #[error("note tag {0} is not valid for network account execution")] - InvalidExecutionMode(NoteTag), - #[error("note prefix should be 30-bit long ({0} has non-zero in the 2 most significant bits)")] + #[error("invalid network account attachment: {0}")] + InvalidAttachment(String), + #[error("invalid network account prefix: {0}")] InvalidPrefix(u32), } +/// Validates that a u32 represents a valid network account prefix. +/// +/// Network accounts have a 30-bit prefix (top 2 bits must be 0). +pub fn validate_network_account_prefix(prefix: u32) -> Result { + if prefix >> 30 != 0 { + return Err(NetworkAccountError::InvalidPrefix(prefix)); + } + Ok(prefix) +} + /// Gets the 30-bit prefix of the account ID. 
fn get_account_id_tag_prefix(id: AccountId) -> AccountPrefix { (id.prefix().as_u64() >> 34) as AccountPrefix diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index c4065b2982..5098ef41fb 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -1,8 +1,10 @@ +use miden_protocol::Word; +use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ Note, + NoteAttachment, NoteDetails, - NoteExecutionHint, NoteId, NoteInclusionProof, NoteMetadata, @@ -12,10 +14,10 @@ use miden_protocol::note::{ Nullifier, }; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_protocol::{Felt, Word}; +use miden_standards::note::NetworkAccountTarget; use thiserror::Error; -use super::account::NetworkAccountPrefix; +use super::account::NetworkAccountId; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; @@ -28,20 +30,24 @@ impl TryFrom for NoteMetadata { .ok_or_else(|| proto::note::NoteMetadata::missing_field(stringify!(sender)))? .try_into()?; let note_type = NoteType::try_from(u64::from(value.note_type))?; - let tag = NoteTag::from(value.tag); + let tag = NoteTag::new(value.tag); - let execution_hint = NoteExecutionHint::try_from(value.execution_hint)?; - - let aux = Felt::try_from(value.aux).map_err(|_| ConversionError::NotAValidFelt)?; + // Deserialize attachment if present + let attachment = if value.attachment.is_empty() { + NoteAttachment::default() + } else { + NoteAttachment::read_from_bytes(&value.attachment) + .map_err(|err| ConversionError::deserialization_error("NoteAttachment", err))? + }; - Ok(NoteMetadata::new(sender, note_type, tag, execution_hint, aux)?) 
+ Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) } } impl From for proto::note::NetworkNote { fn from(note: Note) -> Self { Self { - metadata: Some(proto::note::NoteMetadata::from(*note.metadata())), + metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: NoteDetails::from(note).to_bytes(), } } @@ -50,7 +56,7 @@ impl From for proto::note::NetworkNote { impl From for proto::note::Note { fn from(note: Note) -> Self { Self { - metadata: Some(proto::note::NoteMetadata::from(*note.metadata())), + metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: Some(NoteDetails::from(note).to_bytes()), } } @@ -60,7 +66,7 @@ impl From for proto::note::NetworkNote { fn from(note: NetworkNote) -> Self { let note = Note::from(note); Self { - metadata: Some(proto::note::NoteMetadata::from(*note.metadata())), + metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: NoteDetails::from(note).to_bytes(), } } @@ -70,17 +76,10 @@ impl From for proto::note::NoteMetadata { fn from(val: NoteMetadata) -> Self { let sender = Some(val.sender().into()); let note_type = val.note_type() as u32; - let tag = val.tag().into(); - let execution_hint: u64 = val.execution_hint().into(); - let aux = val.aux().into(); - - proto::note::NoteMetadata { - sender, - note_type, - tag, - execution_hint, - aux, - } + let tag = val.tag().as_u32(); + let attachment = val.attachment().to_bytes(); + + proto::note::NoteMetadata { sender, note_type, tag, attachment } } } @@ -184,14 +183,12 @@ impl TryFrom for Note { #[derive(Clone, Debug, PartialEq, Eq)] pub enum NetworkNote { SingleTarget(SingleTargetNetworkNote), - MultiTarget(MultiTargetNetworkNote), } impl NetworkNote { pub fn inner(&self) -> &Note { match self { - NetworkNote::SingleTarget(note) => ¬e.0, - NetworkNote::MultiTarget(note) => ¬e.0, + NetworkNote::SingleTarget(note) => note.inner(), } } @@ -211,8 +208,7 @@ impl NetworkNote { impl From for Note { 
fn from(value: NetworkNote) -> Self { match value { - NetworkNote::SingleTarget(note) => note.0, - NetworkNote::MultiTarget(note) => note.0, + NetworkNote::SingleTarget(note) => note.into(), } } } @@ -221,15 +217,7 @@ impl TryFrom for NetworkNote { type Error = NetworkNoteError; fn try_from(note: Note) -> Result { - if note.is_network_note() { - if note.metadata().tag().is_single_target() { - Ok(NetworkNote::SingleTarget(SingleTargetNetworkNote(note))) - } else { - Ok(NetworkNote::MultiTarget(MultiTargetNetworkNote(note))) - } - } else { - Err(NetworkNoteError::InvalidExecutionMode(note.metadata().tag())) - } + SingleTargetNetworkNote::try_from(note).map(NetworkNote::SingleTarget) } } @@ -241,43 +229,22 @@ impl TryFrom for NetworkNote { } } -// MULTI TARGET NETWORK NOTE -// ================================================================================================ - -/// A newtype that wraps around notes having multiple targets to be used in a network mode. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct MultiTargetNetworkNote(Note); - -impl TryFrom for MultiTargetNetworkNote { - type Error = NetworkNoteError; - - fn try_from(note: Note) -> Result { - if note.is_network_note() && !note.metadata().tag().is_single_target() { - Ok(Self(note)) - } else { - Err(NetworkNoteError::InvalidExecutionMode(note.metadata().tag())) - } - } -} - -impl TryFrom for MultiTargetNetworkNote { - type Error = ConversionError; - - fn try_from(proto_note: proto::note::NetworkNote) -> Result { - from_proto(proto_note) - } -} - // SINGLE TARGET NETWORK NOTE // ================================================================================================ -/// A newtype that wraps around notes targeting a single account to be used in a network mode. +/// A newtype that wraps around notes targeting a single network account. +/// +/// A note is considered a single-target network note if its attachment +/// is a valid `NetworkAccountTarget`. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub struct SingleTargetNetworkNote(Note); +pub struct SingleTargetNetworkNote { + note: Note, + account_target: NetworkAccountTarget, +} impl SingleTargetNetworkNote { pub fn inner(&self) -> &Note { - &self.0 + &self.note } pub fn metadata(&self) -> &NoteMetadata { @@ -292,18 +259,19 @@ impl SingleTargetNetworkNote { self.inner().id() } - /// The account prefix that this note targets. - pub fn account_prefix(&self) -> NetworkAccountPrefix { - self.metadata() - .tag() - .try_into() - .expect("Single target network note's tag should contain an account prefix") + /// The network account ID that this note targets. + pub fn account_id(&self) -> NetworkAccountId { + self.account_target.target_id().try_into().expect("always a network account ID") + } + + pub fn can_be_consumed(&self, block_num: BlockNumber) -> Option { + self.account_target.execution_hint().can_be_consumed(block_num) } } impl From for Note { fn from(value: SingleTargetNetworkNote) -> Self { - value.0 + value.note } } @@ -311,11 +279,11 @@ impl TryFrom for SingleTargetNetworkNote { type Error = NetworkNoteError; fn try_from(note: Note) -> Result { - if note.is_network_note() && note.metadata().tag().is_single_target() { - Ok(Self(note)) - } else { - Err(NetworkNoteError::InvalidExecutionMode(note.metadata().tag())) - } + // Single-target network notes are identified by having a NetworkAccountTarget attachment + let attachment = note.metadata().attachment(); + let account_target = NetworkAccountTarget::try_from(attachment) + .map_err(|e| NetworkNoteError::InvalidAttachment(e.to_string()))?; + Ok(Self { note, account_target }) } } @@ -346,8 +314,8 @@ where #[derive(Debug, Error)] pub enum NetworkNoteError { - #[error("note tag {0} is not a valid network note tag")] - InvalidExecutionMode(NoteTag), + #[error("note does not have a valid NetworkAccountTarget attachment: {0}")] + InvalidAttachment(String), } // NOTE SCRIPT diff --git a/crates/proto/src/errors/mod.rs 
b/crates/proto/src/errors/mod.rs index 49f0f30bd3..d654141884 100644 --- a/crates/proto/src/errors/mod.rs +++ b/crates/proto/src/errors/mod.rs @@ -4,8 +4,8 @@ use std::num::TryFromIntError; // Re-export the GrpcError derive macro for convenience pub use miden_node_grpc_error_macro::GrpcError; use miden_protocol::crypto::merkle::smt::{SmtLeafError, SmtProofError}; +use miden_protocol::errors::{AccountError, AssetError, FeeError, NoteError, StorageSlotNameError}; use miden_protocol::utils::DeserializationError; -use miden_protocol::{AccountError, AssetError, FeeError, StorageSlotNameError}; use thiserror::Error; use crate::domain::note::NetworkNoteError; @@ -24,7 +24,7 @@ pub enum ConversionError { #[error("hex error")] HexError(#[from] hex::FromHexError), #[error("note error")] - NoteError(#[from] miden_protocol::NoteError), + NoteError(#[from] NoteError), #[error("network note error")] NetworkNoteError(#[from] NetworkNoteError), #[error("SMT leaf error")] diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs index 239d6f6d0b..83d56aeb6b 100644 --- a/crates/proto/src/generated/note.rs +++ b/crates/proto/src/generated/note.rs @@ -27,14 +27,11 @@ pub struct NoteMetadata { /// See `miden_protocol::note::note_tag` for more info. #[prost(fixed32, tag = "3")] pub tag: u32, - /// Specifies when a note is ready to be consumed. + /// Serialized note attachment /// - /// See `miden_protocol::note::execution_hint` for more info. - #[prost(fixed64, tag = "4")] - pub execution_hint: u64, - /// An arbitrary user-defined value. - #[prost(fixed64, tag = "5")] - pub aux: u64, + /// See `miden_protocol::note::NoteAttachment` for more info. + #[prost(bytes = "vec", tag = "4")] + pub attachment: ::prost::alloc::vec::Vec, } /// Represents a note. 
/// diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 42889ef633..fd3ee97c65 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -360,7 +360,7 @@ impl api_server::Api for RpcService { let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); - let new_note = Note::new(note.assets().clone(), *note.metadata(), recipient); + let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) }, other => other.clone(), @@ -423,7 +423,8 @@ impl api_server::Api for RpcService { let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); - let new_note = Note::new(note.assets().clone(), *note.metadata(), recipient); + let new_note = + Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) }, other => other.clone(), diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index 7fa5be8e79..d015408adb 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -21,7 +21,8 @@ use miden_protocol::crypto::merkle::{ NodeIndex, SparseMerklePath, }; -use miden_protocol::{AccountTreeError, EMPTY_WORD, Word}; +use miden_protocol::errors::AccountTreeError; +use miden_protocol::{EMPTY_WORD, Word}; #[cfg(test)] mod tests; diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index adf06e2a32..32c618aaba 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -43,28 +43,27 @@ CREATE INDEX idx_accounts_block_num ON accounts(block_num); CREATE INDEX idx_accounts_code_commitment ON accounts(code_commitment) WHERE code_commitment IS NOT NULL; CREATE 
TABLE notes ( - committed_at INTEGER NOT NULL, -- Block number when the note was committed - batch_index INTEGER NOT NULL, -- Index of batch in block, starting from 0 - note_index INTEGER NOT NULL, -- Index of note in batch, starting from 0 - note_id BLOB NOT NULL, - note_commitment BLOB NOT NULL, - note_type INTEGER NOT NULL, -- 1-Public (0b01), 2-Private (0b10), 3-Encrypted (0b11) - sender BLOB NOT NULL, - tag INTEGER NOT NULL, - execution_mode INTEGER NOT NULL, -- 0-Network, 1-Local - aux INTEGER NOT NULL, - execution_hint INTEGER NOT NULL, - inclusion_path BLOB NOT NULL, -- Serialized sparse Merkle path of the note in the block's note tree - consumed_at INTEGER, -- Block number when the note was consumed - nullifier BLOB, -- Only known for public notes, null for private notes - assets BLOB, - inputs BLOB, - script_root BLOB, - serial_num BLOB, + committed_at INTEGER NOT NULL, -- Block number when the note was committed + batch_index INTEGER NOT NULL, -- Index of batch in block, starting from 0 + note_index INTEGER NOT NULL, -- Index of note in batch, starting from 0 + note_id BLOB NOT NULL, + note_commitment BLOB NOT NULL, + note_type INTEGER NOT NULL, -- 1-Public (0b01), 2-Private (0b10), 3-Encrypted (0b11) + sender BLOB NOT NULL, + tag INTEGER NOT NULL, + is_single_target_network_note INTEGER NOT NULL, -- 1 if note has NetworkAccountTarget attachment, 0 otherwise + attachment BLOB NOT NULL, -- Serialized note attachment data + inclusion_path BLOB NOT NULL, -- Serialized sparse Merkle path of the note in the block's note tree + consumed_at INTEGER, -- Block number when the note was consumed + nullifier BLOB, -- Only known for public notes, null for private notes + assets BLOB, + inputs BLOB, + script_root BLOB, + serial_num BLOB, PRIMARY KEY (committed_at, batch_index, note_index), CONSTRAINT notes_type_in_enum CHECK (note_type BETWEEN 1 AND 3), - CONSTRAINT notes_execution_mode_in_enum CHECK (execution_mode BETWEEN 0 AND 1), + CONSTRAINT 
notes_is_single_target_network_note_is_bool CHECK (is_single_target_network_note BETWEEN 0 AND 1), CONSTRAINT notes_consumed_at_is_u32 CHECK (consumed_at BETWEEN 0 AND 0xFFFFFFFF), CONSTRAINT notes_batch_index_is_u32 CHECK (batch_index BETWEEN 0 AND 0xFFFFFFFF), CONSTRAINT notes_note_index_is_u32 CHECK (note_index BETWEEN 0 AND 0xFFFFFFFF) @@ -75,7 +74,7 @@ CREATE INDEX idx_notes_note_commitment ON notes(note_commitment); CREATE INDEX idx_notes_sender ON notes(sender, committed_at); CREATE INDEX idx_notes_tag ON notes(tag, committed_at); CREATE INDEX idx_notes_nullifier ON notes(nullifier); -CREATE INDEX idx_unconsumed_network_notes ON notes(execution_mode, consumed_at); +CREATE INDEX idx_unconsumed_network_notes ON notes(is_single_target_network_note, consumed_at); -- Index for joining with block_headers on committed_at CREATE INDEX idx_notes_committed_at ON notes(committed_at); -- Index for joining with note_scripts diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index fc96212b56..7fe9c74974 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -4,7 +4,7 @@ use std::path::PathBuf; use anyhow::Context; use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; -use miden_node_proto::domain::account::{AccountInfo, AccountSummary, NetworkAccountPrefix}; +use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_proto::generated as proto; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; @@ -652,7 +652,7 @@ impl Db { /// Pagination is used to limit the number of notes returned. 
pub(crate) async fn select_unconsumed_network_notes( &self, - network_account_id_prefix: NetworkAccountPrefix, + network_account_prefix: u32, block_num: BlockNumber, page: Page, ) -> Result<(Vec, Page)> { @@ -662,7 +662,7 @@ impl Db { self.transact("unconsumed network notes for account", move |conn| { models::queries::select_unconsumed_network_notes_by_tag( conn, - network_account_id_prefix.into(), + network_account_prefix, block_num, page, ) diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 37a9b019fb..4dcd012efa 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -32,11 +32,11 @@ on relevant platforms" )] -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_protocol::Felt; use miden_protocol::account::{StorageSlotName, StorageSlotType}; use miden_protocol::block::BlockNumber; -use miden_protocol::note::{NoteExecutionMode, NoteTag}; +use miden_protocol::note::NoteTag; #[derive(Debug, thiserror::Error)] #[error("failed to convert from database type {from_type} into {into_type}")] @@ -78,42 +78,10 @@ impl SqlTypeConvert for BlockNumber { } } -impl SqlTypeConvert for NetworkAccountPrefix { - type Raw = i64; - - fn from_raw_sql(raw: Self::Raw) -> Result { - NetworkAccountPrefix::try_from(raw as u32).map_err(Self::map_err) - } - fn to_raw_sql(self) -> Self::Raw { - i64::from(self.inner()) - } -} - -impl SqlTypeConvert for NoteExecutionMode { - type Raw = i32; - - #[inline(always)] - fn from_raw_sql(raw: Self::Raw) -> Result { - #[derive(Debug, thiserror::Error)] - #[error("valid values are 0 or 1 but found {0}")] - struct ValueError(i32); - - Ok(match raw { - 0 => Self::Network, - 1 => Self::Local, - invalid => { - return Err(Self::map_err(ValueError(invalid))); - }, - }) - } - - #[inline(always)] - fn to_raw_sql(self) -> Self::Raw { - match self { - NoteExecutionMode::Network => 0, - NoteExecutionMode::Local => 
1, - } - } +/// Converts a network account ID to its 30-bit prefix for database indexing. +#[inline(always)] +pub(crate) fn network_account_id_to_prefix_sql(id: NetworkAccountId) -> i64 { + i64::from(id.prefix()) } impl SqlTypeConvert for NoteTag { @@ -122,12 +90,12 @@ impl SqlTypeConvert for NoteTag { #[inline(always)] fn from_raw_sql(raw: Self::Raw) -> Result { #[allow(clippy::cast_sign_loss)] - Ok(NoteTag::from(raw as u32)) + Ok(NoteTag::new(raw as u32)) } #[inline(always)] fn to_raw_sql(self) -> Self::Raw { - u32::from(self) as i32 + self.as_u32() as i32 } } @@ -212,24 +180,6 @@ pub(crate) fn note_type_to_raw_sql(note_type: u8) -> i32 { i32::from(note_type) } -#[inline(always)] -pub(crate) fn raw_sql_to_execution_hint(raw: i64) -> u64 { - raw as u64 -} -#[inline(always)] -pub(crate) fn execution_hint_to_raw_sql(hint: u64) -> i64 { - hint as i64 -} - -#[inline(always)] -pub(crate) fn raw_sql_to_aux(raw: i64) -> Felt { - Felt::try_from(raw as u64).unwrap() -} -#[inline(always)] -pub(crate) fn aux_to_raw_sql(hint: Felt) -> i64 { - hint.inner() as i64 -} - #[inline(always)] pub(crate) fn raw_sql_to_idx(raw: i32) -> usize { raw as usize diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index f23ecf7933..5c049916e5 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -44,7 +44,12 @@ use miden_protocol::block::{BlockAccountUpdate, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; use crate::COMPONENT; -use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; +use crate::db::models::conv::{ + SqlTypeConvert, + network_account_id_to_prefix_sql, + nonce_to_raw_sql, + raw_sql_to_nonce, +}; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; @@ -917,7 +922,7 @@ pub(crate) fn upsert_accounts( accounts: 
&[BlockAccountUpdate], block_num: BlockNumber, ) -> Result { - use proto::domain::account::NetworkAccountPrefix; + use proto::domain::account::NetworkAccountId; let mut count = 0; for update in accounts { @@ -925,8 +930,8 @@ pub(crate) fn upsert_accounts( let account_id_bytes = account_id.to_bytes(); let block_num_raw = block_num.to_raw_sql(); - let network_account_id_prefix = if account_id.is_network() { - Some(NetworkAccountPrefix::try_from(account_id)?) + let network_account_id = if account_id.is_network() { + Some(NetworkAccountId::try_from(account_id)?) } else { None }; @@ -1056,8 +1061,7 @@ pub(crate) fn upsert_accounts( let account_value = AccountRowInsert { account_id: account_id_bytes, - network_account_id_prefix: network_account_id_prefix - .map(NetworkAccountPrefix::to_raw_sql), + network_account_id_prefix: network_account_id.map(network_account_id_to_prefix_sql), account_commitment: update.final_state_commitment().to_bytes(), block_num: block_num_raw, nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 2df6309877..9206311a15 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -35,7 +35,7 @@ use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, Word}; -use miden_standards::account::auth::AuthRpoFalcon512; +use miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::code_builder::CodeBuilder; use super::*; @@ -151,7 +151,7 @@ fn create_test_account_with_storage() -> (Account, AccountId) { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - 
.with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -410,7 +410,7 @@ fn test_upsert_accounts_updates_is_latest_flag() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component_2) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -503,7 +503,7 @@ fn test_upsert_accounts_with_multiple_storage_slots() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -565,7 +565,7 @@ fn test_upsert_accounts_with_empty_storage() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index 9204adea71..f68d5447b7 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -31,14 +31,14 @@ use miden_node_utils::limiter::{ QueryParamNoteCommitmentLimit, QueryParamNoteTagLimit, }; +use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::block::{BlockNoteIndex, BlockNumber}; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ NoteAssets, + NoteAttachment, NoteDetails, - 
NoteExecutionHint, - NoteExecutionMode, NoteId, NoteInclusionProof, NoteInputs, @@ -50,13 +50,11 @@ use miden_protocol::note::{ Nullifier, }; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_protocol::{Felt, Word}; +use miden_standards::note::NetworkAccountTarget; use crate::COMPONENT; use crate::db::models::conv::{ SqlTypeConvert, - aux_to_raw_sql, - execution_hint_to_raw_sql, idx_to_raw_sql, note_type_to_raw_sql, raw_sql_to_idx, @@ -97,8 +95,7 @@ use crate::errors::NoteSyncError; /// note_type, /// sender, /// tag, -/// aux, -/// execution_hint, +/// attachment, /// inclusion_path /// FROM /// notes @@ -185,8 +182,7 @@ pub(crate) fn select_notes_since_block_by_tag_and_sender( /// notes.note_type, /// notes.sender, /// notes.tag, -/// notes.aux, -/// notes.execution_hint, +/// notes.attachment, /// notes.assets, /// notes.inputs, /// notes.serial_num, @@ -266,8 +262,7 @@ pub(crate) fn select_existing_note_commitments( /// notes.note_type, /// notes.sender, /// notes.tag, -/// notes.aux, -/// notes.execution_hint, +/// notes.attachment, /// notes.assets, /// notes.inputs, /// notes.serial_num, @@ -411,8 +406,7 @@ pub(crate) fn select_note_script_by_root( /// notes.note_type, /// notes.sender, /// notes.tag, -/// notes.aux, -/// notes.execution_hint, +/// notes.attachment, /// notes.assets, /// notes.inputs, /// notes.serial_num, @@ -422,7 +416,7 @@ pub(crate) fn select_note_script_by_root( /// FROM notes /// LEFT JOIN note_scripts ON notes.script_root = note_scripts.script_root /// WHERE -/// execution_mode = 0 AND tag = ?1 AND +/// is_single_target_network_note = TRUE AND tag = ?1 AND /// committed_at <= ?2 AND /// (consumed_at IS NULL OR consumed_at > ?2) AND notes.rowid >= ?3 /// ORDER BY notes.rowid ASC @@ -442,12 +436,6 @@ pub(crate) fn select_unconsumed_network_notes_by_tag( block_num: BlockNumber, mut page: Page, ) -> Result<(Vec, Page), DatabaseError> { - assert_eq!( - NoteExecutionMode::Network as u8, - 0, - "Hardcoded execution value 
must match query" - ); - let rowid_sel = diesel::dsl::sql::("notes.rowid"); let rowid_sel_ge = diesel::dsl::sql::("notes.rowid >= ") @@ -486,7 +474,7 @@ pub(crate) fn select_unconsumed_network_notes_by_tag( rowid_sel.clone(), ), ) - .filter(schema::notes::execution_mode.eq(NoteExecutionMode::Network.to_raw_sql())) + .filter(schema::notes::is_single_target_network_note.eq(true)) .filter(schema::notes::tag.eq(tag as i32)) .filter(schema::notes::committed_at.le(block_num.to_raw_sql())) .filter( @@ -590,8 +578,7 @@ pub struct NoteRecordWithScriptRawJoined { pub note_type: i32, pub sender: Vec, // AccountId pub tag: i32, - pub aux: i64, - pub execution_hint: i64, + pub attachment: Vec, // #[diesel(embed)] // pub metadata: NoteMetadataRaw, pub assets: Option>, @@ -615,8 +602,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined note_type, sender, tag, - aux, - execution_hint, + attachment, assets, inputs, serial_num, @@ -631,8 +617,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined note_type, sender, tag, - aux, - execution_hint, + attachment, assets, inputs, serial_num, @@ -659,8 +644,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { note_type, sender, tag, - execution_hint, - aux, + attachment, // metadata ^^^, assets, inputs, @@ -671,13 +655,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { .. 
} = raw; let index = BlockNoteIndexRawRow { batch_index, note_index }; - let metadata = NoteMetadataRawRow { - note_type, - sender, - tag, - aux, - execution_hint, - }; + let metadata = NoteMetadataRawRow { note_type, sender, tag, attachment }; let details = NoteDetailsRawRow { assets, inputs, serial_num }; let metadata = metadata.try_into()?; @@ -730,8 +708,7 @@ pub struct NoteRecordRawRow { pub note_type: i32, pub sender: Vec, // AccountId pub tag: i32, - pub aux: i64, - pub execution_hint: i64, + pub attachment: Vec, pub assets: Option>, pub inputs: Option>, @@ -747,8 +724,7 @@ pub struct NoteMetadataRawRow { note_type: i32, sender: Vec, // AccountId tag: i32, - aux: i64, - execution_hint: i64, + attachment: Vec, } #[allow(clippy::cast_sign_loss)] @@ -758,11 +734,9 @@ impl TryInto for NoteMetadataRawRow { let sender = AccountId::read_from_bytes(&self.sender[..])?; let note_type = NoteType::try_from(self.note_type as u32) .map_err(DatabaseError::conversiont_from_sql::)?; - let tag = NoteTag::from(self.tag as u32); - let execution_hint = NoteExecutionHint::try_from(self.execution_hint as u64) - .map_err(DatabaseError::conversiont_from_sql::)?; - let aux = Felt::new(self.aux as u64); - Ok(NoteMetadata::new(sender, note_type, tag, execution_hint, aux)?) 
+ let tag = NoteTag::new(self.tag as u32); + let attachment = NoteAttachment::read_from_bytes(&self.attachment)?; + Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) } } @@ -867,21 +841,26 @@ pub struct NoteInsertRowInsert { pub note_type: i32, pub sender: Vec, // AccountId pub tag: i32, - pub aux: i64, - pub execution_hint: i64, + pub attachment: Vec, pub consumed_at: Option, pub assets: Option>, pub inputs: Option>, pub serial_num: Option>, pub nullifier: Option>, pub script_root: Option>, - pub execution_mode: i32, + pub is_single_target_network_note: bool, pub inclusion_path: Vec, } impl From<(NoteRecord, Option)> for NoteInsertRowInsert { fn from((note, nullifier): (NoteRecord, Option)) -> Self { + let attachment = note.metadata.attachment(); + + let is_single_target_network_note = NetworkAccountTarget::try_from(attachment).is_ok(); + + let attachment_bytes = attachment.to_bytes(); + Self { committed_at: note.block_num.to_raw_sql(), batch_index: idx_to_raw_sql(note.note_index.batch_idx()), @@ -891,12 +870,11 @@ impl From<(NoteRecord, Option)> for NoteInsertRowInsert { note_type: note_type_to_raw_sql(note.metadata.note_type() as u8), sender: note.metadata.sender().to_bytes(), tag: note.metadata.tag().to_raw_sql(), - execution_mode: note.metadata.tag().execution_mode().to_raw_sql(), - aux: aux_to_raw_sql(note.metadata.aux()), - execution_hint: execution_hint_to_raw_sql(note.metadata.execution_hint().into()), + is_single_target_network_note, + attachment: attachment_bytes, inclusion_path: note.inclusion_path.to_bytes(), consumed_at: None::, // New notes are always unconsumed. - nullifier: nullifier.as_ref().map(Nullifier::to_bytes), /* Beware: `Option` also implements `to_bytes`, but this is not what you want. 
*/ + nullifier: nullifier.as_ref().map(Nullifier::to_bytes), assets: note.details.as_ref().map(|d| d.assets().to_bytes()), inputs: note.details.as_ref().map(|d| d.inputs().to_bytes()), script_root: note.details.as_ref().map(|d| d.script().root().to_bytes()), diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 6bf6af3cf6..e9333057c9 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -67,9 +67,8 @@ diesel::table! { note_type -> Integer, sender -> Binary, tag -> Integer, - execution_mode -> Integer, - aux -> BigInt, - execution_hint -> BigInt, + is_single_target_network_note -> Bool, + attachment -> Binary, inclusion_path -> Binary, consumed_at -> Nullable, nullifier -> Nullable, diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 3988e160da..5d3785b456 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -39,6 +39,7 @@ use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::crypto::rand::RpoRandomCoin; use miden_protocol::note::{ Note, + NoteAttachment, NoteDetails, NoteExecutionHint, NoteHeader, @@ -64,10 +65,10 @@ use miden_protocol::transaction::{ TransactionId, }; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word, ZERO}; -use miden_standards::account::auth::AuthRpoFalcon512; +use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; +use miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::code_builder::CodeBuilder; -use miden_standards::note::create_p2id_note; +use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; use pretty_assertions::assert_eq; use rand::Rng; @@ -225,7 +226,7 @@ pub fn create_note(account_id: AccountId) -> Note { FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 10).unwrap(), )], NoteType::Public, - Felt::default(), + NoteAttachment::default(), &mut *rng, ) .expect("Failed to create 
note") @@ -257,7 +258,7 @@ fn sql_select_notes() { note_index: BlockNoteIndex::new(0, i.try_into().unwrap()).unwrap(), note_id: num_to_word(u64::try_from(i).unwrap()), note_commitment: num_to_word(u64::try_from(i).unwrap()), - metadata: *new_note.metadata(), + metadata: new_note.metadata().clone(), details: Some(NoteDetails::from(&new_note)), inclusion_path: SparseMerklePath::default(), }; @@ -280,108 +281,6 @@ fn sql_select_notes() { } } -#[test] -#[miden_node_test_macro::enable_logging] -fn sql_select_notes_different_execution_hints() { - let mut conn = create_db(); - let conn = &mut conn; - - let block_num = 1.into(); - create_block(conn, block_num); - - // test querying empty table - let notes = queries::select_all_notes(conn).unwrap(); - assert!(notes.is_empty()); - - let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); - - queries::upsert_accounts(conn, &[mock_block_account_update(sender, 0)], block_num).unwrap(); - - // test multiple entries - let mut state = vec![]; - - let new_note = create_note(sender); - - let note_none = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, 0).unwrap(), - note_id: num_to_word(0), - note_commitment: num_to_word(0), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - 0.into(), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), - details: Some(NoteDetails::from(&new_note)), - inclusion_path: SparseMerklePath::default(), - }; - state.push(note_none.clone()); - - queries::insert_scripts(conn, [¬e_none]).unwrap(); // only necessary for the first note - let res = queries::insert_notes(conn, &[(note_none, None)]); - assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - - let note_id = NoteId::from_raw(num_to_word(0)); - let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; - - assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::none()); - - let note_always = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, 1).unwrap(), - 
note_id: num_to_word(1), - note_commitment: num_to_word(1), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - 0.into(), - NoteExecutionHint::always(), - Felt::default(), - ) - .unwrap(), - details: Some(NoteDetails::from(&new_note)), - inclusion_path: SparseMerklePath::default(), - }; - state.push(note_always.clone()); - - let res = queries::insert_notes(conn, &[(note_always, None)]); - assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - - let note_id = NoteId::from_raw(num_to_word(1)); - let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; - assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::always()); - - let note_after_block = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, 2).unwrap(), - note_id: num_to_word(2), - note_commitment: num_to_word(2), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - 2.into(), - NoteExecutionHint::after_block(12.into()).unwrap(), - Felt::default(), - ) - .unwrap(), - details: Some(NoteDetails::from(&new_note)), - inclusion_path: SparseMerklePath::default(), - }; - state.push(note_after_block.clone()); - - let res = queries::insert_notes(conn, &[(note_after_block, None)]); - assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note_id = NoteId::from_raw(num_to_word(2)); - let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0]; - assert_eq!( - note.metadata.execution_hint(), - NoteExecutionHint::after_block(12.into()).unwrap() - ); -} - #[test] #[miden_node_test_macro::enable_logging] fn sql_select_note_script_by_root() { @@ -403,7 +302,7 @@ fn sql_select_note_script_by_root() { note_index: BlockNoteIndex::new(0, 0.try_into().unwrap()).unwrap(), note_id: num_to_word(0), note_commitment: num_to_word(0), - metadata: *new_note.metadata(), + metadata: new_note.metadata().clone(), details: Some(NoteDetails::from(&new_note)), inclusion_path: SparseMerklePath::default(), }; @@ -466,6 +365,11 @@ fn 
sql_unconsumed_network_notes() { create_block(&mut conn, 0.into()); create_block(&mut conn, 1.into()); + // Create a NetworkAccountTarget attachment for the network account + let target = NetworkAccountTarget::new(account_note.0, NoteExecutionHint::Always) + .expect("NetworkAccountTarget creation should succeed for network account"); + let attachment: NoteAttachment = target.into(); + // Create an unconsumed note in each block. let notes = Vec::from_iter((0..2).map(|i: u32| { let note = NoteRecord { @@ -476,11 +380,9 @@ fn sql_unconsumed_network_notes() { metadata: NoteMetadata::new( account_note.0, NoteType::Public, - NoteTag::from_account_id(account_note.0), - NoteExecutionHint::none(), - Felt::default(), + NoteTag::with_account_target(account_note.0), ) - .unwrap(), + .with_attachment(attachment.clone()), details: None, inclusion_path: SparseMerklePath::default(), }; @@ -493,7 +395,7 @@ fn sql_unconsumed_network_notes() { (0..2).for_each(|i: u32| { let (result, _) = queries::select_unconsumed_network_notes_by_tag( &mut conn, - NoteTag::from_account_id(account_note.0).into(), + NoteTag::with_account_target(account_note.0).into(), i.into(), Page { token: None, @@ -510,7 +412,7 @@ fn sql_unconsumed_network_notes() { // Query against first block should return both notes. let (result, _) = queries::select_unconsumed_network_notes_by_tag( &mut conn, - NoteTag::from_account_id(account_note.0).into(), + NoteTag::with_account_target(account_note.0).into(), 0.into(), Page { token: None, @@ -523,7 +425,7 @@ fn sql_unconsumed_network_notes() { // Query against second block should return only first note. 
let (result, _) = queries::select_unconsumed_network_notes_by_tag( &mut conn, - NoteTag::from_account_id(account_note.0).into(), + NoteTag::with_account_target(account_note.0).into(), 1.into(), Page { token: None, @@ -1016,12 +918,10 @@ fn notes() { let new_note = create_note(sender); let note_index = BlockNoteIndex::new(0, 2).unwrap(); let tag = 5u32; - let note_metadata = - NoteMetadata::new(sender, NoteType::Public, tag.into(), NoteExecutionHint::none(), ZERO) - .unwrap(); + let note_metadata = NoteMetadata::new(sender, NoteType::Public, tag.into()); - let values = [(note_index, new_note.id(), note_metadata)]; - let notes_db = BlockNoteTree::with_entries(values.iter().copied()).unwrap(); + let values = [(note_index, new_note.id(), &note_metadata)]; + let notes_db = BlockNoteTree::with_entries(values).unwrap(); let inclusion_path = notes_db.open(note_index); let note = NoteRecord { @@ -1029,14 +929,7 @@ fn notes() { note_index, note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - tag.into(), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), + metadata: NoteMetadata::new(sender, NoteType::Public, tag.into()), details: Some(NoteDetails::from(&new_note)), inclusion_path: inclusion_path.clone(), }; @@ -1076,7 +969,7 @@ fn notes() { note_index: note.note_index, note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), - metadata: note.metadata, + metadata: note.metadata.clone(), details: None, inclusion_path: inclusion_path.clone(), }; @@ -1345,7 +1238,7 @@ fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account { .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(component) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap() } @@ 
-1364,14 +1257,7 @@ fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader Word::try_from([num, num, 0, 0]).unwrap(), Word::try_from([0, 0, num, num]).unwrap(), ), - NoteMetadata::new( - account_id, - NoteType::Public, - NoteTag::LocalAny(num as u32), - NoteExecutionHint::None, - Felt::default(), - ) - .unwrap(), + NoteMetadata::new(account_id, NoteType::Public, NoteTag::new(num as u32)), )]; TransactionHeader::new_unchecked( @@ -1447,7 +1333,7 @@ fn mock_account_code_and_storage( .storage_mode(storage_mode) .with_assets(assets) .with_component(account_component) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap() } @@ -1608,7 +1494,7 @@ fn genesis_with_account_assets() { .storage_mode(AccountStorageMode::Public) .with_component(account_component) .with_assets([fungible_asset.into()]) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1657,7 +1543,7 @@ fn genesis_with_account_storage_map() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1704,7 +1590,7 @@ fn genesis_with_account_assets_and_storage() { .storage_mode(AccountStorageMode::Public) .with_component(account_component) .with_assets([fungible_asset.into()]) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1735,7 +1621,7 @@ fn 
genesis_with_multiple_accounts() { .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component1) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1754,7 +1640,7 @@ fn genesis_with_multiple_accounts() { .storage_mode(AccountStorageMode::Public) .with_component(account_component2) .with_assets([fungible_asset.into()]) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1777,7 +1663,7 @@ fn genesis_with_multiple_accounts() { .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component3) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -1940,15 +1826,8 @@ fn serialization_symmetry_note_metadata() { let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); // Use a tag that roundtrips properly - NoteTag::LocalAny stores the full u32 including type // bits - let tag = NoteTag::from_account_id(sender); - let metadata = NoteMetadata::new( - sender, - NoteType::Public, - tag, - NoteExecutionHint::always(), - Felt::new(42), - ) - .unwrap(); + let tag = NoteTag::with_account_target(sender); + let metadata = NoteMetadata::new(sender, NoteType::Public, tag); let bytes = metadata.to_bytes(); let restored = NoteMetadata::read_from_bytes(&bytes).unwrap(); @@ -2086,7 +1965,7 @@ fn db_roundtrip_notes() { note_index, note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), - metadata: *new_note.metadata(), + metadata: new_note.metadata().clone(), details: 
Some(NoteDetails::from(&new_note)), inclusion_path: SparseMerklePath::default(), }; @@ -2280,7 +2159,7 @@ fn db_roundtrip_account_storage_with_maps() { .account_type(AccountType::RegularAccountUpdatableCode) .storage_mode(AccountStorageMode::Public) .with_component(account_component) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() .unwrap(); @@ -2348,3 +2227,57 @@ fn db_roundtrip_account_storage_with_maps() { "Full account commitment must match after DB roundtrip" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn test_note_metadata_with_attachment_roundtrip() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let (account_id, _) = + make_account_and_note(&mut conn, block_num, [1u8; 32], AccountStorageMode::Network); + + let target = NetworkAccountTarget::new(account_id, NoteExecutionHint::Always) + .expect("NetworkAccountTarget creation should succeed for network account"); + let attachment: NoteAttachment = target.into(); + + // Create NoteMetadata with the attachment + let metadata = + NoteMetadata::new(account_id, NoteType::Public, NoteTag::with_account_target(account_id)) + .with_attachment(attachment.clone()); + + let note = NoteRecord { + block_num, + note_index: BlockNoteIndex::new(0, 0).unwrap(), + note_id: num_to_word(1), + note_commitment: num_to_word(1), + metadata: metadata.clone(), + details: None, + inclusion_path: SparseMerklePath::default(), + }; + + queries::insert_scripts(&mut conn, [&note]).unwrap(); + queries::insert_notes(&mut conn, &[(note.clone(), None)]).unwrap(); + + // Fetch the note back and verify the attachment is preserved + let retrieved = queries::select_notes_by_id(&mut conn, &[NoteId::from_raw(note.note_id)]) + .expect("select_notes_by_id should succeed"); + + assert_eq!(retrieved.len(), 1, "Should retrieve exactly one 
note"); + + let retrieved_metadata = &retrieved[0].metadata; + assert_eq!( + retrieved_metadata.attachment(), + metadata.attachment(), + "Attachment should be preserved after DB roundtrip" + ); + + let retrieved_target = NetworkAccountTarget::try_from(retrieved_metadata.attachment()) + .expect("Should be able to parse NetworkAccountTarget from retrieved attachment"); + assert_eq!( + retrieved_target.target_id(), + account_id, + "NetworkAccountTarget should have the correct target account ID" + ); +} diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 32c345a985..a842c449d4 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -6,14 +6,13 @@ use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_node_utils::limiter::QueryLimitError; +use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::MerkleError; use miden_protocol::crypto::merkle::mmr::MmrError; use miden_protocol::crypto::utils::DeserializationError; -use miden_protocol::note::{NoteId, Nullifier}; -use miden_protocol::transaction::OutputNote; -use miden_protocol::{ +use miden_protocol::errors::{ AccountDeltaError, AccountError, AccountTreeError, @@ -23,8 +22,9 @@ use miden_protocol::{ NoteError, NullifierTreeError, StorageMapError, - Word, }; +use miden_protocol::note::{NoteId, Nullifier}; +use miden_protocol::transaction::OutputNote; use thiserror::Error; use tokio::sync::oneshot::error::RecvError; use tonic::Status; diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index e4eb4810ed..b39495c872 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -1,6 +1,12 @@ use miden_protocol::account::AccountId; +use miden_protocol::errors::{ + AccountDeltaError, 
+ AccountError, + AssetError, + FeeError, + TokenSymbolError, +}; use miden_protocol::utils::DeserializationError; -use miden_protocol::{AccountError, AssetError, FeeError, TokenSymbolError}; use miden_standards::account::faucets::FungibleFaucetError; use miden_standards::account::wallets::BasicWalletError; @@ -16,7 +22,7 @@ pub enum GenesisConfigError { #[error("asset translation from config to state failed")] Asset(#[from] AssetError), #[error("adding assets to account failed")] - AccountDelta(#[from] miden_protocol::AccountDeltaError), + AccountDelta(#[from] AccountDeltaError), #[error("the defined asset {symbol:?} has no corresponding faucet")] MissingFaucetDefinition { symbol: TokenSymbolStr }, #[error("account with id {account_id} was referenced but is not part of given genesis state")] diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index ed0c0077cb..8d1a83437b 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -23,9 +23,10 @@ use miden_protocol::account::{ use miden_protocol::asset::{FungibleAsset, TokenSymbol}; use miden_protocol::block::FeeParameters; use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey as RpoSecretKey; -use miden_protocol::{Felt, FieldElement, ONE, TokenSymbolError, ZERO}; +use miden_protocol::errors::TokenSymbolError; +use miden_protocol::{Felt, FieldElement, ONE, ZERO}; use miden_standards::AuthScheme; -use miden_standards::account::auth::AuthRpoFalcon512; +use miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::account::faucets::BasicFungibleFaucet; use miden_standards::account::wallets::create_basic_wallet; use rand::distr::weighted::Weight; @@ -159,7 +160,7 @@ impl GenesisConfig { let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() }; + let auth = 
AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; let init_seed: [u8; 32] = rng.random(); let account_type = if has_updatable_code { @@ -346,7 +347,7 @@ impl FungibleFaucetConfig { } = self; let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthRpoFalcon512::new(secret_key.public_key().into()); + let auth = AuthFalcon512Rpo::new(secret_key.public_key().into()); let init_seed: [u8; 32] = rng.random(); let max_supply = Felt::try_from(max_supply) @@ -451,7 +452,7 @@ impl AccountSecrets { .get(&account_id) .ok_or(GenesisConfigError::MissingGenesisAccount { account_id })?; let account_file = - AccountFile::new(account.clone(), vec![AuthSecretKey::RpoFalcon512(secret_key)]); + AccountFile::new(account.clone(), vec![AuthSecretKey::Falcon512Rpo(secret_key)]); Ok(AccountFileWithName { name, account_file }) }) } diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 40f1ae5b39..f08e327284 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -1,6 +1,6 @@ use std::num::{NonZero, TryFromIntError}; -use miden_node_proto::domain::account::{AccountInfo, NetworkAccountPrefix}; +use miden_node_proto::domain::account::{AccountInfo, validate_network_account_prefix}; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::store::ntx_builder_server; use miden_node_proto::generated::{self as proto}; @@ -91,13 +91,13 @@ impl ntx_builder_server::NtxBuilder for StoreApi { let request = request.into_inner(); // Validate that the call is for a valid network account prefix - let prefix = NetworkAccountPrefix::try_from(request.account_id_prefix).map_err(|err| { + let prefix = validate_network_account_prefix(request.account_id_prefix).map_err(|err| { Status::invalid_argument( err.as_report_context("request does not contain a valid network account prefix"), ) })?; let 
account_info: Option = - self.state.get_network_account_details_by_prefix(prefix.inner()).await?; + self.state.get_network_account_details_by_prefix(prefix).await?; Ok(Response::new(proto::store::MaybeAccountDetails { details: account_info.map(|acc| (&acc).into()), @@ -117,8 +117,8 @@ impl ntx_builder_server::NtxBuilder for StoreApi { ) -> Result, Status> { let request = request.into_inner(); let block_num = BlockNumber::from(request.block_num); - let network_account_id_prefix = - NetworkAccountPrefix::try_from(request.network_account_id_prefix).map_err(|err| { + let network_account_prefix = + validate_network_account_prefix(request.network_account_id_prefix).map_err(|err| { invalid_argument(err.as_report_context("invalid network_account_id_prefix")) })?; @@ -132,7 +132,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { // TODO: no need to get the whole NoteRecord here, a NetworkNote wrapper should be created // instead let (notes, next_page) = state - .get_unconsumed_network_notes_for_account(network_account_id_prefix, block_num, page) + .get_unconsumed_network_notes_for_account(network_account_prefix, block_num, page) .await .map_err(internal_error)?; diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index a798defcdd..d33a9aaf41 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -17,7 +17,6 @@ use miden_node_proto::domain::account::{ AccountStorageDetails, AccountStorageMapDetails, AccountVaultDetails, - NetworkAccountPrefix, SlotData, StorageMapRequest, }; @@ -367,7 +366,7 @@ impl State { note_index, note_id: note.id().as_word(), note_commitment: note.commitment(), - metadata: *note.metadata(), + metadata: note.metadata().clone(), details, inclusion_path, }; @@ -1207,12 +1206,12 @@ impl State { /// along with the next pagination token. 
pub async fn get_unconsumed_network_notes_for_account( &self, - network_account_id_prefix: NetworkAccountPrefix, + network_account_prefix: u32, block_num: BlockNumber, page: Page, ) -> Result<(Vec, Page), DatabaseError> { self.db - .select_unconsumed_network_notes(network_account_id_prefix, block_num, page) + .select_unconsumed_network_notes(network_account_prefix, block_num, page) .await } diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs index 68744010ff..416b2beb92 100644 --- a/crates/validator/src/block_validation/mod.rs +++ b/crates/validator/src/block_validation/mod.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use miden_protocol::ProposedBlockError; use miden_protocol::block::{BlockNumber, BlockSigner, ProposedBlock}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::errors::ProposedBlockError; use miden_protocol::transaction::TransactionId; use crate::server::ValidatedTransactions; diff --git a/crates/validator/src/tx_validation/data_store.rs b/crates/validator/src/tx_validation/data_store.rs index a48c2e8e6c..749ddaac1c 100644 --- a/crates/validator/src/tx_validation/data_store.rs +++ b/crates/validator/src/tx_validation/data_store.rs @@ -58,37 +58,11 @@ impl DataStore for TransactionInputsDataStore { fn get_vault_asset_witnesses( &self, - account_id: AccountId, - vault_root: Word, - vault_keys: BTreeSet, + _account_id: AccountId, + _vault_root: Word, + _vault_keys: BTreeSet, ) -> impl FutureMaybeSend, DataStoreError>> { - async move { - if self.tx_inputs.account().id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - if self.tx_inputs.account().vault().root() != vault_root { - return Err(DataStoreError::Other { - error_msg: "vault root mismatch".into(), - source: None, - }); - } - - Result::, _>::from_iter(vault_keys.into_iter().map(|vault_key| { - match self.tx_inputs.account().vault().open(vault_key) { - Ok(vault_proof) => { - 
AssetWitness::new(vault_proof.into()).map_err(|err| DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(err.into()), - }) - }, - Err(err) => Err(DataStoreError::Other { - error_msg: "failed to open vault".into(), - source: Some(err.into()), - }), - } - })) - } + std::future::ready(Ok(vec![])) } fn get_storage_map_witness( diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto index 1177f350c8..ac125daa06 100644 --- a/proto/proto/types/note.proto +++ b/proto/proto/types/note.proto @@ -32,13 +32,10 @@ message NoteMetadata { // See `miden_protocol::note::note_tag` for more info. fixed32 tag = 3; - // Specifies when a note is ready to be consumed. + // Serialized note attachment // - // See `miden_protocol::note::execution_hint` for more info. - fixed64 execution_hint = 4; - - // An arbitrary user-defined value. - fixed64 aux = 5; + // See `miden_protocol::note::NoteAttachment` for more info. + bytes attachment = 4; } // Represents a note. 
From f18e2c07c39935d34ff60a9c035350917da22496 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 20 Jan 2026 16:41:55 -0300 Subject: [PATCH 103/125] chore: use miden-base release (#1555) --- Cargo.lock | 77 +++++++++++++++++++++++++----------------------------- Cargo.toml | 12 ++++----- 2 files changed, 41 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15ad3a6f49..aa6572578d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,17 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.17", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -1366,7 +1355,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1711,9 +1700,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -2144,7 +2130,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2513,8 +2499,9 @@ dependencies = [ [[package]] name = "miden-agglayer" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e78bbff1742ac44ddabcca7840c7ca85b1955c0d262df3bcd2db7a0b1341aa" dependencies = [ 
"fs-err", "miden-assembly", @@ -2581,8 +2568,9 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11e7778b6851a6348795e168b9423a4d38bd6f89d3d0acf64a1626c12917b9bd" dependencies = [ "miden-protocol", "thiserror 2.0.17", @@ -3075,8 +3063,9 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef3b133f424dbbca4ae1576258e695059f96746fc3d03aa2d0a9cc009648c0f" dependencies = [ "bech32", "fs-err", @@ -3105,8 +3094,9 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1be64f4f5206eae03c83b9825b694bf5cf1176802bf664010fbc32a3fc726822" dependencies = [ "proc-macro2", "quote", @@ -3195,8 +3185,9 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8250694a887e0bbb18275e4ce5bc1dd7a3cc72fe6a8e1e47a7b46bace3cb51fe" dependencies = [ "fs-err", "miden-assembly", @@ -3212,8 +3203,9 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "9cf5dc1d6cef01839ef521f3e59fbc19731efad64260fcbc877079fc8095a258" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3228,14 +3220,14 @@ dependencies = [ "miden-tx-batch-prover", "rand 0.9.2", "rand_chacha 0.9.0", - "thiserror 2.0.17", "winterfell", ] [[package]] name = "miden-tx" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8136d02b3b83d8f9c2fe6bdc3a33f63951034c1b613351920056825a752f200" dependencies = [ "miden-processor", "miden-protocol", @@ -3247,8 +3239,9 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.14.0" -source = "git+https://github.com/0xMiden/miden-base.git?branch=next#99c6b5116a88cb3efc54bd8250a06ae082e9648f" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b94581bf94b0e7cd5efeaad9e590c49714c075d054f4b19ca60eb2e43ae893ad" dependencies = [ "miden-protocol", "miden-tx", @@ -3501,7 +3494,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3897,7 +3890,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "blake2", "bytes", @@ -3933,7 +3926,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "brotli", "bytes", @@ -4023,7 +4016,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -4055,7 +4048,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.12.3", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4875,7 +4868,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5508,7 +5501,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5517,7 +5510,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -6522,7 +6515,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6acc89250f..cf690b306c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,12 +49,12 @@ miden-node-validator = { path = "crates/validator", version = "0.13" } miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } # miden-base aka protocol dependencies. These should be updated in sync. 
-miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } -miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } -miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } -miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } -miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } -miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-block-prover = { version = "0.13" } +miden-protocol = { default-features = false, version = "0.13" } +miden-standards = { version = "0.13" } +miden-testing = { version = "0.13" } +miden-tx = { default-features = false, version = "0.13" } +miden-tx-batch-prover = { version = "0.13" } # Other miden dependencies. These should align with those expected by miden-base. miden-air = { features = ["std", "testing"], version = "0.20" } From 665ea0f2eb78b85948face53ff8394946ed12ce4 Mon Sep 17 00:00:00 2001 From: igamigo Date: Tue, 20 Jan 2026 18:22:02 -0300 Subject: [PATCH 104/125] chore: bump crypto (#1557) --- Cargo.lock | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa6572578d..d32c2ff646 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -1700,6 +1711,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ 
+ "ahash 0.7.8", +] [[package]] name = "hashbrown" @@ -2617,9 +2631,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc7981c1d907bb9864e24f2bd6304c4fca03a41fc4606c09edd6a7f5a8fc80fc" +checksum = "f0b49de9b0d8370c992ee04791f68a4509078198b6f42e5f72a262e7d4456487" dependencies = [ "blake3", "cc", @@ -3890,7 +3904,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "blake2", "bytes", @@ -3926,7 +3940,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "brotli", "bytes", @@ -4016,7 +4030,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash", + "ahash 0.8.12", ] [[package]] @@ -4048,7 +4062,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.1", + "hashbrown 0.12.3", "parking_lot", "rand 0.8.5", ] From 976566a9e2b84559713b61e7b148ed548fdb410e Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Wed, 21 Jan 2026 11:12:12 +1300 Subject: [PATCH 105/125] feat(validator): FPI support in data store (#1493) --- CHANGELOG.md | 1 + .../validator/src/tx_validation/data_store.rs | 35 +++++++++++-------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c8fdd662b..2f623843a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - Added 
`GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)). - Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)). - Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). +- Add foreign account support to validator ([#1493](https://github.com/0xMiden/miden-node/pull/1493)). - Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496). - Limit number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). diff --git a/crates/validator/src/tx_validation/data_store.rs b/crates/validator/src/tx_validation/data_store.rs index 749ddaac1c..b2c9093d4f 100644 --- a/crates/validator/src/tx_validation/data_store.rs +++ b/crates/validator/src/tx_validation/data_store.rs @@ -53,34 +53,39 @@ impl DataStore for TransactionInputsDataStore { foreign_account_id: AccountId, _ref_block: BlockNumber, ) -> impl FutureMaybeSend> { - async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) } + async move { + self.tx_inputs.read_foreign_account_inputs(foreign_account_id).map_err(|err| { + DataStoreError::other_with_source("failed to read foreign account inputs", err) + }) + } } fn get_vault_asset_witnesses( &self, _account_id: AccountId, - _vault_root: Word, - _vault_keys: BTreeSet, + vault_root: Word, + vault_keys: BTreeSet, ) -> impl FutureMaybeSend, DataStoreError>> { - std::future::ready(Ok(vec![])) + async move { + // Retrieve native and foreign account asset witnesses from the advice inputs. 
+ self.tx_inputs + .read_vault_asset_witnesses(vault_root, vault_keys) + .map_err(|err| { + DataStoreError::other_with_source("failed to read vault asset witnesses", err) + }) + } } fn get_storage_map_witness( &self, - account_id: AccountId, + _account_id: AccountId, _map_root: Word, _map_key: Word, ) -> impl FutureMaybeSend> { async move { - if self.tx_inputs.account().id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - // For partial accounts, storage map witness is not available. - Err(DataStoreError::Other { - error_msg: "storage map witness not available with partial account state".into(), - source: None, - }) + unimplemented!( + "get_storage_map_witness is not used during re-execution of transactions" + ) } } @@ -88,7 +93,7 @@ impl DataStore for TransactionInputsDataStore { &self, _script_root: Word, ) -> impl FutureMaybeSend, DataStoreError>> { - async move { Ok(None) } + async move { unimplemented!("get_note_script is not used during re-execution of transactions") } } } From de475f6df16e6a3c567459c5605faa22b4def2be Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Tue, 20 Jan 2026 19:18:24 -0300 Subject: [PATCH 106/125] fix: add attachment to network monitor increase counter note (#1559) --- bin/network-monitor/src/counter.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 86eb7a2f21..c044267331 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -20,6 +20,8 @@ use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; use miden_protocol::note::{ Note, NoteAssets, + NoteAttachment, + NoteExecutionHint, NoteInputs, NoteMetadata, NoteRecipient, @@ -32,6 +34,7 @@ use miden_protocol::utils::Deserializable; use miden_protocol::{Felt, Word}; use miden_standards::account::interface::{AccountInterface, AccountInterfaceExt}; 
use miden_standards::code_builder::CodeBuilder; +use miden_standards::note::NetworkAccountTarget; use miden_tx::auth::BasicAuthenticator; use miden_tx::utils::Serializable; use miden_tx::{LocalTransactionProver, TransactionExecutor}; @@ -849,11 +852,18 @@ fn create_network_note( script: NoteScript, rng: &mut ChaCha20Rng, ) -> Result<(Note, NoteRecipient)> { + // Create the NetworkAccountTarget attachment - this is required for the note to be + // recognized as a network note by the ntx-builder + let target = NetworkAccountTarget::new(counter_account.id(), NoteExecutionHint::Always) + .context("Failed to create NetworkAccountTarget for counter account")?; + let attachment: NoteAttachment = target.into(); + let metadata = NoteMetadata::new( wallet_account.id(), NoteType::Public, NoteTag::with_account_target(counter_account.id()), - ); + ) + .with_attachment(attachment); let serial_num = Word::new([ Felt::new(rng.random()), From e257902502d868e01cfb96171d2e603fc7fd3dfd Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Wed, 21 Jan 2026 12:58:02 +1300 Subject: [PATCH 107/125] feat: Add store endpoints GetVaultAssetWitnesses and GetStorageMapWitness (#1529) --- CHANGELOG.md | 1 + crates/proto/src/generated/store.rs | 249 +++++++++++++++++++++++++ crates/store/src/errors.rs | 12 +- crates/store/src/inner_forest/mod.rs | 98 ++++++++-- crates/store/src/server/ntx_builder.rs | 129 ++++++++++++- crates/store/src/state/mod.rs | 40 +++- proto/proto/internal/store.proto | 86 +++++++++ 7 files changed, 594 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f623843a0..66f6eb657f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). 
- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). - Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). +- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). - Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). ### Changes diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index abbd8b25d5..dfd0a456a7 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -223,6 +223,94 @@ pub struct CurrentBlockchainData { #[prost(message, optional, tag = "2")] pub current_block_header: ::core::option::Option, } +/// Request for vault asset witnesses for a specific account. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VaultAssetWitnessesRequest { + /// The account ID for which to retrieve vault asset witnesses. + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// Set of asset vault keys to retrieve witnesses for. + #[prost(message, repeated, tag = "2")] + pub vault_keys: ::prost::alloc::vec::Vec, + /// The witnesses returned correspond to the account state at the specified block number. + /// + /// Optional block number. If not provided, uses the latest state. + /// + /// The specified block number should be relatively near the chain tip else an error will be + /// returned. + #[prost(fixed32, optional, tag = "3")] + pub block_num: ::core::option::Option, +} +/// Response containing vault asset witnesses. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VaultAssetWitnessesResponse { + /// Block number at which the witnesses were generated. + /// + /// The witnesses returned corresponds to the account state at the specified block number. 
+ #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// List of asset witnesses. + #[prost(message, repeated, tag = "2")] + pub asset_witnesses: ::prost::alloc::vec::Vec< + vault_asset_witnesses_response::VaultAssetWitness, + >, +} +/// Nested message and enum types in `VaultAssetWitnessesResponse`. +pub mod vault_asset_witnesses_response { + /// A vault asset witness containing the asset and its proof. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct VaultAssetWitness { + /// The SMT opening proof for the asset's inclusion in the vault. + #[prost(message, optional, tag = "1")] + pub proof: ::core::option::Option, + } +} +/// Request for a storage map witness for a specific account and storage entry. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct StorageMapWitnessRequest { + /// The account ID for which to retrieve the storage map witness. + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// The raw, user-provided storage map key for which to retrieve the witness. + #[prost(message, optional, tag = "2")] + pub map_key: ::core::option::Option, + /// Optional block number. If not provided, uses the latest state. + /// + /// The witness returned corresponds to the account state at the specified block number. + /// + /// Optional block number. If not provided, uses the latest state. + /// + /// The specified block number should be relatively near the chain tip else an error will be + /// returned. + #[prost(fixed32, optional, tag = "3")] + pub block_num: ::core::option::Option, + /// The storage slot name for the map. + #[prost(string, tag = "4")] + pub slot_name: ::prost::alloc::string::String, +} +/// Response containing a storage map witness. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StorageMapWitnessResponse { + /// The storage map witness. + #[prost(message, optional, tag = "1")] + pub witness: ::core::option::Option, + /// Block number at which the witness was generated. 
+ #[prost(fixed32, tag = "2")] + pub block_num: u32, +} +/// Nested message and enum types in `StorageMapWitnessResponse`. +pub mod storage_map_witness_response { + /// Storage map witness data. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct StorageWitness { + /// The raw, user-provided storage map key. + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option, + /// The SMT opening proof for the key-value pair. + #[prost(message, optional, tag = "3")] + pub proof: ::core::option::Option, + } +} /// Generated client implementations. pub mod rpc_client { #![allow( @@ -2400,6 +2488,56 @@ pub mod ntx_builder_client { .insert(GrpcMethod::new("store.NtxBuilder", "GetNoteScriptByRoot")); self.inner.unary(req, path, codec).await } + /// Returns vault asset witnesses for the specified account. + pub async fn get_vault_asset_witnesses( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetVaultAssetWitnesses", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetVaultAssetWitnesses")); + self.inner.unary(req, path, codec).await + } + /// Returns a storage map witness for the specified account and storage map entry. 
+ pub async fn get_storage_map_witness( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetStorageMapWitness", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetStorageMapWitness")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -2466,6 +2604,22 @@ pub mod ntx_builder_server { tonic::Response, tonic::Status, >; + /// Returns vault asset witnesses for the specified account. + async fn get_vault_asset_witnesses( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a storage map witness for the specified account and storage map entry. 
+ async fn get_storage_map_witness( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// Store API for the network transaction builder component #[derive(Debug)] @@ -2840,6 +2994,101 @@ pub mod ntx_builder_server { }; Box::pin(fut) } + "/store.NtxBuilder/GetVaultAssetWitnesses" => { + #[allow(non_camel_case_types)] + struct GetVaultAssetWitnessesSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetVaultAssetWitnessesSvc { + type Response = super::VaultAssetWitnessesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_vault_asset_witnesses( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetVaultAssetWitnessesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetStorageMapWitness" => { + #[allow(non_camel_case_types)] + struct GetStorageMapWitnessSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetStorageMapWitnessSvc { + type Response = super::StorageMapWitnessResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> 
Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_storage_map_witness(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetStorageMapWitnessSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index a842c449d4..df1f0fa653 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -31,7 +31,7 @@ use tonic::Status; use crate::db::manager::ConnectionManagerError; use crate::db::models::conv::DatabaseTypeConversionError; -use crate::inner_forest::InnerForestError; +use crate::inner_forest::{InnerForestError, WitnessError}; // DATABASE ERRORS // ================================================================================================= @@ -534,6 +534,16 @@ pub enum SyncTransactionsError { DeserializationFailed(#[from] ConversionError), #[error("account {0} not found")] AccountNotFound(AccountId), + #[error("failed to retrieve witness")] + WitnessError(#[from] WitnessError), +} + +#[derive(Debug, Error, GrpcError)] +pub enum GetWitnessesError { + #[error("malformed request")] + DeserializationFailed(#[from] ConversionError), + #[error("failed to retrieve witness")] + WitnessError(#[from] WitnessError), } // SCHEMA 
VERIFICATION ERRORS diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index a5d47ac515..d01f5da8ca 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,12 +1,19 @@ -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; -use miden_protocol::account::{AccountId, NonFungibleDeltaAction, StorageSlotName}; -use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::account::{ + AccountId, + NonFungibleDeltaAction, + StorageMap, + StorageMapWitness, + StorageSlotName, +}; +use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; +use miden_protocol::errors::{AssetError, StorageMapError}; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; @@ -30,6 +37,18 @@ pub enum InnerForestError { }, } +#[derive(Debug, Error)] +pub enum WitnessError { + #[error("root not found")] + RootNotFound, + #[error("merkle error")] + MerkleError(#[from] MerkleError), + #[error("storage map error")] + StorageMapError(#[from] StorageMapError), + #[error("failed to construct asset")] + AssetError(#[from] AssetError), +} + // INNER FOREST // ================================================================================================ @@ -123,24 +142,78 @@ impl InnerForest { .map_or_else(Self::empty_smt_root, |(_, root)| *root) } - /// Retrieves the vault SMT root for an account at or before the given block. + /// Retrieves a vault root for the specified account block number. + /// + /// Finds the most recent vault root before the specified block number for the account. 
+ pub(crate) fn get_vault_root( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Option { + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) + } + /// Retrieves the storage map SMT root for an account slot at or before the given block. /// - /// Finds the most recent storage root entry for the slot, since storage state persists - /// across blocks where no changes occur. - pub(crate) fn get_storage_root( + /// Finds the most recent storage map root at or before the specified block number. + pub(crate) fn get_storage_map_root( &self, account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, - ) -> Word { + ) -> Option { self.storage_map_roots .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) ..=(account_id, slot_name.clone(), block_num), ) .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) + .map(|(_, root)| *root) + } + + /// Retrieves a storage map witness for the specified account and storage slot. + /// + /// Finds the most recent witness at or before the specified block number. + /// + /// Note that the `raw_key` is the raw, user-provided key that needs to be hashed in order to + /// get the actual key into the storage map. + pub(crate) fn get_storage_map_witness( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + raw_key: Word, + ) -> Result { + let key = StorageMap::hash_key(raw_key); + let root = self + .get_storage_map_root(account_id, slot_name, block_num) + .ok_or(WitnessError::RootNotFound)?; + let proof = self.forest.open(root, key)?; + + Ok(StorageMapWitness::new(proof, vec![raw_key])?) + } + + /// Retrieves a vault asset witnesses for the specified account and asset keys at the specified + /// block number. 
+ pub fn get_vault_asset_witnesses( + &self, + account_id: AccountId, + block_num: BlockNumber, + asset_keys: BTreeSet, + ) -> Result, WitnessError> { + let root = self.get_vault_root(account_id, block_num).ok_or(WitnessError::RootNotFound)?; + let witnessees = asset_keys + .into_iter() + .map(|key| { + let proof = self.forest.open(root, key.into())?; + let asset = AssetWitness::new(proof)?; + Ok(asset) + }) + .collect::, WitnessError>>()?; + Ok(witnessees) } /// Opens a storage map and returns storage map details with SMT proofs for the given keys. @@ -157,12 +230,7 @@ impl InnerForest { block_num: BlockNumber, keys: &[Word], ) -> Option> { - let root = self.get_storage_root(account_id, &slot_name, block_num); - - // Empty root means no storage map exists for this account/slot - if root == Self::empty_smt_root() { - return None; - } + let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; if keys.len() > AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES { return Some(Ok(AccountStorageMapDetails { diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index f08e327284..1d45833e5f 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -1,10 +1,14 @@ +use std::collections::BTreeSet; use std::num::{NonZero, TryFromIntError}; +use miden_crypto::merkle::smt::SmtProof; use miden_node_proto::domain::account::{AccountInfo, validate_network_account_prefix}; +use miden_node_proto::generated as proto; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::store::ntx_builder_server; -use miden_node_proto::generated::{self as proto}; use miden_node_utils::ErrorReport; +use miden_protocol::account::StorageSlotName; +use miden_protocol::asset::AssetVaultKey; use miden_protocol::block::BlockNumber; use miden_protocol::note::Note; use tonic::{Request, Response, Status}; @@ -12,8 +16,15 @@ use tracing::{debug, instrument}; use crate::COMPONENT; use 
crate::db::models::Page; -use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError}; -use crate::server::api::{StoreApi, internal_error, invalid_argument, read_block_range, read_root}; +use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError, GetWitnessesError}; +use crate::server::api::{ + StoreApi, + internal_error, + invalid_argument, + read_account_id, + read_block_range, + read_root, +}; // NTX BUILDER ENDPOINTS // ================================================================================================ @@ -224,4 +235,116 @@ impl ntx_builder_server::NtxBuilder for StoreApi { script: note_script.map(Into::into), })) } + + #[instrument( + parent = None, + target = COMPONENT, + name = "store.ntx_builder_server.get_vault_asset_witnesses", + skip_all, + ret(level = "debug"), + err + )] + async fn get_vault_asset_witnesses( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + // Read account ID. + let account_id = + read_account_id::(request.account_id).map_err(invalid_argument)?; + + // Read vault keys. + let vault_keys = request + .vault_keys + .into_iter() + .map(|key_digest| { + let word = read_root::(Some(key_digest), "VaultKey") + .map_err(invalid_argument)?; + Ok(AssetVaultKey::new_unchecked(word)) + }) + .collect::, Status>>()?; + + // Read block number from request, use latest if not provided. + let block_num = if let Some(num) = request.block_num { + num.into() + } else { + self.state.latest_block_num().await + }; + + // Retrieve the asset witnesses. + let asset_witnesses = self + .state + .get_vault_asset_witnesses(account_id, block_num, vault_keys) + .await + .map_err(internal_error)?; + + // Convert AssetWitness to protobuf format by extracting witness data. 
+ let proto_witnesses = asset_witnesses + .into_iter() + .map(|witness| { + let proof: SmtProof = witness.into(); + proto::store::vault_asset_witnesses_response::VaultAssetWitness { + proof: Some(proof.into()), + } + }) + .collect(); + + Ok(Response::new(proto::store::VaultAssetWitnessesResponse { + block_num: block_num.as_u32(), + asset_witnesses: proto_witnesses, + })) + } + + #[instrument( + parent = None, + target = COMPONENT, + name = "store.ntx_builder_server.get_storage_map_witness", + skip_all, + ret(level = "debug"), + err + )] + async fn get_storage_map_witness( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + // Read the account ID. + let account_id = + read_account_id::(request.account_id).map_err(invalid_argument)?; + + // Read the map key. + let map_key = + read_root::(request.map_key, "MapKey").map_err(invalid_argument)?; + + // Read the slot name. + let slot_name = StorageSlotName::new(request.slot_name).map_err(|err| { + tonic::Status::invalid_argument(format!("Invalid storage slot name: {err}")) + })?; + + // Read the block number, use latest if not provided. + let block_num = if let Some(num) = request.block_num { + num.into() + } else { + self.state.latest_block_num().await + }; + + // Retrieve the storage map witness. + let storage_witness = self + .state + .get_storage_map_witness(account_id, &slot_name, block_num, map_key) + .await + .map_err(internal_error)?; + + // Convert StorageMapWitness to protobuf format by extracting witness data. 
+ let proof: SmtProof = storage_witness.into(); + Ok(Response::new(proto::store::StorageMapWitnessResponse { + witness: Some(proto::store::storage_map_witness_response::StorageWitness { + key: Some(map_key.into()), + proof: Some(proof.into()), + }), + block_num: self.state.latest_block_num().await.as_u32(), + })) + } } diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index d33a9aaf41..3c422121ba 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -24,8 +24,9 @@ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; use miden_protocol::Word; -use miden_protocol::account::AccountId; use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::account_tree::{AccountTree, AccountWitness}; use miden_protocol::block::nullifier_tree::NullifierWitness; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; @@ -61,7 +62,7 @@ use crate::errors::{ StateInitializationError, StateSyncError, }; -use crate::inner_forest::InnerForest; +use crate::inner_forest::{InnerForest, WitnessError}; use crate::{COMPONENT, DataDirectory}; mod loader; @@ -1232,4 +1233,39 @@ impl State { ) -> Result<(BlockNumber, Vec), DatabaseError> { self.db.select_transactions_records(account_ids, block_range).await } + + /// Returns vault asset witnesses for the specified account and block number. 
+ pub async fn get_vault_asset_witnesses( + &self, + account_id: AccountId, + block_num: BlockNumber, + vault_keys: BTreeSet, + ) -> Result, WitnessError> { + let witnesses = self + .forest + .read() + .await + .get_vault_asset_witnesses(account_id, block_num, vault_keys)?; + Ok(witnesses) + } + + /// Returns a storage map witness for the specified account and storage entry at the block + /// number. + /// + /// Note that the `raw_key` is the raw, user-provided key that needs to be hashed in order to + /// get the actual key into the storage map. + pub async fn get_storage_map_witness( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + raw_key: Word, + ) -> Result { + let witness = self + .forest + .read() + .await + .get_storage_map_witness(account_id, slot_name, block_num, raw_key)?; + Ok(witness) + } } diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 86f4aeff0d..900c7853e4 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -269,6 +269,12 @@ service NtxBuilder { // Returns the script for a note by its root. rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} + + // Returns vault asset witnesses for the specified account. + rpc GetVaultAssetWitnesses(VaultAssetWitnessesRequest) returns (VaultAssetWitnessesResponse) {} + + // Returns a storage map witness for the specified account and storage map entry. + rpc GetStorageMapWitness(StorageMapWitnessRequest) returns (StorageMapWitnessResponse) {} } // GET NETWORK ACCOUNT DETAILS BY PREFIX @@ -347,3 +353,83 @@ message CurrentBlockchainData { // Current block header. optional blockchain.BlockHeader current_block_header = 2; } + +// GET VAULT ASSET WITNESSES +// ================================================================================================ + +// Request for vault asset witnesses for a specific account. 
+message VaultAssetWitnessesRequest { + // The account ID for which to retrieve vault asset witnesses. + account.AccountId account_id = 1; + + // Set of asset vault keys to retrieve witnesses for. + repeated primitives.Digest vault_keys = 2; + + // The witnesses returned correspond to the account state at the specified block number. + // + // Optional block number. If not provided, uses the latest state. + // + // The specified block number should be relatively near the chain tip else an error will be + // returned. + optional fixed32 block_num = 3; +} + +// Response containing vault asset witnesses. +message VaultAssetWitnessesResponse { + // A vault asset witness containing the asset and its proof. + message VaultAssetWitness { + // The SMT opening proof for the asset's inclusion in the vault. + primitives.SmtOpening proof = 1; + } + + // Block number at which the witnesses were generated. + // + // The witnesses returned corresponds to the account state at the specified block number. + fixed32 block_num = 1; + + // List of asset witnesses. + repeated VaultAssetWitness asset_witnesses = 2; +} + +// GET STORAGE MAP WITNESS +// ================================================================================================ + +// Request for a storage map witness for a specific account and storage entry. +message StorageMapWitnessRequest { + // The account ID for which to retrieve the storage map witness. + account.AccountId account_id = 1; + + // The raw, user-provided storage map key for which to retrieve the witness. + primitives.Digest map_key = 2; + + // Optional block number. If not provided, uses the latest state. + // + // The witness returned corresponds to the account state at the specified block number. + // + // Optional block number. If not provided, uses the latest state. + // + // The specified block number should be relatively near the chain tip else an error will be + // returned. 
+ optional fixed32 block_num = 3; + + // The storage slot name for the map. + string slot_name = 4; +} + +// Response containing a storage map witness. +message StorageMapWitnessResponse { + // Storage map witness data. + message StorageWitness { + // The raw, user-provided storage map key. + primitives.Digest key = 1; + + // The SMT opening proof for the key-value pair. + primitives.SmtOpening proof = 3; + } + + // The storage map witness. + StorageWitness witness = 1; + + // Block number at which the witness was generated. + fixed32 block_num = 2; +} From 52eb151eaf06b6051eda2d07263b6d16a94107ab Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Tue, 20 Jan 2026 17:20:46 -0800 Subject: [PATCH 108/125] chore: minor comment fixes --- crates/proto/src/generated/store.rs | 2 +- crates/store/src/inner_forest/mod.rs | 8 ++------ proto/proto/internal/store.proto | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index dfd0a456a7..04993a872a 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -265,7 +265,7 @@ pub mod vault_asset_witnesses_response { pub proof: ::core::option::Option, } } -/// Request for a storage map witness for a specific account and storage entry. +/// Request for a storage map witness for a specific account and storage slot. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct StorageMapWitnessRequest { /// The account ID for which to retrieve the storage map witness. diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index d01f5da8ca..6693ba85cb 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -142,9 +142,7 @@ impl InnerForest { .map_or_else(Self::empty_smt_root, |(_, root)| *root) } - /// Retrieves a vault root for the specified account block number. 
- /// - /// Finds the most recent vault root before the specified block number for the account. + /// Retrieves a vault root for the specified account at or before the specified block. pub(crate) fn get_vault_root( &self, account_id: AccountId, @@ -156,9 +154,7 @@ impl InnerForest { .map(|(_, root)| *root) } - /// Retrieves the storage map SMT root for an account slot at or before the given block. - /// - /// Finds the most recent storage map root at or before the specified block number. + /// Retrieves the storage map root for an account slot at or before the specified block. pub(crate) fn get_storage_map_root( &self, account_id: AccountId, diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 900c7853e4..b059abfda8 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -394,7 +394,7 @@ message VaultAssetWitnessesResponse { // GET STORAGE MAP WITNESS // ================================================================================================ -// Request for a storage map witness for a specific account and storage entry. +// Request for a storage map witness for a specific account and storage slot. message StorageMapWitnessRequest { // The account ID for which to retrieve the storage map witness. 
account.AccountId account_id = 1; From 8e32261d9373d1b7822bc988c273711936e7195a Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Wed, 21 Jan 2026 14:24:21 +1300 Subject: [PATCH 109/125] fix: Hash raw key in open_storage_map (#1562) --- crates/proto/src/generated/store.rs | 2 -- crates/store/src/inner_forest/mod.rs | 9 ++++++--- proto/proto/internal/store.proto | 2 -- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index 04993a872a..2fc8168fb8 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -278,8 +278,6 @@ pub struct StorageMapWitnessRequest { /// /// The witness returned corresponds to the account state at the specified block number. /// - /// Optional block number. If not provided, uses the latest state. - /// /// The specified block number should be relatively near the chain tip else an error will be /// returned. 
#[prost(fixed32, optional, tag = "3")] diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 6693ba85cb..403a3334e9 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -224,11 +224,11 @@ impl InnerForest { account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, - keys: &[Word], + raw_keys: &[Word], ) -> Option> { let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; - if keys.len() > AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES { + if raw_keys.len() > AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES { return Some(Ok(AccountStorageMapDetails { slot_name, entries: StorageMapEntries::LimitExceeded, @@ -236,7 +236,10 @@ impl InnerForest { } // Collect SMT proofs for each key - let proofs = Result::from_iter(keys.iter().map(|key| self.forest.open(root, *key))); + let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { + let key = StorageMap::hash_key(*raw_key); + self.forest.open(root, key) + })); Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) } diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index b059abfda8..4ff8ac05dc 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -406,8 +406,6 @@ message StorageMapWitnessRequest { // // The witness returned corresponds to the account state at the specified block number. // - // Optional block number. If not provided, uses the latest state. - // // The specified block number should be relatively near the chain tip else an error will be // returned. 
optional fixed32 block_num = 3; From 2d7c12b7409669e1568732ff9f8a94c3a34d3947 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 21 Jan 2026 09:41:10 +0200 Subject: [PATCH 110/125] fix(store): connect gRPC traces to handlers (#1553) gRPC trace layer was not connected to method handlers as the latter had its own root span. Fixed by removing this root span. --- CHANGELOG.md | 1 + crates/block-producer/src/server/mod.rs | 6 -- crates/rpc/src/server/api.rs | 116 +-------------------- crates/store/src/server/block_producer.rs | 38 +------ crates/store/src/server/ntx_builder.rs | 65 +----------- crates/store/src/server/rpc_api.rs | 117 +--------------------- 6 files changed, 5 insertions(+), 338 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66f6eb657f..2d4b14607e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ - Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/miden-node/pull/1461)). - Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)). - Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/miden-node/pull/1501)). +- gRPC traces now correctly connect to the method implementation ([1553](https://github.com/0xMiden/miden-node/pull/1553)). 
## v0.12.8 (2026-01-15) diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 68fe37b3f2..d8d6bdd972 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -405,12 +405,6 @@ impl api_server::Api for BlockProducerRpcServer { .map_err(Into::into) } - #[instrument( - target = COMPONENT, - name = "block_producer.server.status", - skip_all, - err - )] async fn status( &self, _request: tonic::Request<()>, diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index fd3ee97c65..1fc0c266b8 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -25,7 +25,7 @@ use miden_protocol::utils::serde::{Deserializable, Serializable}; use miden_protocol::{MIN_PROOF_SECURITY_LEVEL, Word}; use miden_tx::TransactionVerifier; use tonic::{IntoRequest, Request, Response, Status}; -use tracing::{debug, info, instrument}; +use tracing::{debug, info}; use url::Url; use crate::COMPONENT; @@ -154,14 +154,6 @@ impl RpcService { #[tonic::async_trait] impl api_server::Api for RpcService { - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.check_nullifiers", - skip_all, - ret(level = "debug"), - err - )] async fn check_nullifiers( &self, request: Request, @@ -180,14 +172,6 @@ impl api_server::Api for RpcService { self.store.clone().check_nullifiers(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_nullifiers", - skip_all, - ret(level = "debug"), - err - )] async fn sync_nullifiers( &self, request: Request, @@ -199,14 +183,6 @@ impl api_server::Api for RpcService { self.store.clone().sync_nullifiers(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_block_header_by_number", - skip_all, - ret(level = "debug"), - err - )] async fn get_block_header_by_number( &self, request: Request, @@ -216,14 +192,6 @@ impl api_server::Api for RpcService { 
self.store.clone().get_block_header_by_number(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_state", - skip_all, - ret(level = "debug"), - err - )] async fn sync_state( &self, request: Request, @@ -236,14 +204,6 @@ impl api_server::Api for RpcService { self.store.clone().sync_state(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_storage_maps", - skip_all, - ret(level = "debug"), - err - )] async fn sync_storage_maps( &self, request: Request, @@ -253,14 +213,6 @@ impl api_server::Api for RpcService { self.store.clone().sync_storage_maps(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_notes", - skip_all, - ret(level = "debug"), - err - )] async fn sync_notes( &self, request: Request, @@ -272,14 +224,6 @@ impl api_server::Api for RpcService { self.store.clone().sync_notes(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_notes_by_id", - skip_all, - ret(level = "debug"), - err - )] async fn get_notes_by_id( &self, request: Request, @@ -301,14 +245,6 @@ impl api_server::Api for RpcService { self.store.clone().get_notes_by_id(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_account_vault", - skip_all, - ret(level = "debug"), - err - )] async fn sync_account_vault( &self, request: tonic::Request, @@ -319,7 +255,6 @@ impl api_server::Api for RpcService { self.store.clone().sync_account_vault(request).await } - #[instrument(parent = None, target = COMPONENT, name = "rpc.server.submit_proven_transaction", skip_all, err)] async fn submit_proven_transaction( &self, request: Request, @@ -398,7 +333,6 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_transaction(request).await } - #[instrument(parent = None, target = COMPONENT, name = "rpc.server.submit_proven_batch", skip_all, err)] async fn 
submit_proven_batch( &self, request: tonic::Request, @@ -457,14 +391,6 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_batch(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_block_by_number", - skip_all, - ret(level = "debug"), - err - )] async fn get_block_by_number( &self, request: Request, @@ -476,14 +402,6 @@ impl api_server::Api for RpcService { self.store.clone().get_block_by_number(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_account", - skip_all, - ret(level = "debug"), - err - )] async fn get_account( &self, request: Request, @@ -514,14 +432,6 @@ impl api_server::Api for RpcService { self.store.clone().get_account(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.status", - skip_all, - ret(level = "debug"), - err - )] async fn status( &self, request: Request<()>, @@ -558,14 +468,6 @@ impl api_server::Api for RpcService { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_note_script_by_root", - skip_all, - ret(level = "debug"), - err - )] async fn get_note_script_by_root( &self, request: Request, @@ -575,14 +477,6 @@ impl api_server::Api for RpcService { self.store.clone().get_note_script_by_root(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_transactions", - skip_all, - ret(level = "debug"), - err - )] async fn sync_transactions( &self, request: Request, @@ -592,14 +486,6 @@ impl api_server::Api for RpcService { self.store.clone().sync_transactions(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_limits", - skip_all, - ret(level = "debug"), - err - )] async fn get_limits( &self, request: Request<()>, diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 03073c762b..9dd2b39c4d 100644 --- 
a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -9,9 +9,8 @@ use miden_protocol::Word; use miden_protocol::block::{BlockNumber, ProvenBlock}; use miden_protocol::utils::Deserializable; use tonic::{Request, Response, Status}; -use tracing::{Instrument, instrument}; +use tracing::Instrument; -use crate::COMPONENT; use crate::errors::ApplyBlockError; use crate::server::api::{ StoreApi, @@ -31,13 +30,6 @@ impl block_producer_server::BlockProducer for StoreApi { /// Returns block header for the specified block number. /// /// If the block number is not provided, block header for the latest block is returned. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_block_header_by_number", - skip_all, - err - )] async fn get_block_header_by_number( &self, request: Request, @@ -46,13 +38,6 @@ impl block_producer_server::BlockProducer for StoreApi { } /// Updates the local DB by inserting a new block header and the related data. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.apply_block", - skip_all, - err - )] async fn apply_block( &self, request: Request, @@ -106,13 +91,6 @@ impl block_producer_server::BlockProducer for StoreApi { } /// Returns data needed by the block producer to construct and prove the next block. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_block_inputs", - skip_all, - err - )] async fn get_block_inputs( &self, request: Request, @@ -145,13 +123,6 @@ impl block_producer_server::BlockProducer for StoreApi { /// Fetches the inputs for a transaction batch from the database. /// /// See [`State::get_batch_inputs`] for details. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_batch_inputs", - skip_all, - err - )] async fn get_batch_inputs( &self, request: Request, @@ -177,13 +148,6 @@ impl block_producer_server::BlockProducer for StoreApi { .map_err(|err| tonic::Status::internal(err.as_report())) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_transaction_inputs", - skip_all, - err - )] async fn get_transaction_inputs( &self, request: Request, diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 1d45833e5f..800eeedf6c 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -12,7 +12,7 @@ use miden_protocol::asset::AssetVaultKey; use miden_protocol::block::BlockNumber; use miden_protocol::note::Note; use tonic::{Request, Response, Status}; -use tracing::{debug, instrument}; +use tracing::debug; use crate::COMPONENT; use crate::db::models::Page; @@ -34,14 +34,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { /// Returns block header for the specified block number. /// /// If the block number is not provided, block header for the latest block is returned. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_block_header_by_number", - skip_all, - ret(level = "debug"), - err - )] async fn get_block_header_by_number( &self, request: Request, @@ -54,14 +46,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { /// /// This returns all the blockchain-related information needed for executing transactions /// without authenticating notes. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_current_blockchain_data", - skip_all, - ret(level = "debug"), - err - )] async fn get_current_blockchain_data( &self, request: Request, @@ -87,14 +71,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { Ok(Response::new(response)) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_network_account_details_by_prefix", - skip_all, - ret(level = "debug"), - err - )] async fn get_network_account_details_by_prefix( &self, request: Request, @@ -115,13 +91,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_unconsumed_network_notes", - skip_all, - err - )] async fn get_unconsumed_network_notes( &self, request: Request, @@ -170,14 +139,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { /// truncated at a block boundary to ensure all accounts from included blocks are returned. /// /// The response includes pagination info with the last block number that was fully included. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_network_account_ids", - skip_all, - ret(level = "debug"), - err - )] async fn get_network_account_ids( &self, request: Request, @@ -209,14 +170,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_note_script_by_root", - skip_all, - ret(level = "debug"), - err - )] async fn get_note_script_by_root( &self, request: Request, @@ -236,14 +189,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_vault_asset_witnesses", - skip_all, - ret(level = "debug"), - err - )] async fn get_vault_asset_witnesses( &self, request: Request, @@ -296,14 +241,6 @@ impl ntx_builder_server::NtxBuilder for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_storage_map_witness", - skip_all, - ret(level = "debug"), - err - )] async fn get_storage_map_witness( &self, request: Request, diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index a354a9ed9d..845855aa7d 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -12,7 +12,7 @@ use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::note::NoteId; use tonic::{Request, Response, Status}; -use tracing::{debug, info, instrument}; +use tracing::{debug, info}; use crate::COMPONENT; use crate::errors::{ @@ -45,15 +45,6 @@ impl rpc_server::Rpc for StoreApi { /// Returns block header for the specified block number. /// /// If the block number is not provided, block header for the latest block is returned. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_block_header_by_number", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn get_block_header_by_number( &self, request: Request, @@ -65,15 +56,6 @@ impl rpc_server::Rpc for StoreApi { /// /// This endpoint also returns Merkle authentication path for each requested nullifier which can /// be verified against the latest root of the nullifier database. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.check_nullifiers", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn check_nullifiers( &self, request: Request, @@ -97,15 +79,6 @@ impl rpc_server::Rpc for StoreApi { /// Returns nullifiers that match the specified prefixes and have been consumed. /// /// Currently the only supported prefix length is 16 bits. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_nullifiers", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_nullifiers( &self, request: Request, @@ -146,15 +119,6 @@ impl rpc_server::Rpc for StoreApi { /// Returns info which can be used by the client to sync up to the latest state of the chain /// for the objects the client is interested in. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_state", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_state( &self, request: Request, @@ -202,15 +166,6 @@ impl rpc_server::Rpc for StoreApi { } /// Returns info which can be used by the client to sync note state. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_notes", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_notes( &self, request: Request, @@ -245,15 +200,6 @@ impl rpc_server::Rpc for StoreApi { /// /// If the list is empty or no [`Note`] matched the requested [`NoteId`] and empty list is /// returned. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_notes_by_id", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn get_notes_by_id( &self, request: Request, @@ -281,15 +227,6 @@ impl rpc_server::Rpc for StoreApi { Ok(Response::new(proto::note::CommittedNoteList { notes })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_block_by_number", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn get_block_by_number( &self, request: Request, @@ -307,15 +244,6 @@ impl rpc_server::Rpc for StoreApi { Ok(Response::new(proto::blockchain::MaybeBlock { block })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_account", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn get_account( &self, request: Request, @@ -329,15 +257,6 @@ impl rpc_server::Rpc for StoreApi { Ok(Response::new(account_data.into())) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_account_vault", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_account_vault( &self, request: Request, @@ -387,15 +306,6 @@ impl rpc_server::Rpc for StoreApi { /// Returns storage map updates for the specified account within a block range. /// /// Supports cursor-based pagination for large storage maps. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_storage_maps", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_storage_maps( &self, request: Request, @@ -441,15 +351,6 @@ impl rpc_server::Rpc for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.status", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn status( &self, _request: Request<()>, @@ -461,14 +362,6 @@ impl rpc_server::Rpc for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_note_script_by_root", - skip_all, - ret(level = "debug"), - err - )] async fn get_note_script_by_root( &self, request: Request, @@ -488,14 +381,6 @@ impl rpc_server::Rpc for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_transactions", - skip_all, - ret(level = "debug"), - err - )] async fn sync_transactions( &self, request: Request, From 9b70174e10be85b0925c433b8c726b479c4732c9 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Wed, 21 Jan 2026 12:46:03 -0300 Subject: [PATCH 111/125] chore: load network accounts asynchronously in NTX Builder (#1495) * chore: decouple ntx builder * review: send one account at a time * review: increase channel capacity, remove pagination reference * review: return NetworkAccountPrefix * review: improve tracing * review: add exponential backoff * review: remove max iterations * review: reduce channel capacity Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> * review: abort on failure * fix: exponential backoff overflow * review: fix instrumentation * review: add warn! 
when receiver drops * review: flatten double result Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> * review: remove Option * review: remove fields from trace Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> * review: remove parameters from submit_page * review: log full errors * review: rename submit and fetch page functions * review: use _inner approach for errors --------- Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- CHANGELOG.md | 1 + bin/node/src/commands/block_producer.rs | 3 - bin/node/src/commands/bundled.rs | 13 -- crates/block-producer/src/server/mod.rs | 15 +-- crates/ntx-builder/src/block_producer.rs | 4 +- crates/ntx-builder/src/builder.rs | 80 ++++++----- crates/ntx-builder/src/store.rs | 161 +++++++++++++++++------ crates/rpc/src/server/api.rs | 4 +- 8 files changed, 173 insertions(+), 108 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d4b14607e..d82a7c4dbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)). - Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)). - Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). +- Decoupled ntx-builder from block-producer startup by loading network accounts asynchronously via a background task ([#????](https://github.com/0xMiden/miden-node/pull/????)). - Add foreign account support to validator ([#1493](https://github.com/0xMiden/miden-node/pull/1493)). - Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496). - Limit number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). 
diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index d50182d872..5cfbc78fcc 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; use std::time::Duration; use anyhow::Context; use miden_node_block_producer::BlockProducer; use miden_node_utils::grpc::UrlExt; -use tokio::sync::Barrier; use url::Url; use super::{ENV_BLOCK_PRODUCER_URL, ENV_STORE_BLOCK_PRODUCER_URL}; @@ -93,7 +91,6 @@ impl BlockProducerCommand { block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, max_batches_per_block: block_producer.max_batches_per_block, - production_checkpoint: Arc::new(Barrier::new(1)), grpc_timeout, mempool_tx_capacity: block_producer.mempool_tx_capacity, } diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 1a864a3817..28fba84e94 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; use std::path::PathBuf; -use std::sync::Arc; use std::time::Duration; use anyhow::Context; @@ -14,7 +13,6 @@ use miden_protocol::block::BlockSigner; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; -use tokio::sync::Barrier; use tokio::task::JoinSet; use url::Url; @@ -222,19 +220,11 @@ impl BundledCommand { }) .id(); - // A sync point between the ntx-builder and block-producer components. let should_start_ntx_builder = !ntx_builder.disabled; - let checkpoint = if should_start_ntx_builder { - Barrier::new(2) - } else { - Barrier::new(1) - }; - let checkpoint = Arc::new(checkpoint); // Start block-producer. The block-producer's endpoint is available after loading completes. 
let block_producer_id = join_set .spawn({ - let checkpoint = Arc::clone(&checkpoint); let store_url = Url::parse(&format!("http://{store_block_producer_address}")) .context("Failed to parse URL")?; let validator_url = Url::parse(&format!("http://{validator_address}")) @@ -250,7 +240,6 @@ impl BundledCommand { block_interval: block_producer.block_interval, max_batches_per_block: block_producer.max_batches_per_block, max_txs_per_batch: block_producer.max_txs_per_batch, - production_checkpoint: checkpoint, grpc_timeout, mempool_tx_capacity: block_producer.mempool_tx_capacity, } @@ -323,8 +312,6 @@ impl BundledCommand { block_producer_url, validator_url, ntx_builder.tx_prover_url, - ntx_builder.ticker_interval, - checkpoint, ntx_builder.script_cache_size, ) .run() diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index d8d6bdd972..8245c1ee6b 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -18,7 +18,7 @@ use miden_protocol::block::BlockNumber; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::serde::Deserializable; use tokio::net::TcpListener; -use tokio::sync::{Barrier, Mutex, RwLock}; +use tokio::sync::{Mutex, RwLock}; use tokio_stream::wrappers::{ReceiverStream, TcpListenerStream}; use tonic::Status; use tower_http::trace::TraceLayer; @@ -65,11 +65,6 @@ pub struct BlockProducer { pub max_txs_per_batch: usize, /// The maximum number of batches per block. pub max_batches_per_block: usize, - /// Block production only begins after this checkpoint barrier completes. - /// - /// The block-producers gRPC endpoint will be available before this point, so this lets the - /// mempool synchronize its event stream without risking a race condition. - pub production_checkpoint: Arc, /// Server-side timeout for an individual gRPC request. /// /// If the handler takes longer than this duration, the server cancels the call. 
@@ -155,12 +150,7 @@ impl BlockProducer { // any complete or fail, we can shutdown the rest (somewhat) gracefully. let mut tasks = tokio::task::JoinSet::new(); - // Launch the gRPC server and wait at the checkpoint for any other components to be in sync. - // - // This is used to ensure the ntx-builder can subscribe to the mempool events without - // playing catch up caused by block-production. - // - // This is a temporary work-around until the ntx-builder can resync on the fly. + // Launch the gRPC server. let rpc_id = tasks .spawn({ let mempool = mempool.clone(); @@ -171,7 +161,6 @@ impl BlockProducer { } }) .id(); - self.production_checkpoint.wait().await; let batch_builder_id = tasks .spawn({ diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index 87d3da7e6f..ce4d7b9c6a 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -67,9 +67,9 @@ impl BlockProducerClient { loop { match self.subscribe_to_mempool(chain_tip).await { Err(err) if err.code() == tonic::Code::Unavailable => { - // exponential backoff with base 500ms and max 30s + // Exponential backoff with base 500ms and max 30s. 
let backoff = Duration::from_millis(500) - .saturating_mul(1 << retry_counter) + .saturating_mul(1 << retry_counter.min(6)) .min(Duration::from_secs(30)); tracing::warn!( diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 84c711385f..c564a5012d 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -1,6 +1,5 @@ use std::num::NonZeroUsize; use std::sync::Arc; -use std::time::Duration; use anyhow::Context; use futures::TryStreamExt; @@ -13,8 +12,7 @@ use miden_protocol::block::BlockHeader; use miden_protocol::crypto::merkle::mmr::PartialMmr; use miden_protocol::note::NoteScript; use miden_protocol::transaction::PartialBlockchain; -use tokio::sync::{Barrier, RwLock}; -use tokio::time; +use tokio::sync::{RwLock, mpsc}; use url::Url; use crate::MAX_IN_PROGRESS_TXS; @@ -78,13 +76,6 @@ pub struct NetworkTransactionBuilder { /// Address of the remote prover. If `None`, transactions will be proven locally, which is /// undesirable due to the performance impact. tx_prover_url: Option, - /// Interval for checking pending notes and executing network transactions. - ticker_interval: Duration, - /// A checkpoint used to sync start-up process with the block-producer. - /// - /// This informs the block-producer when we have subscribed to mempool events and that it is - /// safe to begin block-production. - bp_checkpoint: Arc, /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. /// This cache is shared across all account actors. script_cache: LruCache, @@ -93,14 +84,15 @@ pub struct NetworkTransactionBuilder { } impl NetworkTransactionBuilder { + /// Channel capacity for account loading. + const ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; + /// Creates a new instance of the network transaction builder. 
pub fn new( store_url: Url, block_producer_url: Url, validator_url: Url, tx_prover_url: Option, - ticker_interval: Duration, - bp_checkpoint: Arc, script_cache_size: NonZeroUsize, ) -> Self { let script_cache = LruCache::new(script_cache_size); @@ -110,8 +102,6 @@ impl NetworkTransactionBuilder { block_producer_url, validator_url, tx_prover_url, - ticker_interval, - bp_checkpoint, script_cache, coordinator, } @@ -131,15 +121,6 @@ impl NetworkTransactionBuilder { .await .context("failed to subscribe to mempool events")?; - // Unlock the block-producer's block production. The block-producer is prevented from - // producing blocks until we have subscribed to mempool events. - // - // This is a temporary work-around until the ntx-builder can resync on the fly. - self.bp_checkpoint.wait().await; - - let mut interval = tokio::time::interval(self.ticker_interval); - interval.set_missed_tick_behavior(time::MissedTickBehavior::Skip); - // Create chain state that will be updated by the coordinator and read by actors. let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); @@ -152,15 +133,17 @@ impl NetworkTransactionBuilder { script_cache: self.script_cache.clone(), }; - // Create initial set of actors based on all known network accounts. - let account_ids = store.get_network_account_ids().await?; - for account_id in account_ids { - if let Ok(account_id) = NetworkAccountId::try_from(account_id) { - self.coordinator - .spawn_actor(AccountOrigin::store(account_id), &actor_context) - .await?; - } - } + // Spawn a background task to load network accounts from the store. + // Accounts are sent through a channel in batches and processed in the main event loop. 
+ let (account_tx, mut account_rx) = + mpsc::channel::(Self::ACCOUNT_CHANNEL_CAPACITY); + let account_loader_store = store.clone(); + let mut account_loader_handle = tokio::spawn(async move { + account_loader_store + .stream_network_account_ids(account_tx) + .await + .context("failed to load network accounts from store") + }); // Main loop which manages actors and passes mempool events to them. loop { @@ -181,10 +164,43 @@ impl NetworkTransactionBuilder { chain_state.clone(), ).await?; }, + // Handle account batches loaded from the store. + // Once all accounts are loaded, the channel closes and this branch + // becomes inactive (recv returns None and we stop matching). + Some(account_id) = account_rx.recv() => { + self.handle_loaded_account(account_id, &actor_context).await?; + }, + // Handle account loader task completion/failure. + // If the task fails, we abort since the builder would be in a degraded state + // where existing notes against network accounts won't be processed. + result = &mut account_loader_handle => { + result + .context("account loader task panicked") + .flatten()?; + + tracing::info!("account loading from store completed"); + account_loader_handle = tokio::spawn(std::future::pending()); + }, } } } + /// Handles a batch of account IDs loaded from the store by spawning actors for them. + #[tracing::instrument( + name = "ntx.builder.handle_loaded_accounts", + skip(self, account_id, actor_context) + )] + async fn handle_loaded_account( + &mut self, + account_id: NetworkAccountId, + actor_context: &AccountActorContext, + ) -> Result<(), anyhow::Error> { + self.coordinator + .spawn_actor(AccountOrigin::store(account_id), actor_context) + .await?; + Ok(()) + } + /// Handles mempool events by sending them to actors via the coordinator and/or spawning new /// actors as required. 
#[tracing::instrument( diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 784a27101e..1a7c7b309a 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -1,3 +1,4 @@ +use std::ops::RangeInclusive; use std::time::Duration; use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; @@ -7,6 +8,7 @@ use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; +use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{Account, AccountId}; use miden_protocol::block::{BlockHeader, BlockNumber}; @@ -57,7 +59,7 @@ impl StoreClient { Err(StoreError::GrpcClientError(err)) => { // Exponential backoff with base 500ms and max 30s. let backoff = Duration::from_millis(500) - .saturating_mul(1 << retry_counter) + .saturating_mul(1 << retry_counter.min(6)) .min(Duration::from_secs(30)); tracing::warn!( @@ -173,62 +175,137 @@ impl StoreClient { Ok(all_notes) } - /// Get all network account IDs. + /// Streams network account IDs to the provided sender. /// - /// Since the `GetNetworkAccountIds` method is paginated, we loop through all pages until we - /// reach the end. + /// This method is designed to be run in a background task, sending accounts to the main event + /// loop as they are loaded. This allows the ntx-builder to start processing mempool events + /// without waiting for all accounts to be preloaded. + pub async fn stream_network_account_ids( + &self, + sender: tokio::sync::mpsc::Sender, + ) -> Result<(), StoreError> { + let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + + while let Some(next_start) = self.load_accounts_page(block_range, &sender).await? { + block_range = next_start..=BlockNumber::from(u32::MAX); + } + + Ok(()) + } + + /// Loads a single page of network accounts and submits them to the sender. 
/// - /// Each page can return up to `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` - /// accounts (~289,000). With `100_000` iterations, which is assumed to be sufficient for the - /// foreseeable future. - #[instrument(target = COMPONENT, name = "store.client.get_network_account_ids", skip_all, err)] - pub async fn get_network_account_ids(&self) -> Result, StoreError> { - const MAX_ITERATIONS: u32 = 100_000; + /// Returns the next block number to fetch from, or `None` if the chain tip has been reached. + #[instrument(target = COMPONENT, name = "store.client.load_accounts_page", skip_all, err)] + async fn load_accounts_page( + &self, + block_range: RangeInclusive, + sender: &tokio::sync::mpsc::Sender, + ) -> Result, StoreError> { + let (accounts, pagination_info) = self.fetch_network_account_ids_page(block_range).await?; - let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + let chain_tip = pagination_info.chain_tip; + let current_height = pagination_info.block_num; - let mut ids = Vec::new(); - let mut iterations_count = 0; + self.send_accounts_to_channel(accounts, sender).await?; - loop { - let response = self + if current_height >= chain_tip { + Ok(None) + } else { + Ok(Some(BlockNumber::from(current_height))) + } + } + + #[instrument(target = COMPONENT, name = "store.client.fetch_network_account_ids_page", skip_all, err)] + async fn fetch_network_account_ids_page( + &self, + block_range: std::ops::RangeInclusive, + ) -> Result<(Vec, proto::rpc::PaginationInfo), StoreError> { + self.fetch_network_account_ids_page_inner(block_range) + .await + .inspect_err(|err| tracing::Span::current().set_error(err)) + } + + async fn fetch_network_account_ids_page_inner( + &self, + block_range: std::ops::RangeInclusive, + ) -> Result<(Vec, proto::rpc::PaginationInfo), StoreError> { + let mut retry_counter = 0u32; + + let response = loop { + match self .inner .clone() .get_network_account_ids(Into::::into(block_range.clone())) - .await? 
- .into_inner(); - - let accounts: Result, ConversionError> = response - .account_ids - .into_iter() - .map(|account_id| { - AccountId::read_from_bytes(&account_id.id) - .map_err(|err| ConversionError::deserialization_error("account_id", err)) - }) - .collect(); + .await + { + Ok(response) => break response.into_inner(), + Err(err) => { + // Exponential backoff with base 500ms and max 30s. + let backoff = Duration::from_millis(500) + .saturating_mul(1 << retry_counter.min(6)) + .min(Duration::from_secs(30)); + + tracing::warn!( + ?backoff, + %retry_counter, + %err, + "store connection failed while fetching committed accounts page, retrying" + ); - let pagination_info = response.pagination_info.ok_or( - ConversionError::MissingFieldInProtobufRepresentation { - entity: "NetworkAccountIdList", - field_name: "pagination_info", + retry_counter += 1; + tokio::time::sleep(backoff).await; }, - )?; + } + }; + + let accounts = response + .account_ids + .into_iter() + .map(|account_id| { + let account_id = AccountId::read_from_bytes(&account_id.id).map_err(|err| { + StoreError::DeserializationError(ConversionError::deserialization_error( + "account_id", + err, + )) + })?; + NetworkAccountId::try_from(account_id).map_err(|_| { + StoreError::MalformedResponse( + "account id is not a valid network account".into(), + ) + }) + }) + .collect::, StoreError>>()?; - ids.extend(accounts?); - iterations_count += 1; - block_range = - BlockNumber::from(pagination_info.block_num)..=BlockNumber::from(u32::MAX); + let pagination_info = response.pagination_info.ok_or( + ConversionError::MissingFieldInProtobufRepresentation { + entity: "NetworkAccountIdList", + field_name: "pagination_info", + }, + )?; - if pagination_info.block_num >= pagination_info.chain_tip { - break; - } + Ok((accounts, pagination_info)) + } - if iterations_count >= MAX_ITERATIONS { - return Err(StoreError::MaxIterationsReached("GetNetworkAccountIds".to_string())); + #[instrument( + target = COMPONENT, + name = 
"store.client.send_accounts_to_channel", + skip_all + )] + async fn send_accounts_to_channel( + &self, + accounts: Vec, + sender: &tokio::sync::mpsc::Sender, + ) -> Result<(), StoreError> { + for account in accounts { + // If the receiver is dropped, stop loading. + if sender.send(account).await.is_err() { + tracing::warn!("Account receiver dropped"); + return Ok(()); } } - Ok(ids) + Ok(()) } #[instrument(target = COMPONENT, name = "store.client.get_note_script_by_root", skip_all, err)] @@ -268,6 +345,4 @@ pub enum StoreError { MalformedResponse(String), #[error("failed to parse response")] DeserializationError(#[from] ConversionError), - #[error("max iterations reached: {0}")] - MaxIterationsReached(String), } diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 1fc0c266b8..45e4bf8950 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -131,9 +131,9 @@ impl RpcService { return Ok(header); }, Err(err) if err.code() == tonic::Code::Unavailable => { - // exponential backoff with base 500ms and max 30s + // Exponential backoff with base 500ms and max 30s. 
let backoff = Duration::from_millis(500) - .saturating_mul(1 << retry_counter) + .saturating_mul(1 << retry_counter.min(6)) .min(Duration::from_secs(30)); tracing::warn!( From 68de2c0461e02231344f2aa7c6977ed38be237c8 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Wed, 21 Jan 2026 14:20:47 -0800 Subject: [PATCH 112/125] chore: refresh Cargo.lock file --- Cargo.lock | 279 ++++++++++++++++++++++++++++------------------------- 1 file changed, 148 insertions(+), 131 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d32c2ff646..76ceb34e7c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,17 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.17", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -362,7 +351,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -496,25 +485,26 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.9" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +checksum = "87a0c0e6148f11f01f32650a2ea02d532b2ad4e81d8bd41e6e565b5adc5e6082" dependencies = [ "serde", + "serde_core", ] [[package]] name = "cargo_metadata" -version = "0.19.2" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +checksum = "ef987d17b0a113becdd19d3d0022d04d7ef41f9efe4f3fb63ac44ba61df3ade9" dependencies = [ "camino", "cargo-platform", "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -525,9 +515,9 @@ checksum = 
"37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.52" +version = "1.2.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" +checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" dependencies = [ "find-msvc-tools", "jobserver", @@ -604,9 +594,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "js-sys", @@ -1409,9 +1399,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixedbitset" @@ -1711,9 +1701,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -2168,6 +2155,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -2219,9 +2215,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -2513,9 +2509,9 @@ dependencies = [ [[package]] name = "miden-agglayer" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e78bbff1742ac44ddabcca7840c7ca85b1955c0d262df3bcd2db7a0b1341aa" +checksum = "7ccbc1ccbd5549688623ef84c5ac2aa1d110da32f04a75bdd9892d2cab45d244" dependencies = [ "fs-err", "miden-assembly", @@ -2536,7 +2532,7 @@ checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" dependencies = [ "miden-core", "miden-utils-indexing", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-air", "winter-prover", ] @@ -2553,7 +2549,7 @@ dependencies = [ "miden-core", "miden-mast-package", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2577,17 +2573,17 @@ dependencies = [ "rustc_version 0.4.1", "semver 1.0.27", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "miden-block-prover" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11e7778b6851a6348795e168b9423a4d38bd6f89d3d0acf64a1626c12917b9bd" +checksum = "9c6f32b0f6f1e1fabd419d29f413c6eff3fd45fe04377051ea9aa17cdcb0df67" dependencies = [ "miden-protocol", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2607,7 +2603,7 @@ dependencies = [ "num-traits", "proptest", "proptest-derive", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-math", "winter-utils", ] @@ -2626,14 +2622,14 @@ dependencies = [ "miden-processor", "miden-utils-sync", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "miden-crypto" -version = "0.19.3" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b49de9b0d8370c992ee04791f68a4509078198b6f42e5f72a262e7d4456487" 
+checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" dependencies = [ "blake3", "cc", @@ -2657,7 +2653,7 @@ dependencies = [ "sha2", "sha3", "subtle", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-crypto", "winter-math", "winter-utils", @@ -2666,9 +2662,9 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.2" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83479e7af490784c6f2d2e02cec5210fd6e5bc6ce3d4427734e36a773bca72d2" +checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" dependencies = [ "quote", "syn 2.0.114", @@ -2689,7 +2685,7 @@ dependencies = [ "paste", "serde", "serde_spanned 1.0.4", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2710,7 +2706,7 @@ dependencies = [ "derive_more", "miden-assembly-syntax", "miden-core", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2739,7 +2735,7 @@ dependencies = [ "syn 2.0.114", "terminal_size 0.3.0", "textwrap", - "thiserror 2.0.17", + "thiserror 2.0.18", "trybuild", "unicode-width 0.1.14", ] @@ -2828,7 +2824,7 @@ dependencies = [ "rstest", "serial_test", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2862,7 +2858,7 @@ dependencies = [ "miden-standards", "miden-tx", "rstest", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -2888,7 +2884,7 @@ dependencies = [ "miette", "proptest", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tonic", "tonic-prost", "tonic-prost-build", @@ -2925,7 +2921,7 @@ dependencies = [ "rstest", "semver 1.0.27", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2965,7 +2961,7 @@ dependencies = [ "regex", "serde", "termtree", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "toml 0.9.11+spec-1.1.0", @@ -3023,7 +3019,7 @@ dependencies = [ "opentelemetry_sdk", "rand 0.9.2", "serde", - 
"thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", "tower-http", @@ -3046,7 +3042,7 @@ dependencies = [ "miden-node-utils", "miden-protocol", "miden-tx", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -3069,7 +3065,7 @@ dependencies = [ "miden-utils-indexing", "paste", "rayon", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", "winter-prover", @@ -3077,9 +3073,9 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef3b133f424dbbca4ae1576258e695059f96746fc3d03aa2d0a9cc009648c0f" +checksum = "51d43c655cc029e17e3af4a1bed8e69f3eeac81462142e9b8a240a235dcb3b22" dependencies = [ "bech32", "fs-err", @@ -3100,7 +3096,7 @@ dependencies = [ "regex", "semver 1.0.27", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "toml 0.9.11+spec-1.1.0", "walkdir", "winter-rand-utils", @@ -3108,9 +3104,9 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be64f4f5206eae03c83b9825b694bf5cf1176802bf664010fbc32a3fc726822" +checksum = "a7b0c1ac8c94d679ce91036c98c6c9bf2d1791daa548f7c666376ebe3b98eaa0" dependencies = [ "proc-macro2", "quote", @@ -3163,7 +3159,7 @@ dependencies = [ "semver 1.0.27", "serde", "serde_qs", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -3188,7 +3184,7 @@ dependencies = [ "miden-tx", "miette", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", "tonic-prost", @@ -3199,9 +3195,9 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8250694a887e0bbb18275e4ce5bc1dd7a3cc72fe6a8e1e47a7b46bace3cb51fe" +checksum = "9522792de31fff9fb5b1e1fb9afe479f2595e8322b40ec2385b812077888f741" dependencies = [ 
"fs-err", "miden-assembly", @@ -3211,15 +3207,15 @@ dependencies = [ "miden-protocol", "rand 0.9.2", "regex", - "thiserror 2.0.17", + "thiserror 2.0.18", "walkdir", ] [[package]] name = "miden-testing" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5dc1d6cef01839ef521f3e59fbc19731efad64260fcbc877079fc8095a258" +checksum = "626400c8c6be3f8d8f55a83d0a23e742a8f3405976d0a6a1c836e844fa337693" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3239,23 +3235,23 @@ dependencies = [ [[package]] name = "miden-tx" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8136d02b3b83d8f9c2fe6bdc3a33f63951034c1b613351920056825a752f200" +checksum = "8e88de3a252ea6b7d903aa837371f73afc1b8259fe44bd0f3c57cae2e0086390" dependencies = [ "miden-processor", "miden-protocol", "miden-prover", "miden-standards", "miden-verifier", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "miden-tx-batch-prover" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94581bf94b0e7cd5efeaad9e590c49714c075d054f4b19ca60eb2e43ae893ad" +checksum = "94b59038568a52c4a3cc17b96af6a1f2f49ba09b3fb765674883056c412ca77b" dependencies = [ "miden-protocol", "miden-tx", @@ -3291,7 +3287,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" dependencies = [ - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3313,7 +3309,7 @@ checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" dependencies = [ "miden-air", "miden-core", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "winter-verifier", ] @@ -3326,7 +3322,7 @@ checksum = "9d4cfab04baffdda3fb9eafa5f873604059b89a1699aa95e4f1057397a69f0b5" dependencies = [ "miden-formatting", "smallvec", - "thiserror 
2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3688,9 +3684,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-probe" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "openssl-sys" @@ -3714,7 +3710,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] @@ -3729,7 +3725,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", ] @@ -3759,7 +3755,7 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", ] @@ -3904,7 +3900,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "blake2", "bytes", @@ -3940,7 +3936,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "brotli", "bytes", @@ -4030,7 +4026,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -4062,7 +4058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.12.3", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4307,9 +4303,9 @@ 
dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -4354,7 +4350,7 @@ dependencies = [ "memchr", "parking_lot", "protobuf 3.7.2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4491,7 +4487,7 @@ dependencies = [ "prost-reflect", "prost-types", "protox-parse", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4503,7 +4499,7 @@ dependencies = [ "logos", "miette", "prost-types", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4790,6 +4786,16 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.18", +] + [[package]] name = "rstest" version = "0.26.1" @@ -4821,9 +4827,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" dependencies = [ "arrayvec", "num-traits", @@ -4831,9 +4837,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" @@ -4906,7 +4912,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.0", + "openssl-probe 0.2.1", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -4914,18 +4920,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ "ring", "rustls-pki-types", @@ -5138,7 +5144,7 @@ checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" dependencies = [ "percent-encoding", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -5337,14 +5343,13 @@ dependencies = [ [[package]] name = "sqlite-wasm-rs" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e98301bf8b0540c7de45ecd760539b9c62f5772aed172f08efba597c11cd5d" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" dependencies = [ "cc", - "hashbrown 0.16.1", "js-sys", - "thiserror 2.0.17", + "rsqlite-vfs", "wasm-bindgen", ] @@ -5584,11 +5589,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.17", + "thiserror-impl 2.0.18", ] [[package]] @@ 
-5604,9 +5609,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", @@ -5992,7 +5997,7 @@ dependencies = [ "httparse", "js-sys", "pin-project", - "thiserror 2.0.17", + "thiserror 2.0.18", "tonic", "tower-service", "wasm-bindgen", @@ -6093,7 +6098,7 @@ checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" dependencies = [ "chrono", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "tracing-subscriber", ] @@ -6320,9 +6325,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" -version = "9.0.6" +version = "9.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" +checksum = "b849a1f6d8639e8de261e81ee0fc881e3e3620db1af9f2e0da015d4382ceaf75" dependencies = [ "anyhow", "cargo_metadata", @@ -6330,7 +6335,7 @@ dependencies = [ "regex", "rustc_version 0.4.1", "rustversion", - "vergen-lib", + "vergen-lib 9.1.0", ] [[package]] @@ -6344,7 +6349,7 @@ dependencies = [ "rustversion", "time", "vergen", - "vergen-lib", + "vergen-lib 0.1.6", ] [[package]] @@ -6358,6 +6363,17 @@ dependencies = [ "rustversion", ] +[[package]] +name = "vergen-lib" +version = "9.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b34a29ba7e9c59e62f229ae1932fb1b8fb8a6fdcc99215a641913f5f5a59a569" +dependencies = [ + "anyhow", + "derive_builder", + "rustversion", +] + [[package]] name = "version_check" version = "0.9.5" @@ -6409,18 +6425,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.1+wasi-0.2.4" 
+version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -6431,11 +6447,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -6444,9 +6461,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6454,9 +6471,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", @@ -6467,9 +6484,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] @@ -6489,9 +6506,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -6963,9 +6980,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.46.0" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" [[package]] name = "writeable" @@ -7103,9 +7120,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" +checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" [[package]] name = "zstd" From 49cbe6a5498b728bc68463616a61598aa7ff2a7e Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Wed, 21 Jan 2026 16:43:37 -0800 Subject: [PATCH 113/125] chore: revert Cargo.lock refresh except for miden-crypto update --- Cargo.lock | 275 +++++++++++++++++++++++++---------------------------- 1 file changed, 129 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 76ceb34e7c..19a6ca2362 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,6 +27,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -351,7 +362,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.10.5", "proc-macro2", "quote", "regex", @@ -485,26 +496,25 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.3.2" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87a0c0e6148f11f01f32650a2ea02d532b2ad4e81d8bd41e6e565b5adc5e6082" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ "serde", - "serde_core", ] [[package]] name = "cargo_metadata" -version = "0.23.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef987d17b0a113becdd19d3d0022d04d7ef41f9efe4f3fb63ac44ba61df3ade9" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -515,9 +525,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.53" +version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ "find-msvc-tools", "jobserver", @@ -594,9 +604,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", "js-sys", @@ -1399,9 +1409,9 @@ 
dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.8" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" [[package]] name = "fixedbitset" @@ -1701,6 +1711,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] [[package]] name = "hashbrown" @@ -2155,15 +2168,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.14.0" @@ -2215,9 +2219,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -2509,9 +2513,9 @@ dependencies = [ [[package]] name = "miden-agglayer" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ccbc1ccbd5549688623ef84c5ac2aa1d110da32f04a75bdd9892d2cab45d244" +checksum = "64e78bbff1742ac44ddabcca7840c7ca85b1955c0d262df3bcd2db7a0b1341aa" dependencies = [ "fs-err", "miden-assembly", @@ -2532,7 +2536,7 @@ checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" dependencies = [ "miden-core", "miden-utils-indexing", - "thiserror 2.0.18", + "thiserror 2.0.17", "winter-air", "winter-prover", ] @@ -2549,7 +2553,7 @@ dependencies = [ "miden-core", 
"miden-mast-package", "smallvec", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -2573,17 +2577,17 @@ dependencies = [ "rustc_version 0.4.1", "semver 1.0.27", "smallvec", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] name = "miden-block-prover" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c6f32b0f6f1e1fabd419d29f413c6eff3fd45fe04377051ea9aa17cdcb0df67" +checksum = "11e7778b6851a6348795e168b9423a4d38bd6f89d3d0acf64a1626c12917b9bd" dependencies = [ "miden-protocol", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -2603,7 +2607,7 @@ dependencies = [ "num-traits", "proptest", "proptest-derive", - "thiserror 2.0.18", + "thiserror 2.0.17", "winter-math", "winter-utils", ] @@ -2622,7 +2626,7 @@ dependencies = [ "miden-processor", "miden-utils-sync", "sha2", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -2653,7 +2657,7 @@ dependencies = [ "sha2", "sha3", "subtle", - "thiserror 2.0.18", + "thiserror 2.0.17", "winter-crypto", "winter-math", "winter-utils", @@ -2662,9 +2666,9 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.4" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" +checksum = "83479e7af490784c6f2d2e02cec5210fd6e5bc6ce3d4427734e36a773bca72d2" dependencies = [ "quote", "syn 2.0.114", @@ -2685,7 +2689,7 @@ dependencies = [ "paste", "serde", "serde_spanned 1.0.4", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -2706,7 +2710,7 @@ dependencies = [ "derive_more", "miden-assembly-syntax", "miden-core", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -2735,7 +2739,7 @@ dependencies = [ "syn 2.0.114", "terminal_size 0.3.0", "textwrap", - "thiserror 2.0.18", + "thiserror 2.0.17", "trybuild", "unicode-width 0.1.14", ] @@ -2824,7 +2828,7 @@ dependencies = [ "rstest", "serial_test", 
"tempfile", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tokio-stream", "tonic", @@ -2858,7 +2862,7 @@ dependencies = [ "miden-standards", "miden-tx", "rstest", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -2884,7 +2888,7 @@ dependencies = [ "miette", "proptest", "prost", - "thiserror 2.0.18", + "thiserror 2.0.17", "tonic", "tonic-prost", "tonic-prost-build", @@ -2921,7 +2925,7 @@ dependencies = [ "rstest", "semver 1.0.27", "tempfile", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tokio-stream", "tonic", @@ -2961,7 +2965,7 @@ dependencies = [ "regex", "serde", "termtree", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tokio-stream", "toml 0.9.11+spec-1.1.0", @@ -3019,7 +3023,7 @@ dependencies = [ "opentelemetry_sdk", "rand 0.9.2", "serde", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tonic", "tower-http", @@ -3042,7 +3046,7 @@ dependencies = [ "miden-node-utils", "miden-protocol", "miden-tx", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tokio-stream", "tonic", @@ -3065,7 +3069,7 @@ dependencies = [ "miden-utils-indexing", "paste", "rayon", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tracing", "winter-prover", @@ -3073,9 +3077,9 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d43c655cc029e17e3af4a1bed8e69f3eeac81462142e9b8a240a235dcb3b22" +checksum = "0ef3b133f424dbbca4ae1576258e695059f96746fc3d03aa2d0a9cc009648c0f" dependencies = [ "bech32", "fs-err", @@ -3096,7 +3100,7 @@ dependencies = [ "regex", "semver 1.0.27", "serde", - "thiserror 2.0.18", + "thiserror 2.0.17", "toml 0.9.11+spec-1.1.0", "walkdir", "winter-rand-utils", @@ -3104,9 +3108,9 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a7b0c1ac8c94d679ce91036c98c6c9bf2d1791daa548f7c666376ebe3b98eaa0" +checksum = "1be64f4f5206eae03c83b9825b694bf5cf1176802bf664010fbc32a3fc726822" dependencies = [ "proc-macro2", "quote", @@ -3159,7 +3163,7 @@ dependencies = [ "semver 1.0.27", "serde", "serde_qs", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tokio-stream", "tonic", @@ -3184,7 +3188,7 @@ dependencies = [ "miden-tx", "miette", "prost", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tonic", "tonic-prost", @@ -3195,9 +3199,9 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9522792de31fff9fb5b1e1fb9afe479f2595e8322b40ec2385b812077888f741" +checksum = "8250694a887e0bbb18275e4ce5bc1dd7a3cc72fe6a8e1e47a7b46bace3cb51fe" dependencies = [ "fs-err", "miden-assembly", @@ -3207,15 +3211,15 @@ dependencies = [ "miden-protocol", "rand 0.9.2", "regex", - "thiserror 2.0.18", + "thiserror 2.0.17", "walkdir", ] [[package]] name = "miden-testing" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626400c8c6be3f8d8f55a83d0a23e742a8f3405976d0a6a1c836e844fa337693" +checksum = "9cf5dc1d6cef01839ef521f3e59fbc19731efad64260fcbc877079fc8095a258" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3235,23 +3239,23 @@ dependencies = [ [[package]] name = "miden-tx" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e88de3a252ea6b7d903aa837371f73afc1b8259fe44bd0f3c57cae2e0086390" +checksum = "d8136d02b3b83d8f9c2fe6bdc3a33f63951034c1b613351920056825a752f200" dependencies = [ "miden-processor", "miden-protocol", "miden-prover", "miden-standards", "miden-verifier", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] name = "miden-tx-batch-prover" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "94b59038568a52c4a3cc17b96af6a1f2f49ba09b3fb765674883056c412ca77b" +checksum = "b94581bf94b0e7cd5efeaad9e590c49714c075d054f4b19ca60eb2e43ae893ad" dependencies = [ "miden-protocol", "miden-tx", @@ -3287,7 +3291,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" dependencies = [ - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -3309,7 +3313,7 @@ checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" dependencies = [ "miden-air", "miden-core", - "thiserror 2.0.18", + "thiserror 2.0.17", "tracing", "winter-verifier", ] @@ -3322,7 +3326,7 @@ checksum = "9d4cfab04baffdda3fb9eafa5f873604059b89a1699aa95e4f1057397a69f0b5" dependencies = [ "miden-formatting", "smallvec", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -3684,9 +3688,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-probe" -version = "0.2.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" +checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" [[package]] name = "openssl-sys" @@ -3710,7 +3714,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.18", + "thiserror 2.0.17", "tracing", ] @@ -3725,7 +3729,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tonic", ] @@ -3755,7 +3759,7 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "thiserror 2.0.18", + "thiserror 2.0.17", "tokio", "tokio-stream", ] @@ -3900,7 +3904,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash", + "ahash 
0.8.12", "async-trait", "blake2", "bytes", @@ -3936,7 +3940,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "brotli", "bytes", @@ -4026,7 +4030,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash", + "ahash 0.8.12", ] [[package]] @@ -4058,7 +4062,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.1", + "hashbrown 0.12.3", "parking_lot", "rand 0.8.5", ] @@ -4303,9 +4307,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.106" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] @@ -4350,7 +4354,7 @@ dependencies = [ "memchr", "parking_lot", "protobuf 3.7.2", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -4487,7 +4491,7 @@ dependencies = [ "prost-reflect", "prost-types", "protox-parse", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -4499,7 +4503,7 @@ dependencies = [ "logos", "miette", "prost-types", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -4786,16 +4790,6 @@ dependencies = [ "librocksdb-sys", ] -[[package]] -name = "rsqlite-vfs" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" -dependencies = [ - "hashbrown 0.16.1", - "thiserror 2.0.18", -] - [[package]] name = "rstest" version = "0.26.1" @@ -4827,9 +4821,9 
@@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.40.0" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" +checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" dependencies = [ "arrayvec", "num-traits", @@ -4837,9 +4831,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.27" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" @@ -4912,7 +4906,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe 0.2.0", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -4920,18 +4914,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.14.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -5144,7 +5138,7 @@ checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" dependencies = [ "percent-encoding", "serde", - "thiserror 2.0.18", + "thiserror 2.0.17", ] [[package]] @@ -5343,13 +5337,14 @@ dependencies = 
[ [[package]] name = "sqlite-wasm-rs" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" +checksum = "05e98301bf8b0540c7de45ecd760539b9c62f5772aed172f08efba597c11cd5d" dependencies = [ "cc", + "hashbrown 0.16.1", "js-sys", - "rsqlite-vfs", + "thiserror 2.0.17", "wasm-bindgen", ] @@ -5589,11 +5584,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.18" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.18", + "thiserror-impl 2.0.17", ] [[package]] @@ -5609,9 +5604,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.18" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", @@ -5997,7 +5992,7 @@ dependencies = [ "httparse", "js-sys", "pin-project", - "thiserror 2.0.18", + "thiserror 2.0.17", "tonic", "tower-service", "wasm-bindgen", @@ -6098,7 +6093,7 @@ checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" dependencies = [ "chrono", "smallvec", - "thiserror 2.0.18", + "thiserror 2.0.17", "tracing", "tracing-subscriber", ] @@ -6325,9 +6320,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" -version = "9.1.0" +version = "9.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b849a1f6d8639e8de261e81ee0fc881e3e3620db1af9f2e0da015d4382ceaf75" +checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" dependencies = [ 
"anyhow", "cargo_metadata", @@ -6335,7 +6330,7 @@ dependencies = [ "regex", "rustc_version 0.4.1", "rustversion", - "vergen-lib 9.1.0", + "vergen-lib", ] [[package]] @@ -6349,7 +6344,7 @@ dependencies = [ "rustversion", "time", "vergen", - "vergen-lib 0.1.6", + "vergen-lib", ] [[package]] @@ -6363,17 +6358,6 @@ dependencies = [ "rustversion", ] -[[package]] -name = "vergen-lib" -version = "9.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b34a29ba7e9c59e62f229ae1932fb1b8fb8a6fdcc99215a641913f5f5a59a569" -dependencies = [ - "anyhow", - "derive_builder", - "rustversion", -] - [[package]] name = "version_check" version = "0.9.5" @@ -6425,18 +6409,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.2+wasi-0.2.9" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", @@ -6447,12 +6431,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", - "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -6461,9 +6444,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = 
"0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6471,9 +6454,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", @@ -6484,9 +6467,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] @@ -6506,9 +6489,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", @@ -6980,9 +6963,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.51.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" @@ -7120,9 +7103,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.16" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" +checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" [[package]] name = "zstd" From aca0ec54a48cf40bff650f2a6b4699e2f050f19a Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Thu, 22 Jan 2026 01:02:05 -0800 Subject: [PATCH 114/125] chore: refresh Cargo.lock with the latest versions of miden-base crates --- Cargo.lock | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19a6ca2362..132b415fc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1366,7 +1366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -2144,7 +2144,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -2513,9 +2513,9 @@ dependencies = [ [[package]] name = "miden-agglayer" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e78bbff1742ac44ddabcca7840c7ca85b1955c0d262df3bcd2db7a0b1341aa" +checksum = "ccebe2f7aa9e173913a9da60bd21e8402936c784fdf1eba8c48956667def354e" dependencies = [ "fs-err", "miden-assembly", @@ -2582,9 +2582,9 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11e7778b6851a6348795e168b9423a4d38bd6f89d3d0acf64a1626c12917b9bd" +checksum = "aa9c89257b227d0668105b4a6e81ea33956795c89549cc1baa3f253d753e81e5" dependencies = [ "miden-protocol", "thiserror 2.0.17", @@ -3077,9 +3077,9 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.0" +version = "0.13.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef3b133f424dbbca4ae1576258e695059f96746fc3d03aa2d0a9cc009648c0f" +checksum = "dfed3ae85e2fabbf8a2e7416e388a40519e10cbf0cdceda222ef858c2f270b35" dependencies = [ "bech32", "fs-err", @@ -3108,9 +3108,9 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be64f4f5206eae03c83b9825b694bf5cf1176802bf664010fbc32a3fc726822" +checksum = "f41a93dd532baa3a4c821073baad5d700aab119b3831ef7fdf004e196c10157e" dependencies = [ "proc-macro2", "quote", @@ -3199,9 +3199,9 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8250694a887e0bbb18275e4ce5bc1dd7a3cc72fe6a8e1e47a7b46bace3cb51fe" +checksum = "16144e41701794b45b7a361ec7d35407a90c4d1d129a43df0bc278d5f3327999" dependencies = [ "fs-err", "miden-assembly", @@ -3217,9 +3217,9 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5dc1d6cef01839ef521f3e59fbc19731efad64260fcbc877079fc8095a258" +checksum = "9bd0c6d0ceb4e6719a5afe76b9627b73e91506ebb66350d56ca9ed606127e4dc" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3239,9 +3239,9 @@ dependencies = [ [[package]] name = "miden-tx" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8136d02b3b83d8f9c2fe6bdc3a33f63951034c1b613351920056825a752f200" +checksum = "a97f26c833633cea0d95ddb38bcd8bd7e8225b4e7746c15070cb9ab7b85e248c" dependencies = [ "miden-processor", "miden-protocol", @@ -3253,9 +3253,9 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b94581bf94b0e7cd5efeaad9e590c49714c075d054f4b19ca60eb2e43ae893ad" +checksum = "0669ce9d9c7aacd49e4923edb88fe668e370c02a754d1564b10a97501e37310f" dependencies = [ "miden-protocol", "miden-tx", @@ -4869,7 +4869,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -4882,7 +4882,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -5515,7 +5515,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -6529,7 +6529,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] From e67230e932988a5e52939ce1c017197cc8bc01ac Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 22 Jan 2026 12:59:46 +0200 Subject: [PATCH 115/125] fix(validator): load foreign code from inputs (#1570) --- crates/validator/src/tx_validation/data_store.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/validator/src/tx_validation/data_store.rs b/crates/validator/src/tx_validation/data_store.rs index b2c9093d4f..ebd382e44a 100644 --- a/crates/validator/src/tx_validation/data_store.rs +++ b/crates/validator/src/tx_validation/data_store.rs @@ -24,6 +24,9 @@ impl TransactionInputsDataStore { pub fn new(tx_inputs: TransactionInputs) -> Self { let mast_store = TransactionMastStore::new(); mast_store.load_account_code(tx_inputs.account().code()); + for code in tx_inputs.foreign_account_code() { + mast_store.load_account_code(code); + } Self { tx_inputs, mast_store } } } From 3758846070f5907a910bef9b84bfe0d6bd1a5f23 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 22 Jan 2026 12:15:32 +0100 Subject: [PATCH 116/125] feat: add 
termination on root mismatch in store (#1569) --- CHANGELOG.md | 1 + bin/stress-test/src/store/mod.rs | 3 ++- crates/store/src/server/mod.rs | 18 +++++++++++++++--- crates/store/src/state/mod.rs | 25 +++++++++++++++++++++++-- 4 files changed, 41 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d82a7c4dbd..dbf050f30f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). - Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). - Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch (#[#1569](https://github.com/0xMiden/miden-node/pull/1569)). ### Changes diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index e4960bb7e1..fa39303aed 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -486,7 +486,8 @@ async fn sync_transactions_paginated( pub async fn load_state(data_directory: &Path) { let start = Instant::now(); - let _state = State::load(data_directory).await.unwrap(); + let (termination_ask, _) = tokio::sync::mpsc::channel(1); + let _state = State::load(data_directory, termination_ask).await.unwrap(); let elapsed = start.elapsed(); // Get database path and run SQL commands to count records diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 420ec4921b..b4b5798db9 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -21,6 +21,7 @@ use tracing::{info, instrument}; use crate::blocks::BlockStore; use crate::db::Db; +use crate::errors::ApplyBlockError; use crate::state::State; use crate::{COMPONENT, GenesisState}; @@ -91,8 +92,13 @@ impl Store { 
block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_timeout, "Loading database"); - let state = - Arc::new(State::load(&self.data_directory).await.context("failed to load state")?); + let (termination_ask, mut termination_signal) = + tokio::sync::mpsc::channel::(1); + let state = Arc::new( + State::load(&self.data_directory, termination_ask) + .await + .context("failed to load state")?, + ); let rpc_service = store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); @@ -173,7 +179,13 @@ impl Store { ); // SAFETY: The joinset is definitely not empty. - join_set.join_next().await.unwrap()?.map_err(Into::into) + let service = async move { join_set.join_next().await.unwrap()?.map_err(Into::into) }; + tokio::select! { + result = service => result, + Some(err) = termination_signal.recv() => { + Err(anyhow::anyhow!("received termination signal").context(err)) + } + } } } diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 3c422121ba..af21e76580 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -130,6 +130,9 @@ pub struct State { /// To allow readers to access the tree data while an update in being performed, and prevent /// TOCTOU issues, there must be no concurrent writers. This locks to serialize the writers. writer: Mutex<()>, + + /// Request termination of the process due to a fatal internal state error. + termination_ask: tokio::sync::mpsc::Sender, } impl State { @@ -138,7 +141,10 @@ impl State { /// Loads the state from the data directory. 
#[instrument(target = COMPONENT, skip_all)] - pub async fn load(data_path: &Path) -> Result { + pub async fn load( + data_path: &Path, + termination_ask: tokio::sync::mpsc::Sender, + ) -> Result { let data_directory = DataDirectory::load(data_path.to_path_buf()) .map_err(StateInitializationError::DataDirectoryLoadError)?; @@ -178,7 +184,14 @@ impl State { let writer = Mutex::new(()); let db = Arc::new(db); - Ok(Self { db, block_store, inner, forest, writer }) + Ok(Self { + db, + block_store, + inner, + forest, + writer, + termination_ask, + }) } // STATE MUTATOR @@ -304,6 +317,11 @@ impl State { .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { + // We do our best here to notify the serve routine, if it doesn't care (dropped the + // receiver) we can't do much. + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidNullifierRoot, + )); return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); } @@ -327,6 +345,9 @@ impl State { })?; if account_tree_update.as_mutation_set().root() != header.account_root() { + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidAccountRoot, + )); return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); } From fe2d173b404533c136e4137026f47045b98d0bc7 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 22 Jan 2026 16:30:40 +0200 Subject: [PATCH 117/125] fix(ntx): mempool subscription has a race condition (#1568) --- crates/ntx-builder/src/builder.rs | 33 +++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index c564a5012d..8b789779f7 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -112,14 +112,31 @@ impl 
NetworkTransactionBuilder { let store = StoreClient::new(self.store_url.clone()); let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? - .expect("store should contain a latest block"); - let mut mempool_events = block_producer - .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) - .await - .context("failed to subscribe to mempool events")?; + // Loop until we successfully subscribe. + // + // The mempool rejects our subscription if we don't have the same view of the chain aka + // if our chain tip does not match the mempools. This can occur if a new block is committed + // _after_ we fetch the chain tip from the store but _before_ our subscription request is + // handled. + // + // This is a hack-around for https://github.com/0xMiden/miden-node/issues/1566. + let (chain_tip_header, chain_mmr, mut mempool_events) = loop { + let (chain_tip_header, chain_mmr) = store + .get_latest_blockchain_data_with_retry() + .await? + .expect("store should contain a latest block"); + + match block_producer + .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) + .await + { + Ok(subscription) => break (chain_tip_header, chain_mmr, subscription), + Err(status) if status.code() == tonic::Code::InvalidArgument => { + tracing::error!(err=%status, "mempool subscription failed due to desync, trying again"); + }, + Err(err) => return Err(err).context("failed to subscribe to mempool events"), + } + }; // Create chain state that will be updated by the coordinator and read by actors. 
let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); From ee462855c8e542d4a3c6c2524efe7d042da08fda Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 22 Jan 2026 11:32:54 -0300 Subject: [PATCH 118/125] chore: miden base update followups (#1563) * chore: update miden-base to 0.13.1, use NetworkAccountTargetError * chore: remove network account prefix references * chore: NetworkNoteType enum * review: rename NoteRowInsert --- crates/ntx-builder/src/actor/account_state.rs | 10 +-- crates/ntx-builder/src/actor/mod.rs | 4 +- crates/ntx-builder/src/coordinator.rs | 74 ++++++++++--------- crates/proto/src/domain/account.rs | 6 +- crates/proto/src/domain/note.rs | 6 +- .../db/migrations/2025062000000_setup/up.sql | 6 +- crates/store/src/db/mod.rs | 6 +- crates/store/src/db/models/queries/notes.rs | 39 ++++++++-- crates/store/src/db/schema.rs | 2 +- crates/store/src/state/mod.rs | 4 +- 10 files changed, 92 insertions(+), 65 deletions(-) diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index 3a9015a265..a130b8079f 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -162,7 +162,7 @@ impl NetworkAccountState { account_delta, } => { // Filter network notes relevant to this account. - let network_notes = filter_by_prefix_and_map_to_single_target( + let network_notes = filter_by_account_id_and_map_to_single_target( self.account_id, network_notes.clone(), ); @@ -249,8 +249,8 @@ impl NetworkAccountState { return; }; - if let Some(prefix) = impact.account_delta { - if prefix == self.account_id { + if let Some(delta_account_id) = impact.account_delta { + if delta_account_id == self.account_id { self.account.commit_delta(); } } @@ -330,8 +330,8 @@ impl TransactionImpact { } } -/// Filters network notes by prefix and maps them to single target network notes. 
-fn filter_by_prefix_and_map_to_single_target( +/// Filters network notes by account ID and maps them to single target network notes. +fn filter_by_account_id_and_map_to_single_target( account_id: NetworkAccountId, notes: Vec, ) -> Vec { diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index f743d79085..ae8f63629e 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -202,9 +202,9 @@ impl AccountActor { // Load the account state from the store and set up the account actor state. let account = { match self.origin { - AccountOrigin::Store(account_prefix) => self + AccountOrigin::Store(account_id) => self .store - .get_network_account(account_prefix) + .get_network_account(account_id) .await .expect("actor should be able to load account") .expect("actor account should exist"), diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index f6c0389114..285cee47af 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -38,7 +38,7 @@ impl ActorHandle { /// /// The `Coordinator` is the central orchestrator of the network transaction builder system. /// It manages the lifecycle of account actors. Each actor is responsible for handling transactions -/// for a specific network account prefix. The coordinator provides the following core +/// for a specific network account. The coordinator provides the following core /// functionality: /// /// ## Actor Management @@ -62,7 +62,7 @@ impl ActorHandle { /// 3. Actor completion/failure events are monitored and handled. /// 4. Failed or completed actors are cleaned up from the registry. pub struct Coordinator { - /// Mapping of network account prefixes to their respective message channels and cancellation + /// Mapping of network account IDs to their respective message channels and cancellation /// tokens. 
/// /// This registry serves as the primary directory for communicating with active account actors. @@ -88,7 +88,7 @@ pub struct Coordinator { semaphore: Arc, /// Cache of events received from the mempool that predate corresponding network accounts. - /// Grouped by account prefix to allow targeted event delivery to actors upon creation. + /// Grouped by network account ID to allow targeted event delivery to actors upon creation. predating_events: HashMap>>, } @@ -111,18 +111,18 @@ impl Coordinator { /// /// This method creates a new [`AccountActor`] instance for the specified account origin /// and adds it to the coordinator's management system. The actor will be responsible for - /// processing transactions and managing state for accounts matching the network prefix. + /// processing transactions and managing state for the network account. #[tracing::instrument(name = "ntx.builder.spawn_actor", skip(self, origin, actor_context))] pub async fn spawn_actor( &mut self, origin: AccountOrigin, actor_context: &AccountActorContext, ) -> Result<(), SendError>> { - let account_prefix = origin.id(); + let account_id = origin.id(); - // If an actor already exists for this account prefix, something has gone wrong. - if let Some(handle) = self.actor_registry.remove(&account_prefix) { - tracing::error!("account actor already exists for prefix: {}", account_prefix); + // If an actor already exists for this account ID, something has gone wrong. + if let Some(handle) = self.actor_registry.remove(&account_id) { + tracing::error!("account actor already exists for account: {}", account_id); handle.cancel_token.cancel(); } @@ -136,14 +136,14 @@ impl Coordinator { self.actor_join_set.spawn(Box::pin(actor.run(semaphore))); // Send the new actor any events that contain notes that predate account creation. 
- if let Some(prefix_events) = self.predating_events.remove(&account_prefix) { - for event in prefix_events.values() { + if let Some(predating_events) = self.predating_events.remove(&account_id) { + for event in predating_events.values() { Self::send(&handle, event.clone()).await?; } } - self.actor_registry.insert(account_prefix, handle); - tracing::info!("created actor for account prefix: {}", account_prefix); + self.actor_registry.insert(account_id, handle); + tracing::info!("created actor for account: {}", account_id); Ok(()) } @@ -163,16 +163,16 @@ impl Coordinator { let mut failed_actors = Vec::new(); // Send event to all actors. - for (account_prefix, handle) in &self.actor_registry { + for (account_id, handle) in &self.actor_registry { if let Err(err) = Self::send(handle, event.clone()).await { - tracing::error!("failed to send event to actor {}: {}", account_prefix, err); - failed_actors.push(*account_prefix); + tracing::error!("failed to send event to actor {}: {}", account_id, err); + failed_actors.push(*account_id); } } // Remove failed actors from registry and cancel them. - for prefix in failed_actors { + for account_id in failed_actors { let handle = - self.actor_registry.remove(&prefix).expect("actor found in send loop above"); + self.actor_registry.remove(&account_id).expect("actor found in send loop above"); handle.cancel_token.cancel(); } } @@ -189,15 +189,15 @@ impl Coordinator { let actor_result = self.actor_join_set.join_next().await; match actor_result { Some(Ok(shutdown_reason)) => match shutdown_reason { - ActorShutdownReason::Cancelled(account_prefix) => { + ActorShutdownReason::Cancelled(account_id) => { // Do not remove the actor from the registry, as it may be re-spawned. // The coordinator should always remove actors immediately after cancellation. 
- tracing::info!("account actor cancelled: {}", account_prefix); + tracing::info!("account actor cancelled: {}", account_id); Ok(()) }, - ActorShutdownReason::AccountReverted(account_prefix) => { - tracing::info!("account reverted: {}", account_prefix); - self.actor_registry.remove(&account_prefix); + ActorShutdownReason::AccountReverted(account_id) => { + tracing::info!("account reverted: {}", account_id); + self.actor_registry.remove(&account_id); Ok(()) }, ActorShutdownReason::EventChannelClosed => { @@ -239,9 +239,10 @@ impl Coordinator { if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { let account_id = delta.id(); if account_id.is_network() { - let prefix = account_id.try_into().expect("account is network account"); - if let Some(actor) = self.actor_registry.get(&prefix) { - target_actors.insert(prefix, actor); + let network_account_id = + account_id.try_into().expect("account is network account"); + if let Some(actor) = self.actor_registry.get(&network_account_id) { + target_actors.insert(network_account_id, actor); } } } @@ -249,13 +250,16 @@ impl Coordinator { // Determine target actors for each note. for note in network_notes { let NetworkNote::SingleTarget(note) = note; - let prefix = note.account_id(); - if let Some(actor) = self.actor_registry.get(&prefix) { + let network_account_id = note.account_id(); + if let Some(actor) = self.actor_registry.get(&network_account_id) { // Register actor as target. - target_actors.insert(prefix, actor); + target_actors.insert(network_account_id, actor); } else { // Cache event for every note that doesn't have a corresponding actor. - self.predating_events.entry(prefix).or_default().insert(*id, event.clone()); + self.predating_events + .entry(network_account_id) + .or_default() + .insert(*id, event.clone()); } } } @@ -266,15 +270,15 @@ impl Coordinator { Ok(()) } - /// Removes any cached events for a given transaction ID from all account prefix caches. 
+ /// Removes any cached events for a given transaction ID from all account caches. pub fn drain_predating_events(&mut self, tx_id: &TransactionId) { - // Remove the transaction from all prefix caches. + // Remove the transaction from all account caches. // This iterates over all predating events which is fine because the count is expected to be // low. - self.predating_events.retain(|_, prefix_event| { - prefix_event.shift_remove(tx_id); - // Remove entries for account prefixes with no more cached events. - !prefix_event.is_empty() + self.predating_events.retain(|_, account_events| { + account_events.shift_remove(tx_id); + // Remove entries for accounts with no more cached events. + !account_events.is_empty() }); } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 3bd3aa87cc..558c0fa1d3 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -20,7 +20,7 @@ use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::note::NoteAttachment; use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; -use miden_standards::note::NetworkAccountTarget; +use miden_standards::note::{NetworkAccountTarget, NetworkAccountTargetError}; use thiserror::Error; use super::try_convert; @@ -1065,7 +1065,7 @@ impl TryFrom<&NoteAttachment> for NetworkAccountId { fn try_from(attachment: &NoteAttachment) -> Result { let target = NetworkAccountTarget::try_from(attachment) - .map_err(|e| NetworkAccountError::InvalidAttachment(e.to_string()))?; + .map_err(NetworkAccountError::InvalidAttachment)?; Ok(NetworkAccountId(target.target_id())) } } @@ -1097,7 +1097,7 @@ pub enum NetworkAccountError { #[error("account ID {0} is not a valid network account ID")] NotNetworkAccount(AccountId), #[error("invalid network account attachment: {0}")] - InvalidAttachment(String), + InvalidAttachment(#[source] NetworkAccountTargetError), 
#[error("invalid network account prefix: {0}")] InvalidPrefix(u32), } diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index 5098ef41fb..b7e07f2cc2 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -14,7 +14,7 @@ use miden_protocol::note::{ Nullifier, }; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_standards::note::NetworkAccountTarget; +use miden_standards::note::{NetworkAccountTarget, NetworkAccountTargetError}; use thiserror::Error; use super::account::NetworkAccountId; @@ -282,7 +282,7 @@ impl TryFrom for SingleTargetNetworkNote { // Single-target network notes are identified by having a NetworkAccountTarget attachment let attachment = note.metadata().attachment(); let account_target = NetworkAccountTarget::try_from(attachment) - .map_err(|e| NetworkNoteError::InvalidAttachment(e.to_string()))?; + .map_err(NetworkNoteError::InvalidAttachment)?; Ok(Self { note, account_target }) } } @@ -315,7 +315,7 @@ where #[derive(Debug, Error)] pub enum NetworkNoteError { #[error("note does not have a valid NetworkAccountTarget attachment: {0}")] - InvalidAttachment(String), + InvalidAttachment(#[source] NetworkAccountTargetError), } // NOTE SCRIPT diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 32c618aaba..d3638a5bee 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -51,7 +51,7 @@ CREATE TABLE notes ( note_type INTEGER NOT NULL, -- 1-Public (0b01), 2-Private (0b10), 3-Encrypted (0b11) sender BLOB NOT NULL, tag INTEGER NOT NULL, - is_single_target_network_note INTEGER NOT NULL, -- 1 if note has NetworkAccountTarget attachment, 0 otherwise + network_note_type INTEGER NOT NULL, -- 0-not a network note, 1-single account target network note attachment BLOB NOT NULL, -- Serialized note attachment data inclusion_path 
BLOB NOT NULL, -- Serialized sparse Merkle path of the note in the block's note tree consumed_at INTEGER, -- Block number when the note was consumed @@ -63,7 +63,7 @@ CREATE TABLE notes ( PRIMARY KEY (committed_at, batch_index, note_index), CONSTRAINT notes_type_in_enum CHECK (note_type BETWEEN 1 AND 3), - CONSTRAINT notes_is_single_target_network_note_is_bool CHECK (is_single_target_network_note BETWEEN 0 AND 1), + CONSTRAINT notes_network_note_type_in_enum CHECK (network_note_type BETWEEN 0 AND 1), CONSTRAINT notes_consumed_at_is_u32 CHECK (consumed_at BETWEEN 0 AND 0xFFFFFFFF), CONSTRAINT notes_batch_index_is_u32 CHECK (batch_index BETWEEN 0 AND 0xFFFFFFFF), CONSTRAINT notes_note_index_is_u32 CHECK (note_index BETWEEN 0 AND 0xFFFFFFFF) @@ -74,7 +74,7 @@ CREATE INDEX idx_notes_note_commitment ON notes(note_commitment); CREATE INDEX idx_notes_sender ON notes(sender, committed_at); CREATE INDEX idx_notes_tag ON notes(tag, committed_at); CREATE INDEX idx_notes_nullifier ON notes(nullifier); -CREATE INDEX idx_unconsumed_network_notes ON notes(is_single_target_network_note, consumed_at); +CREATE INDEX idx_unconsumed_network_notes ON notes(network_note_type, consumed_at); -- Index for joining with block_headers on committed_at CREATE INDEX idx_notes_committed_at ON notes(committed_at); -- Index for joining with note_scripts diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fe9c74974..7ecfcb7451 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -656,9 +656,9 @@ impl Db { block_num: BlockNumber, page: Page, ) -> Result<(Vec, Page)> { - // Network notes sent to a specific account have their tags set to the prefix of the target - // account ID. So we can convert the ID prefix into a note tag to query the notes for a - // given account. + // Single-target network notes have their tags derived from the target account ID. 
+ // The 30-bit account ID prefix is used as the note tag, allowing us to query notes + // for a given network account. self.transact("unconsumed network notes for account", move |conn| { models::queries::select_unconsumed_network_notes_by_tag( conn, diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index f68d5447b7..10b8316dec 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -64,6 +64,25 @@ use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{DatabaseError, NoteRecord, NoteSyncRecord, NoteSyncUpdate, Page, schema}; use crate::errors::NoteSyncError; +// NETWORK NOTE TYPE +// ================================================================================================ + +/// Classifies network notes for database storage. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(i32)] +pub(crate) enum NetworkNoteType { + /// Not a network note. + None = 0, + /// Single account target network note (has `NetworkAccountTarget` attachment). + SingleTarget = 1, +} + +impl From for i32 { + fn from(value: NetworkNoteType) -> Self { + value as i32 + } +} + /// Select notes matching the tags and account IDs search criteria within a block range. 
/// /// # Parameters @@ -416,7 +435,7 @@ pub(crate) fn select_note_script_by_root( /// FROM notes /// LEFT JOIN note_scripts ON notes.script_root = note_scripts.script_root /// WHERE -/// is_single_target_network_note = TRUE AND tag = ?1 AND +/// network_note_type = 1 AND tag = ?1 AND /// committed_at <= ?2 AND /// (consumed_at IS NULL OR consumed_at > ?2) AND notes.rowid >= ?3 /// ORDER BY notes.rowid ASC @@ -474,7 +493,7 @@ pub(crate) fn select_unconsumed_network_notes_by_tag( rowid_sel.clone(), ), ) - .filter(schema::notes::is_single_target_network_note.eq(true)) + .filter(schema::notes::network_note_type.eq(i32::from(NetworkNoteType::SingleTarget))) .filter(schema::notes::tag.eq(tag as i32)) .filter(schema::notes::committed_at.le(block_num.to_raw_sql())) .filter( @@ -786,7 +805,7 @@ pub(crate) fn insert_notes( .values(Vec::from_iter( notes .iter() - .map(|(note, nullifier)| NoteInsertRowInsert::from((note.clone(), *nullifier))), + .map(|(note, nullifier)| NoteInsertRow::from((note.clone(), *nullifier))), )) .execute(conn)?; Ok(count) @@ -829,7 +848,7 @@ pub(crate) fn insert_scripts<'a>( #[derive(Debug, Clone, PartialEq, Insertable)] #[diesel(table_name = schema::notes)] -pub struct NoteInsertRowInsert { +pub struct NoteInsertRow { pub committed_at: i64, pub batch_index: i32, @@ -849,15 +868,19 @@ pub struct NoteInsertRowInsert { pub serial_num: Option>, pub nullifier: Option>, pub script_root: Option>, - pub is_single_target_network_note: bool, + pub network_note_type: i32, pub inclusion_path: Vec, } -impl From<(NoteRecord, Option)> for NoteInsertRowInsert { +impl From<(NoteRecord, Option)> for NoteInsertRow { fn from((note, nullifier): (NoteRecord, Option)) -> Self { let attachment = note.metadata.attachment(); - let is_single_target_network_note = NetworkAccountTarget::try_from(attachment).is_ok(); + let network_note_type = if NetworkAccountTarget::try_from(attachment).is_ok() { + NetworkNoteType::SingleTarget + } else { + NetworkNoteType::None + }; let 
attachment_bytes = attachment.to_bytes(); @@ -870,7 +893,7 @@ impl From<(NoteRecord, Option)> for NoteInsertRowInsert { note_type: note_type_to_raw_sql(note.metadata.note_type() as u8), sender: note.metadata.sender().to_bytes(), tag: note.metadata.tag().to_raw_sql(), - is_single_target_network_note, + network_note_type: network_note_type.into(), attachment: attachment_bytes, inclusion_path: note.inclusion_path.to_bytes(), consumed_at: None::, // New notes are always unconsumed. diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index e9333057c9..8b6b7e832a 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -67,7 +67,7 @@ diesel::table! { note_type -> Integer, sender -> Binary, tag -> Integer, - is_single_target_network_note -> Bool, + network_note_type -> Integer, attachment -> Binary, inclusion_path -> Binary, consumed_at -> Nullable, diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index af21e76580..85332ec2ba 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -695,8 +695,8 @@ impl State { /// Loads data to synchronize a client. /// - /// The client's request contains a list of tag prefixes, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filter based on this + /// The client's request contains a list of note tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filtered based on this /// block range. 
/// /// # Arguments From 6d15bd8c9addd5b26e781ae445d6f02d11f015b2 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 22 Jan 2026 16:55:25 +0100 Subject: [PATCH 119/125] feat: remove prefix for network note lookup (#1572) --- CHANGELOG.md | 1 + crates/ntx-builder/src/store.rs | 2 +- crates/proto/src/generated/store.rs | 8 +++---- .../db/migrations/2025062000000_setup/up.sql | 3 ++- crates/store/src/db/mod.rs | 12 +++-------- crates/store/src/db/models/queries/notes.rs | 21 +++++++++++-------- crates/store/src/db/schema.rs | 1 + crates/store/src/db/tests.rs | 12 +++++------ crates/store/src/server/ntx_builder.rs | 7 ++----- crates/store/src/state/mod.rs | 6 ++---- proto/proto/internal/store.proto | 4 ++-- 11 files changed, 36 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dbf050f30f..76e48827f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ - Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). - Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). - [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/miden-node/pull/1526)). +- [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/miden-node/pull/1572)). 
### Fixes diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 1a7c7b309a..02e12d8964 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -156,7 +156,7 @@ impl StoreClient { let req = proto::store::UnconsumedNetworkNotesRequest { page_token, page_size: PAGE_SIZE, - network_account_id_prefix: network_account_id.prefix(), + account_id: Some(network_account_id.inner().into()), block_num, }; let resp = store_client.get_unconsumed_network_notes(req).await?.into_inner(); diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index 2fc8168fb8..aad46a4224 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -168,7 +168,7 @@ pub struct MaybeAccountDetails { /// Returns a paginated list of unconsumed network notes for an account. /// /// Notes created or consumed after the specified block are excluded from the result. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct UnconsumedNetworkNotesRequest { /// This should be null on the first call, and set to the response token until the response token /// is null, at which point all data has been fetched. @@ -179,9 +179,9 @@ pub struct UnconsumedNetworkNotesRequest { /// Number of notes to retrieve per page. #[prost(uint64, tag = "2")] pub page_size: u64, - /// The network account ID prefix to filter notes by. - #[prost(uint32, tag = "3")] - pub network_account_id_prefix: u32, + /// The full account ID to filter notes by. + #[prost(message, optional, tag = "3")] + pub account_id: ::core::option::Option, /// The block number to filter the returned notes by. /// /// Notes that are created or consumed after this block are excluded from the result. 
diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index d3638a5bee..b3ca25d563 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -52,6 +52,7 @@ CREATE TABLE notes ( sender BLOB NOT NULL, tag INTEGER NOT NULL, network_note_type INTEGER NOT NULL, -- 0-not a network note, 1-single account target network note + target_account_id BLOB, -- Full target account ID for single-target network notes attachment BLOB NOT NULL, -- Serialized note attachment data inclusion_path BLOB NOT NULL, -- Serialized sparse Merkle path of the note in the block's note tree consumed_at INTEGER, -- Block number when the note was consumed @@ -74,7 +75,7 @@ CREATE INDEX idx_notes_note_commitment ON notes(note_commitment); CREATE INDEX idx_notes_sender ON notes(sender, committed_at); CREATE INDEX idx_notes_tag ON notes(tag, committed_at); CREATE INDEX idx_notes_nullifier ON notes(nullifier); -CREATE INDEX idx_unconsumed_network_notes ON notes(network_note_type, consumed_at); +CREATE INDEX idx_notes_target_account ON notes(target_account_id, committed_at) WHERE target_account_id IS NOT NULL; -- Index for joining with block_headers on committed_at CREATE INDEX idx_notes_committed_at ON notes(committed_at); -- Index for joining with note_scripts diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7ecfcb7451..ee7c722c8a 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -652,19 +652,13 @@ impl Db { /// Pagination is used to limit the number of notes returned. pub(crate) async fn select_unconsumed_network_notes( &self, - network_account_prefix: u32, + account_id: AccountId, block_num: BlockNumber, page: Page, ) -> Result<(Vec, Page)> { - // Single-target network notes have their tags derived from the target account ID. 
- // The 30-bit account ID prefix is used as the note tag, allowing us to query notes - // for a given network account. self.transact("unconsumed network notes for account", move |conn| { - models::queries::select_unconsumed_network_notes_by_tag( - conn, - network_account_prefix, - block_num, - page, + models::queries::select_unconsumed_network_notes_by_account_id( + conn, account_id, block_num, page, ) }) .await diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index 10b8316dec..a2ab7b1bb0 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -435,7 +435,7 @@ pub(crate) fn select_note_script_by_root( /// FROM notes /// LEFT JOIN note_scripts ON notes.script_root = note_scripts.script_root /// WHERE -/// network_note_type = 1 AND tag = ?1 AND +/// network_note_type = 1 AND target_account_id = ?1 AND /// committed_at <= ?2 AND /// (consumed_at IS NULL OR consumed_at > ?2) AND notes.rowid >= ?3 /// ORDER BY notes.rowid ASC @@ -449,9 +449,9 @@ pub(crate) fn select_note_script_by_root( clippy::too_many_lines, reason = "Lines will be reduced when schema is updated to simplify logic" )] -pub(crate) fn select_unconsumed_network_notes_by_tag( +pub(crate) fn select_unconsumed_network_notes_by_account_id( conn: &mut SqliteConnection, - tag: u32, + account_id: AccountId, block_num: BlockNumber, mut page: Page, ) -> Result<(Vec, Page), DatabaseError> { @@ -494,7 +494,7 @@ pub(crate) fn select_unconsumed_network_notes_by_tag( ), ) .filter(schema::notes::network_note_type.eq(i32::from(NetworkNoteType::SingleTarget))) - .filter(schema::notes::tag.eq(tag as i32)) + .filter(schema::notes::target_account_id.eq(Some(account_id.to_bytes()))) .filter(schema::notes::committed_at.le(block_num.to_raw_sql())) .filter( schema::notes::consumed_at @@ -861,22 +861,24 @@ pub struct NoteInsertRow { pub sender: Vec, // AccountId pub tag: i32, + pub network_note_type: i32, + pub 
target_account_id: Option>, pub attachment: Vec, + pub inclusion_path: Vec, pub consumed_at: Option, + pub nullifier: Option>, pub assets: Option>, pub inputs: Option>, - pub serial_num: Option>, - pub nullifier: Option>, pub script_root: Option>, - pub network_note_type: i32, - pub inclusion_path: Vec, + pub serial_num: Option>, } impl From<(NoteRecord, Option)> for NoteInsertRow { fn from((note, nullifier): (NoteRecord, Option)) -> Self { let attachment = note.metadata.attachment(); - let network_note_type = if NetworkAccountTarget::try_from(attachment).is_ok() { + let target_account_id = NetworkAccountTarget::try_from(attachment).ok(); + let network_note_type = if target_account_id.is_some() { NetworkNoteType::SingleTarget } else { NetworkNoteType::None @@ -894,6 +896,7 @@ impl From<(NoteRecord, Option)> for NoteInsertRow { sender: note.metadata.sender().to_bytes(), tag: note.metadata.tag().to_raw_sql(), network_note_type: network_note_type.into(), + target_account_id: target_account_id.map(|t| t.target_id().to_bytes()), attachment: attachment_bytes, inclusion_path: note.inclusion_path.to_bytes(), consumed_at: None::, // New notes are always unconsumed. diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 8b6b7e832a..0132848929 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -68,6 +68,7 @@ diesel::table! { sender -> Binary, tag -> Integer, network_note_type -> Integer, + target_account_id -> Nullable, attachment -> Binary, inclusion_path -> Binary, consumed_at -> Nullable, diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 5d3785b456..6bd26dda10 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -393,9 +393,9 @@ fn sql_unconsumed_network_notes() { // Both notes are unconsumed, query should return both notes on both blocks. 
(0..2).for_each(|i: u32| { - let (result, _) = queries::select_unconsumed_network_notes_by_tag( + let (result, _) = queries::select_unconsumed_network_notes_by_account_id( &mut conn, - NoteTag::with_account_target(account_note.0).into(), + account_note.0, i.into(), Page { token: None, @@ -410,9 +410,9 @@ fn sql_unconsumed_network_notes() { queries::insert_nullifiers_for_block(&mut conn, &[notes[1].1.unwrap()], 1.into()).unwrap(); // Query against first block should return both notes. - let (result, _) = queries::select_unconsumed_network_notes_by_tag( + let (result, _) = queries::select_unconsumed_network_notes_by_account_id( &mut conn, - NoteTag::with_account_target(account_note.0).into(), + account_note.0, 0.into(), Page { token: None, @@ -423,9 +423,9 @@ fn sql_unconsumed_network_notes() { assert_eq!(result.len(), 2); // Query against second block should return only first note. - let (result, _) = queries::select_unconsumed_network_notes_by_tag( + let (result, _) = queries::select_unconsumed_network_notes_by_account_id( &mut conn, - NoteTag::with_account_target(account_note.0).into(), + account_note.0, 1.into(), Page { token: None, diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 800eeedf6c..f407ff8617 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -97,10 +97,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { ) -> Result, Status> { let request = request.into_inner(); let block_num = BlockNumber::from(request.block_num); - let network_account_prefix = - validate_network_account_prefix(request.network_account_id_prefix).map_err(|err| { - invalid_argument(err.as_report_context("invalid network_account_id_prefix")) - })?; + let account_id = read_account_id::(request.account_id)?; let state = self.state.clone(); @@ -112,7 +109,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { // TODO: no need to get the whole NoteRecord here, a NetworkNote wrapper should 
be created // instead let (notes, next_page) = state - .get_unconsumed_network_notes_for_account(network_account_prefix, block_num, page) + .get_unconsumed_network_notes_for_account(account_id, block_num, page) .await .map_err(internal_error)?; diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 85332ec2ba..a3f4dbd778 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -1228,13 +1228,11 @@ impl State { /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( &self, - network_account_prefix: u32, + account_id: AccountId, block_num: BlockNumber, page: Page, ) -> Result<(Vec, Page), DatabaseError> { - self.db - .select_unconsumed_network_notes(network_account_prefix, block_num, page) - .await + self.db.select_unconsumed_network_notes(account_id, block_num, page).await } /// Returns the script for a note by its root. diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 4ff8ac05dc..9b5351367f 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -308,8 +308,8 @@ message UnconsumedNetworkNotesRequest { // Number of notes to retrieve per page. uint64 page_size = 2; - // The network account ID prefix to filter notes by. - uint32 network_account_id_prefix = 3; + // The full account ID to filter notes by. + account.AccountId account_id = 3; // The block number to filter the returned notes by. 
// From af42eac7df7a731c674361ca8734c566a014bd4f Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 22 Jan 2026 17:16:43 +0100 Subject: [PATCH 120/125] chore/*trees: remove full paths (#1574) --- crates/store/src/state/loader.rs | 27 +++++++++++++++------------ crates/store/src/state/mod.rs | 14 +++++--------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 4aa8e2590b..504ea06313 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -8,10 +8,11 @@ //! - **Persistent mode** (`rocksdb` feature enabled): Trees are loaded from persistent storage if //! data exists, otherwise rebuilt from the database and persisted. +use std::future::Future; use std::path::Path; use miden_protocol::Word; -use miden_protocol::block::account_tree::account_id_to_smt_key; +use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; #[cfg(not(feature = "rocksdb"))] @@ -84,15 +85,13 @@ pub trait StorageLoader: SmtStorage + Sized { fn load_account_tree( self, db: &mut Db, - ) -> impl std::future::Future, StateInitializationError>> + Send; + ) -> impl Future>, StateInitializationError>> + Send; /// Loads a nullifier tree, either from persistent storage or by rebuilding from DB. 
fn load_nullifier_tree( self, db: &mut Db, - ) -> impl std::future::Future< - Output = Result>, StateInitializationError>, - > + Send; + ) -> impl Future>, StateInitializationError>> + Send; } // MEMORY STORAGE IMPLEMENTATION @@ -107,13 +106,14 @@ impl StorageLoader for MemoryStorage { async fn load_account_tree( self, db: &mut Db, - ) -> Result, StateInitializationError> { + ) -> Result>, StateInitializationError> { let account_data = db.select_all_account_commitments().await?; let smt_entries = account_data .into_iter() .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - LargeSmt::with_entries(self, smt_entries) - .map_err(account_tree_large_smt_error_to_init_error) + let smt = LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } async fn load_nullifier_tree( @@ -144,13 +144,15 @@ impl StorageLoader for RocksDbStorage { async fn load_account_tree( self, db: &mut Db, - ) -> Result, StateInitializationError> { + ) -> Result>, StateInitializationError> { // If RocksDB storage has data, load from it directly let has_data = self .has_leaves() .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; if has_data { - return load_smt(self); + let smt = load_smt(self)?; + return AccountTree::new(smt) + .map_err(StateInitializationError::FailedToCreateAccountsTree); } info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); @@ -158,8 +160,9 @@ impl StorageLoader for RocksDbStorage { let smt_entries = account_data .into_iter() .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - LargeSmt::with_entries(self, smt_entries) - .map_err(account_tree_large_smt_error_to_init_error) + let smt = LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + 
AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } async fn load_nullifier_tree( diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index a3f4dbd778..9f318d0711 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -27,11 +27,11 @@ use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; -use miden_protocol::block::account_tree::{AccountTree, AccountWitness}; -use miden_protocol::block::nullifier_tree::NullifierWitness; +use miden_protocol::block::account_tree::AccountWitness; +use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; -use miden_protocol::crypto::merkle::smt::{SmtProof, SmtStorage}; +use miden_protocol::crypto::merkle::smt::{LargeSmt, SmtProof, SmtStorage}; use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; use miden_protocol::transaction::{OutputNote, PartialBlockchain}; use miden_protocol::utils::Serializable; @@ -91,9 +91,7 @@ struct InnerState where S: SmtStorage, { - nullifier_tree: miden_protocol::block::nullifier_tree::NullifierTree< - miden_protocol::crypto::merkle::smt::LargeSmt, - >, + nullifier_tree: NullifierTree>, blockchain: Blockchain, account_tree: AccountTreeWithHistory, } @@ -162,9 +160,7 @@ impl State { let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); let account_storage = TreeStorage::create(data_path, ACCOUNT_TREE_STORAGE_DIR)?; - let smt = account_storage.load_account_tree(&mut db).await?; - let account_tree = - AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree)?; + let account_tree = 
account_storage.load_account_tree(&mut db).await?; let nullifier_storage = TreeStorage::create(data_path, NULLIFIER_TREE_STORAGE_DIR)?; let nullifier_tree = nullifier_storage.load_nullifier_tree(&mut db).await?; From 5479207308335d754108594b2b6f85fa90260827 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 22 Jan 2026 17:56:56 +0100 Subject: [PATCH 121/125] fix/get_account: remove duplicate check in store (#1575) --- crates/store/src/inner_forest/mod.rs | 10 -------- crates/store/src/inner_forest/tests.rs | 34 -------------------------- crates/store/src/state/mod.rs | 19 -------------- 3 files changed, 63 deletions(-) diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 403a3334e9..0a4bd00d62 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -216,9 +216,6 @@ impl InnerForest { /// /// Returns `None` if no storage root is tracked for this account/slot/block combination. /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. - /// - /// If the number of requested keys exceeds [`AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES`], - /// returns `LimitExceeded`. 
pub(crate) fn open_storage_map( &self, account_id: AccountId, @@ -228,13 +225,6 @@ impl InnerForest { ) -> Option> { let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; - if raw_keys.len() > AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES { - return Some(Ok(AccountStorageMapDetails { - slot_name, - entries: StorageMapEntries::LimitExceeded, - })); - } - // Collect SMT proofs for each key let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { let key = StorageMap::hash_key(*raw_key); diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index fd60f535c4..216ef42061 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -428,37 +428,3 @@ fn test_storage_map_incremental_updates() { assert_ne!(root_2, root_3); assert_ne!(root_1, root_3); } - -#[test] -fn test_open_storage_map_returns_limit_exceeded_for_too_many_keys() { - use std::collections::BTreeMap; - - use assert_matches::assert_matches; - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let slot_name = StorageSlotName::mock(3); - let block_num = BlockNumber::GENESIS.child(); - - // Create a storage map with entries - let num_entries = AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES + 5; - let mut map_delta = StorageMapDelta::default(); - for i in 0..num_entries as u32 { - let key = Word::from([i, 0, 0, 0]); - let value = Word::from([0, 0, 0, i]); - map_delta.insert(key, value); - } - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); - - // Request proofs for more than MAX_SMT_PROOF_ENTRIES keys. - // Should return LimitExceeded. 
- let keys: Vec = (0..num_entries as u32).map(|i| Word::from([i, 0, 0, 0])).collect(); - let result = forest.open_storage_map(account_id, slot_name.clone(), block_num, &keys); - - let details = result.expect("Should return Some").expect("Should not error"); - assert_matches!(details.entries, StorageMapEntries::LimitExceeded); -} diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 9f318d0711..b275f400a2 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -1104,25 +1104,6 @@ impl State { None => AccountVaultDetails::empty(), }; - // Check total keys limit upfront before expensive open operations - let total_keys: usize = storage_requests - .iter() - .filter_map(|req| match &req.slot_data { - SlotData::MapKeys(keys) => Some(keys.len()), - SlotData::All => None, - }) - .sum(); - - if total_keys > AccountStorageMapDetails::MAX_SMT_PROOF_ENTRIES { - return Ok(AccountDetails::with_storage_limits_exceeded( - account_header, - account_code, - vault_details, - storage_header, - storage_requests.into_iter().map(|req| req.slot_name), - )); - } - let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); From 29949561c7cdd8f81998fb26b40c951c6b313956 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 22 Jan 2026 20:34:05 +0200 Subject: [PATCH 122/125] refactor: yank vergen (#1576) --- CHANGELOG.md | 1 + Cargo.lock | 86 --------------- bin/node/Cargo.toml | 4 - bin/node/build.rs | 9 -- bin/node/src/main.rs | 22 +--- crates/utils/Cargo.toml | 8 -- crates/utils/src/lib.rs | 1 - crates/utils/src/version/mod.rs | 184 -------------------------------- 8 files changed, 2 insertions(+), 313 deletions(-) delete mode 100644 bin/node/build.rs delete mode 100644 crates/utils/src/version/mod.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 76e48827f7..e456b69663 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ - Fix race condition at DB shutdown in 
tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). - [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/miden-node/pull/1526)). - [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/miden-node/pull/1572)). +- Removed git information from node's `--version` CLI as it was often incorrect ([#1576](https://github.com/0xMiden/miden-node/pull/1576)). ### Fixes diff --git a/Cargo.lock b/Cargo.lock index 132b415fc7..c0c6828462 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,38 +485,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "camino" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e629a66d692cb9ff1a1c664e41771b3dcaf961985a9774c0eb0bd1b51cf60a48" -dependencies = [ - "serde_core", -] - -[[package]] -name = "cargo-platform" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.19.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" -dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.27", - "serde", - "serde_json", - "thiserror 2.0.17", -] - [[package]] name = "cast" version = "0.3.0" @@ -3012,7 +2980,6 @@ dependencies = [ "anyhow", "bytes", "figment", - "fs-err", "http", "http-body-util", "itertools 0.14.0", @@ -3032,8 +2999,6 @@ dependencies = [ "tracing-opentelemetry", "tracing-subscriber", "url", - "vergen", - "vergen-gitcl", ] [[package]] @@ -3612,15 +3577,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_threads" -version = "0.1.7" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ - "libc", -] - [[package]] name = "object" version = "0.37.3" @@ -5640,9 +5596,7 @@ checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" dependencies = [ "deranged", "itoa", - "libc", "num-conv", - "num_threads", "powerfmt", "serde_core", "time-core", @@ -6318,46 +6272,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vergen" -version = "9.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" -dependencies = [ - "anyhow", - "cargo_metadata", - "derive_builder", - "regex", - "rustc_version 0.4.1", - "rustversion", - "vergen-lib", -] - -[[package]] -name = "vergen-gitcl" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9dfc1de6eb2e08a4ddf152f1b179529638bedc0ea95e6d667c014506377aefe" -dependencies = [ - "anyhow", - "derive_builder", - "rustversion", - "time", - "vergen", - "vergen-lib", -] - -[[package]] -name = "vergen-lib" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166" -dependencies = [ - "anyhow", - "derive_builder", - "rustversion", -] - [[package]] name = "version_check" version = "0.9.5" diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index c7a126c97b..b6ade3b4da 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -36,7 +36,3 @@ url = { workspace = true } [dev-dependencies] figment = { features = ["env", "test", "toml"], version = "0.10" } miden-node-utils = { features = ["tracing-forest"], workspace = true } - -[build-dependencies] -# Required to inject build metadata. 
-miden-node-utils = { features = ["vergen"], workspace = true } diff --git a/bin/node/build.rs b/bin/node/build.rs deleted file mode 100644 index ae41be1485..0000000000 --- a/bin/node/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -fn main() { - // Configures environment variables for build metadata intended for extended version - // information. - if let Err(e) = miden_node_utils::version::vergen() { - // Don't let an error here bring down the build. Build metadata will be empty which isn't a - // critical failure. - println!("cargo:warning=Failed to embed build metadata: {e:?}"); - } -} diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index d7a288beec..be4b0d4ae3 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -4,7 +4,6 @@ use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; -use miden_node_utils::version::LongVersion; mod commands; @@ -12,7 +11,7 @@ mod commands; // ================================================================================================ #[derive(Parser)] -#[command(version, about, long_about = None, long_version = long_version().to_string())] +#[command(version, about, long_about = None)] pub struct Cli { #[command(subcommand)] pub command: Command, @@ -84,22 +83,3 @@ async fn main() -> anyhow::Result<()> { cli.command.execute().await } - -// HELPERS & UTILITIES -// ================================================================================================ - -/// Generates [`LongVersion`] using the metadata generated by build.rs. 
-fn long_version() -> LongVersion { - LongVersion { - version: env!("CARGO_PKG_VERSION"), - sha: option_env!("VERGEN_GIT_SHA").unwrap_or_default(), - branch: option_env!("VERGEN_GIT_BRANCH").unwrap_or_default(), - dirty: option_env!("VERGEN_GIT_DIRTY").unwrap_or_default(), - features: option_env!("VERGEN_CARGO_FEATURES").unwrap_or_default(), - rust_version: option_env!("VERGEN_RUSTC_SEMVER").unwrap_or_default(), - host: option_env!("VERGEN_RUSTC_HOST_TRIPLE").unwrap_or_default(), - target: option_env!("VERGEN_CARGO_TARGET_TRIPLE").unwrap_or_default(), - opt_level: option_env!("VERGEN_CARGO_OPT_LEVEL").unwrap_or_default(), - debug: option_env!("VERGEN_CARGO_DEBUG").unwrap_or_default(), - } -} diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index d944c1099c..e61930937e 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -15,8 +15,6 @@ version.workspace = true workspace = true [features] -# Enables depedencies intended for build script generation of version metadata. -vergen = ["dep:vergen", "dep:vergen-gitcl"] # Enables utility functions for testing traces created by some other crate's stack. testing = ["miden-protocol/testing"] @@ -24,7 +22,6 @@ testing = ["miden-protocol/testing"] anyhow = { workspace = true } bytes = { version = "1.10" } figment = { features = ["env", "toml"], version = "0.10" } -fs-err = { workspace = true } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } @@ -45,10 +42,5 @@ tracing-opentelemetry = { version = "0.32" } tracing-subscriber = { workspace = true } url = { workspace = true } -# Optional dependencies enabled by `vergen` feature. -# This must match the version expected by `vergen-gitcl`. 
-vergen = { "version" = "9.0", optional = true } -vergen-gitcl = { features = ["cargo", "rustc"], optional = true, version = "1.0" } - [dev-dependencies] thiserror = { workspace = true } diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index c894e31fb6..530e971e49 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -10,7 +10,6 @@ pub mod logging; pub mod lru_cache; pub mod panic; pub mod tracing; -pub mod version; pub trait ErrorReport: std::error::Error { /// Returns a string representation of the error and its source chain. diff --git a/crates/utils/src/version/mod.rs b/crates/utils/src/version/mod.rs deleted file mode 100644 index 7d378558c5..0000000000 --- a/crates/utils/src/version/mod.rs +++ /dev/null @@ -1,184 +0,0 @@ -#[cfg(feature = "vergen")] -pub use vergen::vergen; - -/// Contains build metadata which can be formatted into a pretty --version -/// output using its Display implementation. -/// -/// The build metadata can be embedded at compile time using the `vergen` function -/// available from the `vergen` feature. See that functions description for a list -/// of the environment variables emitted which map nicely to [`LongVersion`]. -/// -/// Unfortunately these values must be transferred manually by the end user since the -/// env variables are only available once the caller's build script has run - which is -/// after this crate is compiled. 
-pub struct LongVersion { - pub version: &'static str, - pub sha: &'static str, - pub branch: &'static str, - pub dirty: &'static str, - pub features: &'static str, - pub rust_version: &'static str, - pub host: &'static str, - pub target: &'static str, - pub opt_level: &'static str, - pub debug: &'static str, -} - -impl std::fmt::Display for LongVersion { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let &Self { - version, - mut sha, - mut branch, - dirty, - features, - rust_version, - host, - target, - opt_level, - debug, - } = self; - - let dirty = match dirty { - "true" => "-dirty", - _ => "", - }; - - // This is the default value set by `vergen` when these values are missing. - // The git values can be missing for a published crate, and while we do attempt - // to set default values in the build.rs, its still possible for these to be skipped - // e.g. when cargo publish --allow-dirty is used. - if branch == "VERGEN_IDEMPOTENT_OUTPUT" { - branch = ""; - } - if sha == "VERGEN_IDEMPOTENT_OUTPUT" { - sha = ""; - } - - f.write_fmt(format_args!( - "{version} - -SHA: {sha}{dirty} -branch: {branch} -features: {features} -rust version: {rust_version} -target arch: {target} -host arch: {host} -opt-level: {opt_level} -debug: {debug} -" - )) - } -} - -#[cfg(feature = "vergen")] -mod vergen { - use std::path::PathBuf; - - use anyhow::{Context, Result}; - - /// Emits environment variables for build metadata intended for extended version information. - /// - /// The following environment variables are emitted: - /// - /// - `VERGEN_GIT_BRANCH` - /// - `VERGEN_GIT_SHA` - /// - `VERGEN_GIT_DIRTY` - /// - `VERGEN_RUSTC_SEMVER` - /// - `VERGEN_RUSTC_HOST_TRIPLE` - /// - `VERGEN_CARGO_TARGET_TRIPLE` - /// - `VERGEN_CARGO_FEATURES` - /// - `VERGEN_CARGO_OPT_LEVEL` - /// - `VERGEN_CARGO_DEBUG` - pub fn vergen() -> Result<()> { - if let Some(sha) = published_git_sha().context("Checking for published vcs info")? 
{ - // git data is not available if in a published state, so we set them manually. - println!("cargo::rustc-env=VERGEN_GIT_SHA={sha}"); - println!("cargo::rustc-env=VERGEN_GIT_BRANCH=NA (published)"); - println!("cargo::rustc-env=VERGEN_GIT_DIRTY="); - - vergen_gitcl::Emitter::new() - } else { - // In a non-published state so we can expect git instructions to work. - let mut emitter = vergen_gitcl::Emitter::new(); - emitter - .add_instructions(&git_instructions()?) - .context("Adding git instructions")?; - - emitter - } - .add_instructions(&cargo_instructions()?) - .context("Adding cargo instructions")? - .add_instructions(&rustc_instructions()?) - .context("Adding rustc instructions")? - .emit() - } - - /// Normal git info is lost on `cargo publish`, which instead adds a file containing the SHA1 - /// hash. - /// - /// This function returns the short SHA value. If present, this indicates this we're in a - /// published state. - fn published_git_sha() -> Result> { - let cargo_vcs_info = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(".cargo_vcs_info.json"); - if cargo_vcs_info.exists() { - // The file is small so reading to string is acceptable. - let contents = fs_err::read_to_string(cargo_vcs_info).context("Reading vcs info")?; - - // File format: - // { - // "git": { - // "sha1": "9d48046e9654d93a86212e77d6c92f14c95de44b" - // }, - // "path_in_vcs": "bin/node" - // } - let offset = contents.find(r#""sha1""#).context("Searching for sha1 property")? - + r#""sha1""#.len(); - - let sha1 = contents[offset + 1..] - .chars() - // Find and skip opening quote. - .skip_while(|&c| c != '"') - .skip(1) - // Take until closing quote. - .take_while(|&c| c != '"') - // Short SHA format is 7 digits. 
- .take(7) - .collect(); - - Ok(Some(sha1)) - } else { - Ok(None) - } - } - - fn git_instructions() -> Result { - const INCLUDE_UNTRACKED: bool = true; - const SHORT_SHA: bool = true; - - vergen_gitcl::GitclBuilder::default() - .branch(true) - .dirty(INCLUDE_UNTRACKED) - .sha(SHORT_SHA) - .build() - .context("Building git instructions") - } - - fn cargo_instructions() -> Result { - vergen_gitcl::CargoBuilder::default() - .debug(true) - .features(true) - .target_triple(true) - .opt_level(true) - .build() - .context("Building git instructions") - } - - fn rustc_instructions() -> Result { - vergen_gitcl::RustcBuilder::default() - .semver(true) - .host_triple(true) - .build() - .context("Building rustc instructions") - } -} From 93ca1d26e7e74b8b4fa45a5cf2d38db1372b92c0 Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Thu, 22 Jan 2026 10:41:53 -0800 Subject: [PATCH 123/125] chore: refresh Cargo.lock --- Cargo.lock | 225 +++++++++++++++++++++++++++-------------------------- 1 file changed, 115 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0c6828462..8ea0e75ef9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,17 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.17", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -362,7 +351,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -493,9 +482,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.52" +version = "1.2.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" +checksum = 
"755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" dependencies = [ "find-msvc-tools", "jobserver", @@ -572,9 +561,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "js-sys", @@ -1334,7 +1323,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -1377,9 +1366,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixedbitset" @@ -1679,9 +1668,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -2112,7 +2098,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -2136,6 +2122,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -2187,9 +2182,9 @@ dependencies = [ [[package]] name = "js-sys" 
-version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -2504,7 +2499,7 @@ checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" dependencies = [ "miden-core", "miden-utils-indexing", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-air", "winter-prover", ] @@ -2521,7 +2516,7 @@ dependencies = [ "miden-core", "miden-mast-package", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2545,7 +2540,7 @@ dependencies = [ "rustc_version 0.4.1", "semver 1.0.27", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2555,7 +2550,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa9c89257b227d0668105b4a6e81ea33956795c89549cc1baa3f253d753e81e5" dependencies = [ "miden-protocol", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2575,7 +2570,7 @@ dependencies = [ "num-traits", "proptest", "proptest-derive", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-math", "winter-utils", ] @@ -2594,7 +2589,7 @@ dependencies = [ "miden-processor", "miden-utils-sync", "sha2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2625,7 +2620,7 @@ dependencies = [ "sha2", "sha3", "subtle", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-crypto", "winter-math", "winter-utils", @@ -2634,9 +2629,9 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.2" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83479e7af490784c6f2d2e02cec5210fd6e5bc6ce3d4427734e36a773bca72d2" +checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" dependencies = [ "quote", "syn 2.0.114", @@ -2657,7 +2652,7 @@ dependencies = [ "paste", "serde", 
"serde_spanned 1.0.4", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2678,7 +2673,7 @@ dependencies = [ "derive_more", "miden-assembly-syntax", "miden-core", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2707,7 +2702,7 @@ dependencies = [ "syn 2.0.114", "terminal_size 0.3.0", "textwrap", - "thiserror 2.0.17", + "thiserror 2.0.18", "trybuild", "unicode-width 0.1.14", ] @@ -2796,7 +2791,7 @@ dependencies = [ "rstest", "serial_test", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2830,7 +2825,7 @@ dependencies = [ "miden-standards", "miden-tx", "rstest", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -2856,7 +2851,7 @@ dependencies = [ "miette", "proptest", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tonic", "tonic-prost", "tonic-prost-build", @@ -2893,7 +2888,7 @@ dependencies = [ "rstest", "semver 1.0.27", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2933,7 +2928,7 @@ dependencies = [ "regex", "serde", "termtree", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "toml 0.9.11+spec-1.1.0", @@ -2990,7 +2985,7 @@ dependencies = [ "opentelemetry_sdk", "rand 0.9.2", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", "tower-http", @@ -3011,7 +3006,7 @@ dependencies = [ "miden-node-utils", "miden-protocol", "miden-tx", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -3034,7 +3029,7 @@ dependencies = [ "miden-utils-indexing", "paste", "rayon", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tracing", "winter-prover", @@ -3065,7 +3060,7 @@ dependencies = [ "regex", "semver 1.0.27", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "toml 0.9.11+spec-1.1.0", "walkdir", "winter-rand-utils", @@ -3128,7 +3123,7 @@ dependencies = [ "semver 1.0.27", "serde", "serde_qs", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", 
"tokio-stream", "tonic", @@ -3153,7 +3148,7 @@ dependencies = [ "miden-tx", "miette", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", "tonic-prost", @@ -3176,7 +3171,7 @@ dependencies = [ "miden-protocol", "rand 0.9.2", "regex", - "thiserror 2.0.17", + "thiserror 2.0.18", "walkdir", ] @@ -3213,7 +3208,7 @@ dependencies = [ "miden-prover", "miden-standards", "miden-verifier", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3256,7 +3251,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" dependencies = [ - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3278,7 +3273,7 @@ checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" dependencies = [ "miden-air", "miden-core", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "winter-verifier", ] @@ -3291,7 +3286,7 @@ checksum = "9d4cfab04baffdda3fb9eafa5f873604059b89a1699aa95e4f1057397a69f0b5" dependencies = [ "miden-formatting", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3644,9 +3639,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-probe" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "openssl-sys" @@ -3670,7 +3665,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] @@ -3685,7 +3680,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", ] @@ -3715,7 +3710,7 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "thiserror 2.0.17", + "thiserror 
2.0.18", "tokio", "tokio-stream", ] @@ -3860,7 +3855,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "blake2", "bytes", @@ -3896,7 +3891,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "brotli", "bytes", @@ -3986,7 +3981,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -4018,7 +4013,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.12.3", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4263,9 +4258,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -4310,7 +4305,7 @@ dependencies = [ "memchr", "parking_lot", "protobuf 3.7.2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4447,7 +4442,7 @@ dependencies = [ "prost-reflect", "prost-types", "protox-parse", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4459,7 +4454,7 @@ dependencies = [ "logos", "miette", "prost-types", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4746,6 +4741,16 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.18", +] + [[package]] name = "rstest" version = "0.26.1" @@ -4777,9 +4782,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" dependencies = [ "arrayvec", "num-traits", @@ -4787,9 +4792,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" @@ -4825,7 +4830,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4838,7 +4843,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -4862,7 +4867,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.0", + "openssl-probe 0.2.1", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -4870,18 +4875,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ "zeroize", ] [[package]] name = "rustls-webpki" 
-version = "0.103.8" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ "ring", "rustls-pki-types", @@ -5094,7 +5099,7 @@ checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" dependencies = [ "percent-encoding", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -5293,14 +5298,13 @@ dependencies = [ [[package]] name = "sqlite-wasm-rs" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e98301bf8b0540c7de45ecd760539b9c62f5772aed172f08efba597c11cd5d" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" dependencies = [ "cc", - "hashbrown 0.16.1", "js-sys", - "thiserror 2.0.17", + "rsqlite-vfs", "wasm-bindgen", ] @@ -5471,7 +5475,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -5540,11 +5544,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.17", + "thiserror-impl 2.0.18", ] [[package]] @@ -5560,9 +5564,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", @@ -5946,7 +5950,7 @@ dependencies = [ "httparse", "js-sys", "pin-project", - 
"thiserror 2.0.17", + "thiserror 2.0.18", "tonic", "tower-service", "wasm-bindgen", @@ -6047,7 +6051,7 @@ checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" dependencies = [ "chrono", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "tracing-subscriber", ] @@ -6323,18 +6327,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.1+wasi-0.2.4" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -6345,11 +6349,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -6358,9 +6363,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6368,9 +6373,9 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", @@ -6381,9 +6386,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] @@ -6403,9 +6408,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -6443,7 +6448,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -6877,9 +6882,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.46.0" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" [[package]] name = "writeable" @@ -7017,9 +7022,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" +checksum = 
"dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" [[package]] name = "zstd" From b7a458cc006d8d4c4cd44dab6be16a04f2cd4aab Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Fri, 23 Jan 2026 07:44:40 +1300 Subject: [PATCH 124/125] feat: add FPI support to NtxDataStore (#1521) --- CHANGELOG.md | 1 + crates/ntx-builder/src/actor/execute.rs | 155 ++++++++++++++------- crates/ntx-builder/src/store.rs | 176 +++++++++++++++++++++++- crates/proto/src/domain/account.rs | 2 +- crates/proto/src/errors/mod.rs | 4 +- crates/proto/src/generated/rpc.rs | 6 +- crates/proto/src/generated/store.rs | 82 ++++++++++- crates/store/src/server/ntx_builder.rs | 13 ++ proto/proto/internal/store.proto | 5 +- proto/proto/rpc.proto | 4 +- 10 files changed, 382 insertions(+), 66 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e456b69663..176b1653ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ - Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496). - Limit number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). +- Add support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). - Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). - Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). 
diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 66f22f8c06..c53dc96f69 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -1,4 +1,5 @@ -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet}; +use std::sync::Arc; use miden_node_proto::clients::ValidatorClient; use miden_node_proto::generated::{self as proto}; @@ -8,9 +9,11 @@ use miden_protocol::Word; use miden_protocol::account::{ Account, AccountId, + AccountStorageHeader, PartialAccount, StorageMapWitness, - StorageSlotContent, + StorageSlotName, + StorageSlotType, }; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::{BlockHeader, BlockNumber}; @@ -45,6 +48,7 @@ use miden_tx::{ TransactionMastStore, TransactionProverError, }; +use tokio::sync::Mutex; use tokio::task::JoinError; use tracing::{Instrument, instrument}; @@ -222,7 +226,7 @@ impl NtxContext { match Box::pin(checker.check_notes_consumability( data_store.account.id(), - data_store.reference_header.block_num(), + data_store.reference_block.block_num(), notes, TransactionArgs::default(), )) @@ -256,7 +260,7 @@ impl NtxContext { Box::pin(executor.execute_transaction( data_store.account.id(), - data_store.reference_header.block_num(), + data_store.reference_block.block_num(), notes, TransactionArgs::default(), )) @@ -322,20 +326,42 @@ impl NtxContext { /// This is sufficient for executing a network transaction. struct NtxDataStore { account: Account, - reference_header: BlockHeader, + reference_block: BlockHeader, chain_mmr: PartialBlockchain, mast_store: TransactionMastStore, /// Store client for retrieving note scripts. store: StoreClient, /// LRU cache for storing retrieved note scripts to avoid repeated store calls. script_cache: LruCache, + /// Mapping of storage map roots to storage slot names observed during various calls. 
+ /// + /// The registered slot names are subsequently used to retrieve storage map witnesses from the + /// store. We need this because the store interface (and the underling SMT forest) use storage + /// slot names, but the `DataStore` interface works with tree roots. To get around this problem + /// we populate this map when: + /// - The the native account is loaded (in `get_transaction_inputs()`). + /// - When a foreign account is loaded (in `get_foreign_account_inputs`). + /// + /// The assumption here are: + /// - Once an account is loaded, the mapping between `(account_id, map_root)` and slot names do + /// not change. This is always the case. + /// - New storage slots created during transaction execution will not be accesses in the same + /// transaction. The mechanism for adding new storage slots is not implemented yet, but the + /// plan for it is consistent with this assumption. + /// + /// One nuance worth mentioning: it is possible that there could be a root collision where an + /// account has two storage maps with the same root. In this case, the map will contain only a + /// single entry with the storage slot name that was added last. Thus, technically, requests + /// to the store could be "wrong", but given that two identical maps have identical witnesses + /// this does not cause issues in practice. + storage_slots: Arc>>, } impl NtxDataStore { /// Creates a new `NtxDataStore` with default cache size. fn new( account: Account, - reference_header: BlockHeader, + reference_block: BlockHeader, chain_mmr: PartialBlockchain, store: StoreClient, script_cache: LruCache, @@ -345,11 +371,28 @@ impl NtxDataStore { Self { account, - reference_header, + reference_block, chain_mmr, mast_store, store, script_cache, + storage_slots: Arc::new(Mutex::new(BTreeMap::default())), + } + } + + /// Registers storage map slot names for the given account ID and storage header. 
+ /// + /// These slot names are subsequently used to query for storage map witnesses against the store. + async fn register_storage_map_slots( + &self, + account_id: AccountId, + storage_header: &AccountStorageHeader, + ) { + let mut storage_slots = self.storage_slots.lock().await; + for slot_header in storage_header.slots() { + if let StorageSlotType::Map = slot_header.slot_type() { + storage_slots.insert((account_id, slot_header.value()), slot_header.name().clone()); + } } } } @@ -366,53 +409,63 @@ impl DataStore for NtxDataStore { return Err(DataStoreError::AccountNotFound(account_id)); } + // The latest supplied reference block must match the current reference block. match ref_blocks.last().copied() { - Some(reference) if reference == self.reference_header.block_num() => {}, - + Some(reference) if reference == self.reference_block.block_num() => {}, Some(other) => return Err(DataStoreError::BlockNotFound(other)), None => return Err(DataStoreError::other("no reference block requested")), } - let partial_account = PartialAccount::from(&self.account); + // Register slot names from the native account for later use. + self.register_storage_map_slots(account_id, &self.account.storage().to_header()) + .await; - Ok((partial_account, self.reference_header.clone(), self.chain_mmr.clone())) + let partial_account = PartialAccount::from(&self.account); + Ok((partial_account, self.reference_block.clone(), self.chain_mmr.clone())) } } fn get_foreign_account_inputs( &self, foreign_account_id: AccountId, - _ref_block: BlockNumber, + ref_block: BlockNumber, ) -> impl FutureMaybeSend> { - async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) } + async move { + debug_assert_eq!(ref_block, self.reference_block.block_num()); + + // Get foreign account inputs from store. 
+ let account_inputs = + self.store.get_account_inputs(foreign_account_id, ref_block).await.map_err( + |err| DataStoreError::other_with_source("failed to get account inputs", err), + )?; + + // Register slot names from the foreign account for later use. + self.register_storage_map_slots(foreign_account_id, account_inputs.storage().header()) + .await; + + Ok(account_inputs) + } } fn get_vault_asset_witnesses( &self, account_id: AccountId, - vault_root: Word, + _vault_root: Word, vault_keys: BTreeSet, ) -> impl FutureMaybeSend, DataStoreError>> { async move { - if self.account.id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } + let ref_block = self.reference_block.block_num(); - if self.account.vault().root() != vault_root { - return Err(DataStoreError::Other { - error_msg: "vault root mismatch".into(), - source: None, - }); - } + // Get vault asset witnesses from the store. + let witnesses = self + .store + .get_vault_asset_witnesses(account_id, vault_keys, Some(ref_block)) + .await + .map_err(|err| { + DataStoreError::other_with_source("failed to get vault asset witnesses", err) + })?; - Result::, _>::from_iter(vault_keys.into_iter().map(|vault_key| { - AssetWitness::new(self.account.vault().open(vault_key).into()).map_err(|err| { - DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(Box::new(err)), - } - }) - })) + Ok(witnesses) } } @@ -423,27 +476,27 @@ impl DataStore for NtxDataStore { map_key: Word, ) -> impl FutureMaybeSend> { async move { - if self.account.id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - let mut map_witness = None; - for slot in self.account.storage().slots() { - if let StorageSlotContent::Map(map) = slot.content() { - if map.root() == map_root { - map_witness = Some(map.open(&map_key)); - } - } - } + // The slot name that corresponds to the given account ID and map root must have been + // registered during previous calls of 
this data store. + let storage_slots = self.storage_slots.lock().await; + let Some(slot_name) = storage_slots.get(&(account_id, map_root)) else { + return Err(DataStoreError::other( + "requested storage slot has not been registered", + )); + }; + + let ref_block = self.reference_block.block_num(); + + // Get storage map witness from the store. + let witness = self + .store + .get_storage_map_witness(account_id, slot_name.clone(), map_key, Some(ref_block)) + .await + .map_err(|err| { + DataStoreError::other_with_source("failed to get storage map witness", err) + })?; - if let Some(map_witness) = map_witness { - Ok(map_witness) - } else { - Err(DataStoreError::Other { - error_msg: "account storage does not contain the expected root".into(), - source: None, - }) - } + Ok(witness) } } diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 02e12d8964..ac94f20b72 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -1,8 +1,9 @@ +use std::collections::BTreeSet; use std::ops::RangeInclusive; use std::time::Duration; use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; -use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::account::{AccountDetails, AccountResponse, NetworkAccountId}; use miden_node_proto::domain::note::NetworkNote; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::BlockRange; @@ -10,11 +11,22 @@ use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; -use miden_protocol::account::{Account, AccountId}; +use miden_protocol::account::{ + Account, + AccountCode, + AccountId, + PartialAccount, + PartialStorage, + StorageMapWitness, + StorageSlotName, +}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness, PartialVault}; use miden_protocol::block::{BlockHeader, BlockNumber}; use 
miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks, PartialMmr}; +use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::note::NoteScript; -use miden_tx::utils::Deserializable; +use miden_protocol::transaction::AccountInputs; +use miden_tx::utils::{Deserializable, Serializable}; use thiserror::Error; use tracing::{info, instrument}; use url::Url; @@ -137,6 +149,46 @@ impl StoreClient { Ok(account) } + /// Get the inputs for an account at a given block number from the store. + /// + /// Retrieves account details from the store. The retrieved details are limited to the account + /// code, account header, and storage header. The vault and storage slots are not required for + /// the purposes of the NTX Builder. + #[instrument(target = COMPONENT, name = "store.client.get_account_inputs", skip_all, err)] + pub async fn get_account_inputs( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result { + // Construct proto request. + let proto_request = proto::rpc::AccountRequest { + account_id: Some(proto::account::AccountId { id: account_id.to_bytes() }), + block_num: Some(block_num.into()), + // Request account code, account header, and storage header in order to build minimal + // partial account. + details: Some(proto::rpc::account_request::AccountDetailRequest { + code_commitment: Some(Word::default().into()), + asset_vault_commitment: None, + storage_maps: vec![], + }), + }; + + // Make the gRPC call. + let proto_response = self.inner.clone().get_account(proto_request).await?.into_inner(); + + // Convert proto response to domain type. + let account_response = + AccountResponse::try_from(proto_response).map_err(StoreError::DeserializationError)?; + + // Build partial account. 
+ let account_details = account_response + .details + .ok_or(StoreError::MissingDetails("account details".into()))?; + let partial_account = build_minimal_foreign_account(&account_details)?; + + Ok(AccountInputs::new(partial_account, account_response.witness)) + } + /// Returns the list of unconsumed network notes for a specific network account up to a /// specified block. #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] @@ -332,9 +384,88 @@ impl StoreClient { Ok(None) } } + + #[instrument(target = COMPONENT, name = "store.client.get_vault_asset_witnesses", skip_all, err)] + pub async fn get_vault_asset_witnesses( + &self, + account_id: AccountId, + vault_keys: BTreeSet, + block_num: Option, + ) -> Result, StoreError> { + // Construct proto request. + let request = proto::store::VaultAssetWitnessesRequest { + account_id: Some(proto::account::AccountId { id: account_id.to_bytes() }), + vault_keys: vault_keys + .into_iter() + .map(|key| { + let word: Word = key.into(); + word.into() + }) + .collect(), + block_num: block_num.map(|num| num.as_u32()), + }; + + // Make the gRPC request. + let witness_proto = + self.inner.clone().get_vault_asset_witnesses(request).await?.into_inner(); + + // Convert the response to domain type. 
+ let mut asset_witnesses = Vec::new(); + for asset_witness in witness_proto.asset_witnesses { + let smt_opening = asset_witness.proof.ok_or_else(|| { + StoreError::MalformedResponse("missing proof in vault asset witness".to_string()) + })?; + let proof: SmtProof = + smt_opening.try_into().map_err(StoreError::DeserializationError)?; + let witness = AssetWitness::new(proof) + .map_err(|err| StoreError::DeserializationError(ConversionError::from(err)))?; + + asset_witnesses.push(witness); + } + + Ok(asset_witnesses) + } + + #[instrument(target = COMPONENT, name = "store.client.get_storage_map_witness", skip_all, err)] + pub async fn get_storage_map_witness( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + map_key: Word, + block_num: Option, + ) -> Result { + // Construct proto request. + let request = proto::store::StorageMapWitnessRequest { + account_id: Some(proto::account::AccountId { id: account_id.to_bytes() }), + map_key: Some(map_key.into()), + slot_name: slot_name.to_string(), + block_num: block_num.map(|num| num.as_u32()), + }; + + // Make the request to the store. + let witness_proto = self.inner.clone().get_storage_map_witness(request).await?.into_inner(); + + // Convert the response to domain type. + let witness_proto = witness_proto.witness.ok_or_else(|| { + StoreError::MalformedResponse("missing storage map witness in response".to_string()) + })?; + + let smt_opening = witness_proto.proof.ok_or_else(|| { + StoreError::MalformedResponse("missing proof in storage map witness".to_string()) + })?; + + let proof: SmtProof = smt_opening.try_into().map_err(StoreError::DeserializationError)?; + + // Create the storage map witness using the proof and raw map key. 
+ let witness = StorageMapWitness::new(proof, [map_key]).map_err(|_err| { + StoreError::MalformedResponse("failed to create storage map witness".to_string()) + })?; + + Ok(witness) + } } -// Store errors +// STORE ERROR // ================================================================================================= #[derive(Debug, Error)] @@ -345,4 +476,41 @@ pub enum StoreError { MalformedResponse(String), #[error("failed to parse response")] DeserializationError(#[from] ConversionError), + #[error("missing details: {0}")] + MissingDetails(String), +} + +// HELPERS +// ================================================================================================= + +/// Builds a minimal partial account from the provided account details. +/// +/// The partial account is built without storage maps or an asset vault. This is intended to be used +/// to retrieve foreign account data during transaction execution. +pub fn build_minimal_foreign_account( + account_details: &AccountDetails, +) -> Result { + // Derive account code. + let account_code_bytes = account_details + .account_code + .as_ref() + .ok_or(ConversionError::AccountCodeMissing)?; + let account_code = AccountCode::from_bytes(account_code_bytes)?; + + // Derive partial storage. Storage maps are not required for foreign accounts. + let partial_storage = PartialStorage::new(account_details.storage_details.header.clone(), [])?; + + // Derive partial vault from vault root only. + let partial_vault = PartialVault::new(account_details.account_header.vault_root()); + + // Construct partial account. 
+ let partial_account = PartialAccount::new( + account_details.account_header.id(), + account_details.account_header.nonce(), + account_code, + partial_storage, + partial_vault, + None, + )?; + Ok(partial_account) } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 558c0fa1d3..cf24e253f8 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -134,7 +134,7 @@ impl TryFrom for AccountStorageHeader { } } -// ACCOUNT PROOF REQUEST +// ACCOUNT REQUEST // ================================================================================================ /// Represents a request for an account proof. diff --git a/crates/proto/src/errors/mod.rs b/crates/proto/src/errors/mod.rs index d654141884..d2fc936167 100644 --- a/crates/proto/src/errors/mod.rs +++ b/crates/proto/src/errors/mod.rs @@ -17,6 +17,8 @@ mod test_macro; pub enum ConversionError { #[error("asset error")] AssetError(#[from] AssetError), + #[error("account code missing")] + AccountCodeMissing, #[error("account error")] AccountError(#[from] AccountError), #[error("fee parameters error")] @@ -48,8 +50,6 @@ pub enum ConversionError { entity: &'static str, field_name: &'static str, }, - #[error("MMR error")] - MmrError(#[from] miden_protocol::crypto::merkle::mmr::MmrError), #[error("failed to deserialize {entity}")] DeserializationError { entity: &'static str, diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index caa8258132..3e3ef1d0d6 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -93,7 +93,7 @@ pub struct MaybeNoteScript { #[prost(message, optional, tag = "1")] pub script: ::core::option::Option, } -/// Returns the latest state proof of the specified account. +/// Defines the request for account details. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountRequest { /// ID of the account for which we want to get data @@ -747,7 +747,7 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "CheckNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns the latest state proof of the specified account. + /// Returns the latest details of the specified account. pub async fn get_account( &mut self, request: impl tonic::IntoRequest, @@ -1147,7 +1147,7 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns the latest state proof of the specified account. + /// Returns the latest details of the specified account. async fn get_account( &self, request: tonic::Request, diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index aad46a4224..4892b7b9c9 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -459,7 +459,7 @@ pub mod rpc_client { req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "CheckNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns the latest state proof of the specified account. + /// Returns the latest details the specified account. pub async fn get_account( &mut self, request: impl tonic::IntoRequest, @@ -788,7 +788,7 @@ pub mod rpc_server { tonic::Response, tonic::Status, >; - /// Returns the latest state proof of the specified account. + /// Returns the latest details the specified account. async fn get_account( &self, request: tonic::Request, @@ -2461,6 +2461,31 @@ pub mod ntx_builder_client { .insert(GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountIds")); self.inner.unary(req, path, codec).await } + /// Returns the latest details of the specified account. 
+ pub async fn get_account( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetAccount", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetAccount")); + self.inner.unary(req, path, codec).await + } /// Returns the script for a note by its root. pub async fn get_note_script_by_root( &mut self, @@ -2594,6 +2619,14 @@ pub mod ntx_builder_server { tonic::Response, tonic::Status, >; + /// Returns the latest details of the specified account. + async fn get_account( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Returns the script for a note by its root. 
async fn get_note_script_by_root( &self, @@ -2946,6 +2979,51 @@ pub mod ntx_builder_server { }; Box::pin(fut) } + "/store.NtxBuilder/GetAccount" => { + #[allow(non_camel_case_types)] + struct GetAccountSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetAccountSvc { + type Response = super::super::rpc::AccountResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_account(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetAccountSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/store.NtxBuilder/GetNoteScriptByRoot" => { #[allow(non_camel_case_types)] struct GetNoteScriptByRootSvc(pub Arc); diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index f407ff8617..5f0fd764de 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -167,6 +167,19 @@ impl ntx_builder_server::NtxBuilder for StoreApi { })) } + async fn get_account( + &self, + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, ?request); + let request = request.into_inner(); + let account_request = request.try_into()?; + + let proof = 
self.state.get_account(account_request).await?; + + Ok(Response::new(proof.into())) + } + async fn get_note_script_by_root( &self, request: Request, diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 9b5351367f..c68e7b30a5 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -31,7 +31,7 @@ service Rpc { // Verify proofs against the nullifier tree root in the latest block header. rpc CheckNullifiers(rpc.NullifierList) returns (rpc.CheckNullifiersResponse) {} - // Returns the latest state proof of the specified account. + // Returns the latest details of the specified account. rpc GetAccount(rpc.AccountRequest) returns (rpc.AccountResponse) {} // Returns raw block data for the specified block number. @@ -267,6 +267,9 @@ service NtxBuilder { // Returns a list of all network account ids. rpc GetNetworkAccountIds(rpc.BlockRange) returns (NetworkAccountIdList) {} + // Returns the latest details of the specified account. + rpc GetAccount(rpc.AccountRequest) returns (rpc.AccountResponse) {} + // Returns the script for a note by its root. rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index d32459cf1d..b0f1046f59 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -30,7 +30,7 @@ service Api { // Verify proofs against the nullifier tree root in the latest block header. rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - // Returns the latest state proof of the specified account. + // Returns the latest details of the specified account. rpc GetAccount(AccountRequest) returns (AccountResponse) {} // Returns raw block data for the specified block number. @@ -219,7 +219,7 @@ message MaybeNoteScript { // GET ACCOUNT PROOF // ================================================================================================ -// Returns the latest state proof of the specified account. 
+// Defines the request for account details. message AccountRequest { // Request the details for a public account. message AccountDetailRequest { From 29e77b4f1b0e754995cb1f6ebc0472674e3bfa77 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Thu, 22 Jan 2026 16:03:13 -0300 Subject: [PATCH 125/125] fix: add missing is_latest filter to network account query (#1578) --- CHANGELOG.md | 1 + crates/store/src/db/models/queries/accounts.rs | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 176b1653ec..06b5def80a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ - Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)). - Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/miden-node/pull/1501)). - gRPC traces now correctly connect to the method implementation ([1553](https://github.com/0xMiden/miden-node/pull/1553)). +- Fixed ntx-builder crash on node restart after network transaction by adding missing `is_latest` filter to network account query ([#1578](https://github.com/0xMiden/miden-node/pull/1578)). ## v0.12.8 (2026-01-15) diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 5c049916e5..6568d5723e 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -536,7 +536,8 @@ pub(crate) fn select_all_network_account_ids( let account_ids_raw: Vec<(Vec, i64)> = Box::new( QueryDsl::select( schema::accounts::table - .filter(schema::accounts::network_account_id_prefix.is_not_null()), + .filter(schema::accounts::network_account_id_prefix.is_not_null()) + .filter(schema::accounts::is_latest.eq(true)), (schema::accounts::account_id, schema::accounts::created_at_block), ) .filter(