diff --git a/.github/actions/cleanup-runner/action.yml b/.github/actions/cleanup-runner/action.yml new file mode 100644 index 0000000000..22edac443f --- /dev/null +++ b/.github/actions/cleanup-runner/action.yml @@ -0,0 +1,12 @@ +name: 'Cleanup Runner' +description: 'Remove unused tools in the runner image to free disk space' + +runs: + using: 'composite' + steps: + - name: Remove unused tools in the runner image + shell: bash + run: | + sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL + sudo docker image prune --all --force || true + sudo docker builder prune -a --force || true diff --git a/.github/actions/install-rocksdb/action.yml b/.github/actions/install-rocksdb/action.yml new file mode 100644 index 0000000000..c42cb98250 --- /dev/null +++ b/.github/actions/install-rocksdb/action.yml @@ -0,0 +1,14 @@ +name: "Install RocksDB dependencies" +description: "Install dependencies for RocksDB compilation" + +runs: + using: "composite" + steps: + - name: Install LLVM/Clang for RocksDB + shell: bash + run: | + set -eux + sudo apt-get update + # Install clang/llvm for bindgen (needed for FFI bindings). + # RocksDB is compiled from source by librocksdb-sys. + sudo apt-get install -y clang llvm-dev libclang-dev diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 2806838def..10e48c5f1d 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -34,7 +34,7 @@ jobs: # The documentation is uploaded as a github artifact IFF it is required for deployment i.e. on push into next. 
build: name: Build documentation - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main @@ -66,7 +66,7 @@ jobs: environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: build if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} steps: diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml index 72d4c28d32..56cc7795f4 100644 --- a/.github/workflows/build-docs.yml +++ b/.github/workflows/build-docs.yml @@ -24,7 +24,7 @@ permissions: jobs: build-docs: name: Build Documentation - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Checkout repository diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index abe1488b17..be2667efd0 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -12,7 +12,7 @@ permissions: jobs: changelog: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@main diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 12e76f99cc..37bb1d1f21 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -16,20 +16,24 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. 
+ CARGO_PROFILE_DEV_DEBUG: 0 + jobs: typos: - runs-on: Linux-ARM64-Runner + runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - uses: actions/checkout@v4 - uses: taiki-e/install-action@v2 with: - tool: typos + tool: typos@1.42.0 - run: make typos-check rustfmt: name: rustfmt - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main - name: Rustup @@ -44,9 +48,13 @@ jobs: clippy: name: clippy - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: | rustup update --no-self-update @@ -58,42 +66,46 @@ jobs: run: make clippy toml: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - uses: actions/checkout@v4 - uses: taiki-e/install-action@v2 with: - tool: taplo-cli + tool: taplo-cli@0.10.0 - run: make toml-check workspace-lints: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - uses: actions/checkout@v4 - uses: taiki-e/install-action@v2 with: - tool: cargo-workspace-lints + tool: cargo-workspace-lints@0.1.4 - run: | make workspace-check doc: name: doc - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - name: Build docs - run: make doc + run: cargo doc --no-deps --workspace --all-features --locked unused_deps: name: check for unused dependencies - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@main - name: machete @@ -101,7 +113,7 @@ jobs: proto: name: proto check - runs-on: ubuntu-latest + runs-on: 
ubuntu-24.04 steps: - uses: actions/checkout@main - name: Rustup diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml deleted file mode 100644 index 354e2afa92..0000000000 --- a/.github/workflows/msrv.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: Check MSRV - -on: - push: - branches: [next] - pull_request: - types: [opened, reopened, synchronize] - -# Limits workflow concurrency to only the latest commit in the PR. -concurrency: - group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" - cancel-in-progress: true - -permissions: - contents: read - -jobs: - # Check MSRV (aka `rust-version`) in `Cargo.toml` is valid for workspace members - msrv: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install dependencies - run: sudo apt-get update && sudo apt-get install -y jq - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - name: Install cargo-msrv - run: cargo install cargo-msrv - - name: Check MSRV for each workspace member - run: | - ./scripts/check-msrv.sh diff --git a/.github/workflows/network-monitor.yml b/.github/workflows/network-monitor.yml index 507980803d..ca89a4df9d 100644 --- a/.github/workflows/network-monitor.yml +++ b/.github/workflows/network-monitor.yml @@ -16,10 +16,14 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. 
+ CARGO_PROFILE_DEV_DEBUG: 0 + jobs: check: name: check - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 15 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index 1539d7b1f7..a6d63d5035 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -25,12 +25,14 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main with: fetch-depth: 0 + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Build and Publish Node uses: ./.github/actions/debian with: @@ -48,7 +50,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main @@ -71,7 +73,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main @@ -94,7 +96,7 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index 1079bfddbf..81e8d74475 100644 --- a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -53,13 +53,16 @@ jobs: matrix: arch: [amd64, arm64] runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' 
|| 'ubuntu-latest' }} + labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo uses: actions/checkout@main with: fetch-depth: 0 + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb + - name: Build and Publish Packages uses: ./.github/actions/debian with: diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml index f4bfbe0088..990ef1d94c 100644 --- a/.github/workflows/publish-docker.yml +++ b/.github/workflows/publish-docker.yml @@ -24,7 +24,7 @@ permissions: jobs: publish: runs-on: - labels: "ubuntu-latest" + labels: "ubuntu-24.04" strategy: matrix: component: [node] diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-dry-run.yml index 9679c6d093..fe6b15e879 100644 --- a/.github/workflows/publish-dry-run.yml +++ b/.github/workflows/publish-dry-run.yml @@ -5,7 +5,11 @@ permissions: on: push: - branches: [main] + branches: [main, next] + +concurrency: + group: "${{ github.workflow }} @ ${{ github.ref }}" + cancel-in-progress: true jobs: publish-dry-run: @@ -17,9 +21,24 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + - uses: taiki-e/install-action@v2 + with: + tool: cargo-binstall@1.16.6 + - name: Install cargo-msrv + run: cargo binstall --no-confirm --force cargo-msrv + - name: Check MSRV for each workspace member run: | - rustup update --no-self-update + export PATH="$HOME/.cargo/bin:$PATH" + ./scripts/check-msrv.sh - name: Run cargo publish dry-run run: cargo publish --workspace --dry-run env: diff --git a/.github/workflows/publish-main.yml b/.github/workflows/publish-main.yml index 
dcc12a71bd..fcaab36a86 100644 --- a/.github/workflows/publish-main.yml +++ b/.github/workflows/publish-main.yml @@ -18,6 +18,8 @@ jobs: with: fetch-depth: 0 ref: main + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb # Ensure the release tag refers to the latest commit on main. # Compare the commit SHA that triggered the workflow with the HEAD of the branch we just # checked out (main). @@ -34,9 +36,22 @@ jobs: exit 1 fi echo "Release tag matches main HEAD — continuing." + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain + run: rustup update --no-self-update + - uses: Swatinem/rust-cache@v2 + - uses: taiki-e/install-action@v2 + with: + tool: cargo-binstall@1.16.6 + - name: Install cargo-msrv + run: cargo binstall --no-confirm --force cargo-msrv + - name: Check MSRV for each workspace member run: | - rustup update --no-self-update + export PATH="$HOME/.cargo/bin:$PATH" + ./scripts/check-msrv.sh - name: Run cargo publish run: cargo publish --workspace env: diff --git a/.github/workflows/stress-test-check.yml b/.github/workflows/stress-test-check.yml index 488a2c0681..383440b9ee 100644 --- a/.github/workflows/stress-test-check.yml +++ b/.github/workflows/stress-test-check.yml @@ -16,19 +16,29 @@ concurrency: group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" cancel-in-progress: true +env: + # Reduce cache usage by removing debug information. 
+ CARGO_PROFILE_DEV_DEBUG: 0 + jobs: stress-test-check: name: stress-test-check - runs-on: ubuntu-latest - timeout-minutes: 10 + runs-on: Linux-ARM64-Runner + timeout-minutes: 20 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - uses: taiki-e/install-action@nextest + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 - name: Install stress test run: make install-stress-test - name: Create directory for stress test store diff --git a/.github/workflows/test-beta.yml b/.github/workflows/test-beta.yml index 042d50be28..07b9705fdf 100644 --- a/.github/workflows/test-beta.yml +++ b/.github/workflows/test-beta.yml @@ -10,14 +10,20 @@ permissions: jobs: test: name: test - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 30 steps: - uses: actions/checkout@v4 with: ref: 'next' + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup install beta && rustup default beta - - uses: taiki-e/install-action@nextest + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 - name: Run tests run: make test diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 662fd3d440..7760225a67 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,6 +16,11 @@ concurrency: group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" cancel-in-progress: true +env: + # Reduce cache usage by removing debug information. + # This works for tests as well because TEST inherits from DEV. 
+ CARGO_PROFILE_DEV_DEBUG: 0 + jobs: test: name: test @@ -23,11 +28,20 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@main + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb - name: Rustup run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - - uses: taiki-e/install-action@nextest + - uses: taiki-e/install-action@v2 + with: + tool: nextest@0.9.122 - name: Run tests run: make test + - name: Doc tests + run: cargo test --doc --workspace --all-features + diff --git a/.github/workflows/trigger-deploy-docs.yml b/.github/workflows/trigger-deploy-docs.yml index 6de20e9784..ca54a442d2 100644 --- a/.github/workflows/trigger-deploy-docs.yml +++ b/.github/workflows/trigger-deploy-docs.yml @@ -8,7 +8,7 @@ on: jobs: notify: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 permissions: contents: read diff --git a/.release-plz.toml b/.release-plz.toml deleted file mode 100644 index c3dfed33db..0000000000 --- a/.release-plz.toml +++ /dev/null @@ -1,6 +0,0 @@ -[workspace] -changelog_update = false # For now we have our own changelog. -release_always = true # Without the tracking PR, it would never trigger unless `true`. - -git_release_enable = false -git_tag_enable = false diff --git a/CHANGELOG.md b/CHANGELOG.md index 8979dd1e08..06b5def80a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,77 @@ # Changelog +## v0.13.0 (TBD) + +### Enhancements + +- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). +- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). +- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)). 
+- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). +- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)). +- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)). +- The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)). +- Renamed card names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)). +- Integrated NTX Builder with validator via `SubmitProvenTransaction` RPC ([#1453](https://github.com/0xMiden/miden-node/pull/1453)). +- Added pagination to `GetNetworkAccountIds` endpoint ([#1452](https://github.com/0xMiden/miden-node/pull/1452)). +- Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)). +- Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)). +- Add partial storage map queries to RPC ([#1428](https://github.com/0xMiden/miden-node/pull/1428)). +- Added validated transactions check to block validation logic in Validator ([#1460](https://github.com/0xMiden/miden-node/pull/1460)). +- Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/miden-node/pull/1450)). +- Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)). +- Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)). +- Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)). +- Decoupled ntx-builder from block-producer startup by loading network accounts asynchronously via a background task ([#????](https://github.com/0xMiden/miden-node/pull/????)). 
+- Add foreign account support to validator ([#1493](https://github.com/0xMiden/miden-node/pull/1493)). +- Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496)). +- Limit number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). +- The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). +- Add support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)). +- Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). +- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). +- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). +- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/miden-node/pull/1569)). + +### Changes + +- [BREAKING] Removed `GetAccountDetails` RPC endpoint. Use `GetAccount` instead ([#1185](https://github.com/0xMiden/miden-node/issues/1185)). +- [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). +- Normalize response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)). +- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)). +- Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)). 
+- Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)). +- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298), [#1436](https://github.com/0xMiden/miden-node/pull/1436)). +- Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)). +- Refactor account table and introduce tracking forest ([#1394](https://github.com/0xMiden/miden-node/pull/1394)). +- [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)). +- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)). +- [BREAKING] Added block signing capabilities to Validator component and updated genesis bootstrap to sign blocks with configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)). +- Track network transactions latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)). +- Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). +- Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)). +- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)). +- [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)). +- [BREAKING] Change account in database representation ([#1481](https://github.com/0xMiden/miden-node/pull/1481)). +- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)). 
+- Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). +- [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/miden-node/pull/1526)). +- [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/miden-node/pull/1572)). +- Removed git information from node's `--version` CLI as it was often incorrect ([#1576](https://github.com/0xMiden/miden-node/pull/1576)). + +### Fixes + +- RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)). +- Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/miden-node/pull/1403)). +- Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/miden-node/pull/1407)). +- Fixed `AccountProofRequest` to retrieve the latest known state in case specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/miden-node/issues/1422)). +- Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/miden-node/pull/1461)). +- Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)). +- Fixed the network monitor counter account to use the storage slot name ([#1501](https://github.com/0xMiden/miden-node/pull/1501)). +- gRPC traces now correctly connect to the method implementation ([#1553](https://github.com/0xMiden/miden-node/pull/1553)). +- Fixed ntx-builder crash on node restart after network transaction by adding missing `is_latest` filter to network account query ([#1578](https://github.com/0xMiden/miden-node/pull/1578)). 
+ ## v0.12.8 (2026-01-15) ### Enhancements @@ -77,6 +149,8 @@ - Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution (#[1278](https://github.com/0xMiden/miden-node/pull/1278)). - Added `validator` crate with initial protobuf, gRPC server, and sub-command (#[1293](https://github.com/0xMiden/miden-node/pull/1293)). - [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/miden-node/pull/1292)). +- [BREAKING] Added `rocksdb` feature to enable rocksdb backends of `LargeSmt` ([#1326](https://github.com/0xMiden/miden-node/pull/1326)). +- [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/miden-node/pull/1333)). - Implement `DataStore::get_note_script()` for `NtxDataStore` (#[1332](https://github.com/0xMiden/miden-node/pull/1332)). - Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/miden-node/pull/1338)). @@ -116,6 +190,7 @@ - [BREAKING] Refactored protobuf messages ([#1045](https://github.com/0xMiden/miden-node/pull/#1045)). - Added `SyncStorageMaps` gRPC endpoint for retrieving account storage maps ([#1140](https://github.com/0xMiden/miden-node/pull/1140), [#1132](https://github.com/0xMiden/miden-node/pull/1132)). - Added `SyncAccountVault` gRPC endpoints for retrieving account assets ([#1176](https://github.com/0xMiden/miden-node/pull/1176)). +- Refactored Network Transaction Builder to manage dedicated tasks for every network account in the chain ([#1219](https://github.com/0xMiden/miden-node/pull/1219)). ### Changes diff --git a/Cargo.lock b/Cargo.lock index 7a14e4e1db..8ea0e75ef9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,16 +2,6 @@ # It is not intended for manual editing. 
version = 4 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - [[package]] name = "addr2line" version = "0.25.1" @@ -156,9 +146,12 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +dependencies = [ + "rustversion", +] [[package]] name = "arrayref" @@ -187,28 +180,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" -[[package]] -name = "async-stream" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.110", -] - [[package]] name = "async-trait" version = "0.1.89" @@ -217,7 +188,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -254,9 +225,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" 
+checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" dependencies = [ "axum-core", "bytes", @@ -287,9 +258,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", @@ -316,7 +287,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -342,15 +313,15 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" [[package]] name = "bech32" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" +checksum = "32637268377fc7b10a8c6d51de3e7fba1ce5dd371a96e342b34e6078db558e7f" [[package]] name = "beef" @@ -360,9 +331,9 @@ checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" [[package]] name = "bigdecimal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934" +checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695" dependencies = [ "autocfg", "libm", @@ -371,6 +342,24 @@ dependencies = [ "num-traits", ] +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.10.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.114", +] + [[package]] name = "bit-set" version = "0.8.0" @@ -409,15 +398,16 @@ dependencies = [ [[package]] name = "blake3" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", "constant_time_eq", + "cpufeatures", ] [[package]] @@ -452,9 +442,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "bytemuck" @@ -475,35 +465,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" [[package]] -name = "camino" -version = "1.2.1" +name = "bzip2-sys" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ - "serde_core", -] - -[[package]] -name = "cargo-platform" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.19.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" -dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.27", - "serde", - "serde_json", - "thiserror 2.0.17", + "cc", + "pkg-config", ] [[package]] @@ -514,9 +482,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.46" +version = "1.2.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" +checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" dependencies = [ "find-msvc-tools", "jobserver", @@ -524,6 +492,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cf-rustracing" version = "1.2.1" @@ -584,15 +561,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -633,6 +610,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "3.2.25" @@ -652,9 +640,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.51" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" dependencies = [ "clap_builder", "clap_derive 4.5.49", @@ -662,13 +650,13 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.51" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" dependencies = [ "anstream", "anstyle", - "clap_lex 0.7.6", + "clap_lex 0.7.7", "strsim 0.11.1", ] @@ -694,7 +682,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -708,15 +696,15 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cmake" -version = "0.1.54" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] @@ -735,9 +723,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" [[package]] name = "core-foundation" @@ -792,7 +780,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.51", + "clap 4.5.54", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -906,7 
+894,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -949,7 +937,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -963,7 +951,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -974,7 +962,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -985,7 +973,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -1078,7 +1066,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -1088,34 +1076,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "rustc_version 0.4.1", + "syn 2.0.114", ] [[package]] name = "diesel" -version = "2.3.3" +version = "2.3.5" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e7624a3bb9fffd82fff016be9a7f163d20e5a89eb8d28f9daaa6b30fff37500" +checksum = "e130c806dccc85428c564f2dc5a96e05b6615a27c9a28776bd7761a9af4bb552" dependencies = [ "bigdecimal", "diesel_derives", @@ -1130,22 +1119,22 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.3.4" +version = "2.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9daac6489a36e42570da165a10c424f3edcefdff70c5fd55e1847c23f3dd7562" +checksum = "c30b2969f923fa1f73744b92bb7df60b858df8832742d9a3aceb79236c0be1d2" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "diesel_migrations" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee060f709c3e3b1cadd83fcd0f61711f7a8cf493348f758d3a1c1147d70b3c97" +checksum = "745fd255645f0f1135f9ec55c7b00e0882192af9683ab4731e4bba3da82b8f9c" dependencies = [ "diesel", "migrations_internals", @@ -1158,7 +1147,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -1187,7 +1176,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -1213,7 +1202,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -1298,18 +1287,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enum_dispatch" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" -dependencies = [ - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.110", -] - [[package]] name = 
"env_filter" version = "0.1.4" @@ -1389,9 +1366,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixedbitset" @@ -1401,9 +1378,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" dependencies = [ "crc32fast", "libz-ng-sys", @@ -1466,9 +1443,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.2.0" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a" +checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" dependencies = [ "autocfg", ] @@ -1529,7 +1506,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -1570,16 +1547,17 @@ dependencies = [ [[package]] name = "generator" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" +checksum = "52f04ae4152da20c76fe800fa48659201d5cf627c5149ca0b707b69d7eef6cf9" dependencies = [ "cc", "cfg-if", "libc", "log", "rustversion", - "windows", + "windows-link", + "windows-result", ] [[package]] @@ -1595,9 +1573,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "js-sys", @@ -1620,6 +1598,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "gimli" version = "0.32.3" @@ -1645,9 +1635,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -1655,7 +1645,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.0", + "indexmap 2.13.0", "slab", "tokio", "tokio-util", @@ -1692,15 +1682,16 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", "foldhash 0.2.0", "rayon", "serde", + "serde_core", ] [[package]] @@ -1756,23 +1747,22 @@ dependencies = [ [[package]] name = "hostname" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" +checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" dependencies = [ "cfg-if", "libc", - "windows-link 
0.1.3", + "windows-link", ] [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -1887,9 +1877,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64", "bytes", @@ -1923,7 +1913,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core", ] [[package]] @@ -1983,9 +1973,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ "icu_collections", "icu_locale_core", @@ -1997,9 +1987,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" @@ -2061,12 +2051,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +checksum = 
"7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown 0.16.0", + "hashbrown 0.16.1", ] [[package]] @@ -2092,9 +2082,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" dependencies = [ "memchr", "serde", @@ -2132,6 +2122,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -2143,15 +2142,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jiff" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49cce2b81f2098e7e3efc35bc2e0a6b7abec9d34128283d7a26fa8f32a6dbb35" +checksum = "e67e8da4c49d6d9909fe03361f9b620f58898859f5c7aded68351e85e71ecf50" dependencies = [ "jiff-static", "log", @@ -2162,13 +2161,13 @@ dependencies = [ [[package]] name = "jiff-static" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "980af8b43c3ad5d8d349ace167ec8170839f753a42d233ba19e08afe1850fa69" +checksum = "e0c84ee7f197eca9a86c6fd6cb771e55eb991632f15f2bc3ca6ec838929e6e78" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] 
[[package]] @@ -2183,9 +2182,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.82" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -2252,9 +2251,19 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] [[package]] name = "libm" @@ -2262,6 +2271,20 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +[[package]] +name = "librocksdb-sys" +version = "0.17.3+10.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "libc", + "libz-sys", + "lz4-sys", +] + [[package]] name = "libsqlite3-sys" version = "0.35.0" @@ -2282,6 +2305,17 @@ dependencies = [ "libc", ] +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2308,14 +2342,13 @@ checksum = 
"6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.5" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" dependencies = [ "libc", "neli", - "thiserror 2.0.17", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2329,9 +2362,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "logos" @@ -2355,7 +2388,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -2391,9 +2424,19 @@ dependencies = [ [[package]] name = "lru" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96051b46fc183dc9cd4a223960ef37b9af631b55191852a8274bfef064cda20f" +checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" + +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +dependencies = [ + "cc", + "libc", +] [[package]] name = "matchers" @@ -2431,40 +2474,59 @@ dependencies = [ "autocfg", ] +[[package]] +name = "miden-agglayer" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccebe2f7aa9e173913a9da60bd21e8402936c784fdf1eba8c48956667def354e" +dependencies = [ + "fs-err", + "miden-assembly", + "miden-core", + "miden-core-lib", + "miden-protocol", + "miden-standards", + "miden-utils-sync", + "regex", + 
"walkdir", +] + [[package]] name = "miden-air" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06acfd2ddc25b68f9d23d2add3f15c0ec3f9890ce6418409d71bea9dc6590bd0" +checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" dependencies = [ "miden-core", "miden-utils-indexing", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-air", "winter-prover", ] [[package]] name = "miden-assembly" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1219b9e48bb286b58a23bb65cf74baa1b24ddbcb462ca625b38186674571047" +checksum = "24c6a18e29c03141cf9044604390a00691c7342924ec865b4acfdd560ff41ede" dependencies = [ + "env_logger", "log", "miden-assembly-syntax", "miden-core", "miden-mast-package", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "miden-assembly-syntax" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eeaef2853061c54527bb2664c0c832ce3d1f80847c79512455fec3b93057f2a" +checksum = "7458ff670f5a514bf972aa84d6e1851a4c4e9afa351f53b71bdc2218b99254b6" dependencies = [ "aho-corasick", + "env_logger", "lalrpop", "lalrpop-util", "log", @@ -2473,55 +2535,77 @@ dependencies = [ "miden-utils-diagnostics", "midenc-hir-type", "proptest", + "proptest-derive", "regex", "rustc_version 0.4.1", "semver 1.0.27", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "miden-block-prover" -version = "0.12.4" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec766587e838664ded55fa926d0611244cac2fe23b7cec202d8db0a85d9e536e" +checksum = "aa9c89257b227d0668105b4a6e81ea33956795c89549cc1baa3f253d753e81e5" dependencies = [ - "miden-lib", - "miden-objects", - "thiserror 2.0.17", + "miden-protocol", + "thiserror 2.0.18", ] [[package]] name = "miden-core" -version = "0.19.1" +version = 
"0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452a00429d05c416001ec0578291eb88e115cf94fc22b3308267abfdcd813440" +checksum = "21a5c9c8c3d42ae8381ed49e47ff9ad2d2e345c4726761be36b7d4000ebb40ae" dependencies = [ - "enum_dispatch", + "derive_more", + "itertools 0.14.0", "miden-crypto", "miden-debug-types", "miden-formatting", + "miden-utils-core-derive", "miden-utils-indexing", "num-derive", "num-traits", - "thiserror 2.0.17", + "proptest", + "proptest-derive", + "thiserror 2.0.18", "winter-math", "winter-utils", ] +[[package]] +name = "miden-core-lib" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6556494ea5576803730fa15015bee6bd9d1a117450f22e7df0883421e7423674" +dependencies = [ + "env_logger", + "fs-err", + "miden-assembly", + "miden-core", + "miden-crypto", + "miden-processor", + "miden-utils-sync", + "sha2", + "thiserror 2.0.18", +] + [[package]] name = "miden-crypto" -version = "0.18.2" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb82051002f9c64878d3b105a7b924de1ee92019231923380cf4ecd7b824f9a" +checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" dependencies = [ "blake3", "cc", "chacha20poly1305", + "curve25519-dalek", "ed25519-dalek", "flume", "glob", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "hkdf", "k256", "miden-crypto-derive", @@ -2529,12 +2613,14 @@ dependencies = [ "num-complex", "rand 0.9.2", "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", "rand_hc", "rayon", + "rocksdb", + "sha2", "sha3", "subtle", - "thiserror 2.0.17", + "thiserror 2.0.18", "winter-crypto", "winter-math", "winter-utils", @@ -2543,19 +2629,19 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.18.2" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2222f37355ea975f40acd3c098a437574a31a4d8a2c193cf4e9fead2beede577" +checksum = 
"f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "miden-debug-types" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97eed62ac0ca7420e49148fd306c74786b23a8d31df6da6277c671ba3e5c619a" +checksum = "19123e896f24b575e69921a79a39a0a4babeb98404a8601017feb13b75d653b3" dependencies = [ "memchr", "miden-crypto", @@ -2565,8 +2651,8 @@ dependencies = [ "miden-utils-sync", "paste", "serde", - "serde_spanned 1.0.3", - "thiserror 2.0.17", + "serde_spanned 1.0.4", + "thiserror 2.0.18", ] [[package]] @@ -2578,35 +2664,16 @@ dependencies = [ "unicode-width 0.1.14", ] -[[package]] -name = "miden-lib" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598582071e5b0ec835d06288857d4ddc0090a98bd4c17e408fa56b2c43f45d73" -dependencies = [ - "Inflector", - "fs-err", - "miden-assembly", - "miden-core", - "miden-objects", - "miden-processor", - "miden-stdlib", - "rand 0.9.2", - "regex", - "thiserror 2.0.17", - "walkdir", -] - [[package]] name = "miden-mast-package" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d13e6ba2b357551598f13396ed52f8f21aa99979aa3b338bb5521feeda19c8a" +checksum = "f0d6a322b91efa1bb71e224395ca1fb9ca00e2614f89427e35d8c42a903868a3" dependencies = [ "derive_more", "miden-assembly-syntax", "miden-core", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -2632,10 +2699,10 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.110", + "syn 2.0.114", "terminal_size 0.3.0", "textwrap", - "thiserror 2.0.17", + "thiserror 2.0.18", "trybuild", "unicode-width 0.1.14", ] @@ -2648,22 +2715,22 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] 
[[package]] name = "miden-network-monitor" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "axum", - "clap 4.5.51", + "clap 4.5.54", "hex", "humantime", - "miden-lib", "miden-node-proto", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "miden-testing", "miden-tx", "rand 0.9.2", @@ -2680,12 +2747,13 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", - "clap 4.5.51", + "clap 4.5.54", "figment", "fs-err", + "hex", "humantime", "miden-node-block-producer", "miden-node-ntx-builder", @@ -2693,28 +2761,28 @@ dependencies = [ "miden-node-store", "miden-node-utils", "miden-node-validator", - "miden-objects", + "miden-protocol", "tokio", "url", ] [[package]] name = "miden-node-block-producer" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "assert_matches", "futures", "itertools 0.14.0", "miden-block-prover", - "miden-lib", "miden-node-proto", "miden-node-proto-build", "miden-node-store", "miden-node-test-macro", "miden-node-utils", - "miden-objects", + "miden-protocol", "miden-remote-prover-client", + "miden-standards", "miden-tx", "miden-tx-batch-prover", "pretty_assertions", @@ -2723,7 +2791,7 @@ dependencies = [ "rstest", "serial_test", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2736,29 +2804,31 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.12.8" +version = "0.13.0" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "miden-node-ntx-builder" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "futures", - "lru 0.16.2", + "indexmap 2.13.0", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", - "miden-objects", + "miden-protocol", "miden-remote-prover-client", + "miden-standards", "miden-tx", "rstest", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", + "tokio-util", "tonic", 
"tracing", "url", @@ -2766,20 +2836,22 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", + "assert_matches", "fs-err", "hex", "http", "miden-node-grpc-error-macro", "miden-node-proto-build", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "miette", "proptest", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tonic", "tonic-prost", "tonic-prost-build", @@ -2788,7 +2860,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.12.8" +version = "0.13.0" dependencies = [ "fs-err", "miette", @@ -2798,25 +2870,25 @@ dependencies = [ [[package]] name = "miden-node-rpc" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "futures", "http", "mediatype", "miden-air", - "miden-lib", "miden-node-proto", "miden-node-proto-build", "miden-node-store", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "miden-tx", "reqwest", "rstest", "semver 1.0.27", "tempfile", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2830,7 +2902,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "assert_matches", @@ -2842,23 +2914,24 @@ dependencies = [ "diesel_migrations", "fs-err", "hex", - "indexmap 2.12.0", - "miden-lib", + "indexmap 2.13.0", + "miden-crypto", "miden-node-proto", "miden-node-proto-build", "miden-node-test-macro", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "pretty_assertions", "rand 0.9.2", "rand_chacha 0.9.0", "regex", "serde", "termtree", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", "tonic", "tonic-reflection", "tower-http", @@ -2867,19 +2940,19 @@ dependencies = [ [[package]] name = "miden-node-stress-test" -version = "0.12.8" +version = "0.13.0" dependencies = [ - "clap 4.5.51", + "clap 
4.5.54", "fs-err", "futures", "miden-air", "miden-block-prover", - "miden-lib", "miden-node-block-producer", "miden-node-proto", "miden-node-store", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "rand 0.9.2", "rayon", "tokio", @@ -2892,12 +2965,12 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "miden-node-utils" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "bytes", @@ -2905,13 +2978,14 @@ dependencies = [ "http", "http-body-util", "itertools 0.14.0", - "miden-objects", + "lru 0.16.3", + "miden-protocol", "opentelemetry", "opentelemetry-otlp", "opentelemetry_sdk", "rand 0.9.2", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", "tower-http", @@ -2920,18 +2994,19 @@ dependencies = [ "tracing-opentelemetry", "tracing-subscriber", "url", - "vergen", - "vergen-gitcl", ] [[package]] name = "miden-node-validator" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "miden-node-proto", "miden-node-proto-build", "miden-node-utils", + "miden-protocol", + "miden-tx", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -2941,56 +3016,72 @@ dependencies = [ ] [[package]] -name = "miden-objects" -version = "0.12.4" +name = "miden-processor" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace4018bb2d6cdbcff4d86d8af5ade8efca9f0479f7e5775c7f09cfab5f91ebe" +checksum = "4a659fac55de14647e2695f03d96b83ff94fe65fd31e74d81c225ec52af25acf" +dependencies = [ + "itertools 0.14.0", + "miden-air", + "miden-core", + "miden-debug-types", + "miden-utils-diagnostics", + "miden-utils-indexing", + "paste", + "rayon", + "thiserror 2.0.18", + "tokio", + "tracing", + "winter-prover", +] + +[[package]] +name = "miden-protocol" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dfed3ae85e2fabbf8a2e7416e388a40519e10cbf0cdceda222ef858c2f270b35" dependencies = [ "bech32", + "fs-err", "getrandom 0.3.4", "miden-assembly", "miden-assembly-syntax", "miden-core", + "miden-core-lib", "miden-crypto", "miden-mast-package", "miden-processor", - "miden-stdlib", + "miden-protocol-macros", "miden-utils-sync", "miden-verifier", "rand 0.9.2", + "rand_chacha 0.9.0", "rand_xoshiro", + "regex", "semver 1.0.27", - "serde", - "thiserror 2.0.17", - "toml 0.9.8", - "winter-rand-utils", -] - -[[package]] -name = "miden-processor" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ef77929651b8755965cde8f589bd38e2345a619d54cab6427f91aa23c47f6a" -dependencies = [ - "itertools 0.14.0", - "miden-air", - "miden-core", - "miden-debug-types", - "miden-utils-diagnostics", - "miden-utils-indexing", - "paste", - "rayon", - "thiserror 2.0.17", - "tokio", - "tracing", - "winter-prover", + "serde", + "thiserror 2.0.18", + "toml 0.9.11+spec-1.1.0", + "walkdir", + "winter-rand-utils", +] + +[[package]] +name = "miden-protocol-macros" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f41a93dd532baa3a4c821073baad5d700aab119b3831ef7fdf004e196c10157e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", ] [[package]] name = "miden-prover" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c30a5d10baeec17b9336de8544cb7f9b96b32de757c4cfb8d95ee0521bb5cd" +checksum = "4e5df61f50f27886f6f777d6e0cdf785f7db87dd881799a84a801e7330c189c8" dependencies = [ "miden-air", "miden-debug-types", @@ -3002,20 +3093,21 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.12.8" +version = "0.13.0" dependencies = [ "anyhow", "async-trait", "axum", "bytes", - "clap 4.5.51", + "clap 4.5.54", "http", "humantime", "miden-block-prover", - "miden-lib", + "miden-node-proto", "miden-node-proto-build", 
"miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "miden-testing", "miden-tx", "miden-tx-batch-prover", @@ -3031,7 +3123,7 @@ dependencies = [ "semver 1.0.27", "serde", "serde_qs", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", @@ -3047,15 +3139,16 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.12.8" +version = "0.13.0" dependencies = [ + "fs-err", "getrandom 0.3.4", "miden-node-proto-build", - "miden-objects", + "miden-protocol", "miden-tx", "miette", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", "tonic-prost", @@ -3065,72 +3158,85 @@ dependencies = [ ] [[package]] -name = "miden-stdlib" -version = "0.19.1" +name = "miden-standards" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e90a5de45a1e6213ff17b66fff8accde0bbc64264e2c22bbcb9a895f8f3b767" +checksum = "16144e41701794b45b7a361ec7d35407a90c4d1d129a43df0bc278d5f3327999" dependencies = [ - "env_logger", "fs-err", "miden-assembly", "miden-core", - "miden-crypto", + "miden-core-lib", "miden-processor", - "miden-utils-sync", - "thiserror 2.0.17", + "miden-protocol", + "rand 0.9.2", + "regex", + "thiserror 2.0.18", + "walkdir", ] [[package]] name = "miden-testing" -version = "0.12.4" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda0d572d7415682ed168f616becf006825aa04b89692f9907cbb3e3586bf46a" +checksum = "9bd0c6d0ceb4e6719a5afe76b9627b73e91506ebb66350d56ca9ed606127e4dc" dependencies = [ "anyhow", "itertools 0.14.0", + "miden-agglayer", + "miden-assembly", "miden-block-prover", - "miden-lib", - "miden-objects", + "miden-core-lib", "miden-processor", + "miden-protocol", + "miden-standards", "miden-tx", "miden-tx-batch-prover", "rand 0.9.2", "rand_chacha 0.9.0", - "thiserror 2.0.17", "winterfell", ] [[package]] name = "miden-tx" -version = "0.12.4" +version = "0.13.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d959064f99ce09fc38e9b6b4dc24c3fa80a63072bf5840a1074ca4ed5e9c911" +checksum = "a97f26c833633cea0d95ddb38bcd8bd7e8225b4e7746c15070cb9ab7b85e248c" dependencies = [ - "miden-lib", - "miden-objects", "miden-processor", + "miden-protocol", "miden-prover", + "miden-standards", "miden-verifier", - "rand 0.9.2", - "thiserror 2.0.17", - "tokio", + "thiserror 2.0.18", ] [[package]] name = "miden-tx-batch-prover" -version = "0.12.4" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5029810b106654a1ec5d7d7123945db91b96bc4f4187715d0c2cfe0b0a53af4" +checksum = "0669ce9d9c7aacd49e4923edb88fe668e370c02a754d1564b10a97501e37310f" dependencies = [ - "miden-objects", + "miden-protocol", "miden-tx", ] +[[package]] +name = "miden-utils-core-derive" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa207ffd8b26a79d9b5b246a352812f0015c0bb8f75492ec089c5c8e6d5f9e2b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "miden-utils-diagnostics" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a3ff4c019d96539a7066626efb4dce5c9fb7b0e44e961b0c2571e78f34236d5" +checksum = "6b2f55477d410542a5d8990ca04856adf5bef91bfa3b54ca3c03a5ff14a6e25c" dependencies = [ "miden-crypto", "miden-debug-types", @@ -3141,18 +3247,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c798250bee4e856d4f18c161e91cdcbef1906f6614d00cf0063b47031c0f8cc6" +checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" dependencies = [ - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] name = "miden-utils-sync" -version = "0.19.1" +version = "0.20.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "feebe7d896c013ea74dbc98de978836606356a044d4ed3b61ded54d3b319d89f" +checksum = "da7fa8f5fd27f122c83f55752f2a964bbfc2b713de419e9c152f7dcc05c194ec" dependencies = [ "lock_api", "loom", @@ -3161,13 +3267,13 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8f8e47b78bba1fe1b31faee8f12aafd95385f6d6a8b108b03e92f5d743bb29f" +checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" dependencies = [ "miden-air", "miden-core", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "winter-verifier", ] @@ -3180,7 +3286,7 @@ checksum = "9d4cfab04baffdda3fb9eafa5f873604059b89a1699aa95e4f1057397a69f0b5" dependencies = [ "miden-formatting", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -3210,7 +3316,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -3220,7 +3326,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36c791ecdf977c99f45f23280405d7723727470f6689a5e6dbf513ac547ae10d" dependencies = [ "serde", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", ] [[package]] @@ -3240,6 +3346,12 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -3252,9 +3364,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", @@ -3273,7 +3385,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] @@ -3285,7 +3397,7 @@ dependencies = [ "libc", "log", "openssl", - "openssl-probe", + "openssl-probe 0.1.6", "openssl-sys", "schannel", "security-framework 2.11.1", @@ -3295,27 +3407,31 @@ dependencies = [ [[package]] name = "neli" -version = "0.6.5" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" dependencies = [ + "bitflags 2.10.0", "byteorder", + "derive_builder", + "getset", "libc", "log", "neli-proc-macros", + "parking_lot", ] [[package]] name = "neli-proc-macros" -version = "0.1.4" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 1.0.109", + "syn 2.0.114", ] [[package]] @@ -3336,6 +3452,16 @@ dependencies = [ "memoffset", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -3392,7 +3518,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 
2.0.114", ] [[package]] @@ -3446,15 +3572,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_threads" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ - "libc", -] - [[package]] name = "object" version = "0.37.3" @@ -3511,7 +3628,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -3520,6 +3637,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + [[package]] name = "openssl-sys" version = "0.9.111" @@ -3542,7 +3665,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] @@ -3557,7 +3680,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tonic", ] @@ -3587,7 +3710,7 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", ] @@ -3624,7 +3747,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -3653,7 +3776,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -3669,7 +3792,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.12.0", + "indexmap 2.13.0", ] [[package]] @@ -3698,7 +3821,7 @@ checksum = 
"6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -3786,7 +3909,7 @@ dependencies = [ "log", "nix", "once_cell", - "openssl-probe", + "openssl-probe 0.1.6", "parking_lot", "percent-encoding", "pingora-error", @@ -3890,7 +4013,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4015,9 +4138,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" [[package]] name = "portable-atomic-util" @@ -4075,7 +4198,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -4084,7 +4207,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit 0.23.10+spec-1.0.0", ] [[package]] @@ -4111,11 +4234,33 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -4128,7 +4273,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", "version_check", "yansi", ] @@ -4160,7 +4305,7 @@ dependencies = [ "memchr", "parking_lot", "protobuf 3.7.2", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4182,6 +4327,17 @@ dependencies = [ "unarray", ] +[[package]] +name = "proptest-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "prost" version = "0.14.1" @@ -4210,28 +4366,28 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.110", + "syn 2.0.114", "tempfile", ] [[package]] name = "prost-derive" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "prost-reflect" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89a3ac73ec9a9118131a4594c9d336631a07852220a1d0ae03ee36b04503a063" +checksum = 
"b89455ef41ed200cafc47c76c552ee7792370ac420497e551f16123a9135f76e" dependencies = [ "logos", "miette", @@ -4286,7 +4442,7 @@ dependencies = [ "prost-reflect", "prost-types", "protox-parse", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4298,7 +4454,7 @@ dependencies = [ "logos", "miette", "prost-types", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4329,9 +4485,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.42" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] @@ -4360,7 +4516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4380,7 +4536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4389,14 +4545,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", ] @@ -4416,7 +4572,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4425,7 +4581,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f703f4665700daf5512dcca5f43afa6af89f09db47fb56be587f80636bda2d41" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -4494,9 +4650,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.24" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64", "bytes", @@ -4550,7 +4706,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -4558,26 +4714,43 @@ dependencies = [ [[package]] name = "rmp" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" +checksum = "4ba8be72d372b2c9b35542551678538b562e7cf86c3315773cae48dfbfe7790c" dependencies = [ - "byteorder", "num-traits", - "paste", ] [[package]] name = "rmp-serde" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db" +checksum = "72f81bee8c8ef9b577d1681a70ebbc962c232461e397b22c208c43c04b67a155" dependencies = [ - "byteorder", "rmp", "serde", ] +[[package]] +name = "rocksdb" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f" +dependencies = 
[ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.18", +] + [[package]] name = "rstest" version = "0.26.1" @@ -4603,15 +4776,15 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.110", + "syn 2.0.114", "unicode-ident", ] [[package]] name = "rust_decimal" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" dependencies = [ "arrayvec", "num-traits", @@ -4619,9 +4792,15 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" + +[[package]] +name = "rustc-hash" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -4656,9 +4835,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags 2.10.0", "errno", @@ -4669,9 +4848,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "log", "once_cell", @@ -4684,11 +4863,11 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe", + "openssl-probe 0.2.1", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -4696,18 +4875,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ "ring", "rustls-pki-types", @@ -4734,9 +4913,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ -4885,20 +5064,20 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.149" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] [[package]] @@ -4920,7 +5099,7 @@ checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" dependencies = [ "percent-encoding", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -4934,9 +5113,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" dependencies = [ "serde_core", ] @@ -4967,11 +5146,12 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" dependencies = [ - "futures", + "futures-executor", + "futures-util", "log", "once_cell", "parking_lot", @@ -4981,13 +5161,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -4997,7 +5177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" dependencies = [ "base64", - "indexmap 2.12.0", + "indexmap 2.13.0", "rust_decimal", ] 
@@ -5039,10 +5219,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.6" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] @@ -5058,9 +5239,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "siphasher" @@ -5117,17 +5298,14 @@ dependencies = [ [[package]] name = "sqlite-wasm-rs" -version = "0.4.7" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c6d746902bca4ddf16592357eacf0473631ea26b36072f0dd0b31fa5ccd1f4" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" dependencies = [ + "cc", "js-sys", - "once_cell", - "thiserror 2.0.17", - "tokio", + "rsqlite-vfs", "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", ] [[package]] @@ -5188,7 +5366,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -5208,9 +5386,9 @@ dependencies = [ [[package]] name = "supports-hyperlinks" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804f44ed3c63152de6a9f90acbea1a110441de43006ea51bcce8f436196a288b" +checksum = "e396b6523b11ccb83120b115a0b7366de372751aa6edf19844dfb13a6af97e91" [[package]] name = "supports-unicode" @@ -5231,9 +5409,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.110" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -5257,7 +5435,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -5289,22 +5467,22 @@ checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" [[package]] name = "tempfile" -version = "3.23.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", "getrandom 0.3.4", "once_cell", - "rustix 1.1.2", + "rustix 1.1.3", "windows-sys 0.61.2", ] [[package]] name = "term" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2111ef44dae28680ae9752bb89409e7310ca33a8c621ebe7b106cf5c928b3ac0" +checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ "windows-sys 0.61.2", ] @@ -5334,7 +5512,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.1.2", + "rustix 1.1.3", "windows-sys 0.60.2", ] @@ -5366,11 +5544,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.17", + "thiserror-impl 2.0.18", ] [[package]] @@ -5381,18 +5559,18 @@ checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -5416,32 +5594,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" dependencies = [ "deranged", "itoa", - "libc", "num-conv", - "num_threads", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" dependencies = [ "num-conv", "time-core", @@ -5469,9 +5645,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", 
@@ -5492,7 +5668,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -5517,9 +5693,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -5529,12 +5705,10 @@ dependencies = [ [[package]] name = "tokio-test" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" +checksum = "3f6d24790a10a7af737693a3e8f1d03faef7e6ca0cc99aae5066f533766de545" dependencies = [ - "async-stream", - "bytes", "futures-core", "tokio", "tokio-stream", @@ -5542,9 +5716,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -5567,14 +5741,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.8" +version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.13.0", "serde_core", - "serde_spanned 1.0.3", - "toml_datetime 0.7.3", + "serde_spanned 1.0.4", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", "winnow", @@ -5591,9 +5765,9 @@ dependencies = [ [[package]] name = 
"toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] @@ -5604,7 +5778,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.13.0", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -5614,21 +5788,21 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.0", - "toml_datetime 0.7.3", + "indexmap 2.13.0", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", ] [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -5641,9 +5815,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" @@ -5685,7 +5859,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] 
[[package]] @@ -5723,7 +5897,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.110", + "syn 2.0.114", "tempfile", "tonic-build", ] @@ -5776,7 +5950,7 @@ dependencies = [ "httparse", "js-sys", "pin-project", - "thiserror 2.0.17", + "thiserror 2.0.18", "tonic", "tower-service", "wasm-bindgen", @@ -5787,13 +5961,13 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.0", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -5806,9 +5980,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "bitflags 2.10.0", "bytes", @@ -5838,9 +6012,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -5850,20 +6024,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + 
"syn 2.0.114", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -5877,7 +6051,7 @@ checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" dependencies = [ "chrono", "smallvec", - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", "tracing-subscriber", ] @@ -5895,16 +6069,13 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" +checksum = "1ac28f2d093c6c477eaa76b23525478f38de514fa9aeb1285738d4b97a9552fc" dependencies = [ "js-sys", "opentelemetry", - "opentelemetry_sdk", - "rustversion", "smallvec", - "thiserror 2.0.17", "tracing", "tracing-core", "tracing-log", @@ -5924,9 +6095,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", @@ -5981,7 +6152,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", ] [[package]] @@ -6007,9 +6178,9 @@ dependencies = [ [[package]] name = "unicase" -version = "2.8.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" @@ -6059,14 +6230,15 @@ 
checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -6083,9 +6255,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -6104,46 +6276,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vergen" -version = "9.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2bf58be11fc9414104c6d3a2e464163db5ef74b12296bda593cac37b6e4777" -dependencies = [ - "anyhow", - "cargo_metadata", - "derive_builder", - "regex", - "rustc_version 0.4.1", - "rustversion", - "vergen-lib", -] - -[[package]] -name = "vergen-gitcl" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9dfc1de6eb2e08a4ddf152f1b179529638bedc0ea95e6d667c014506377aefe" -dependencies = [ - "anyhow", - "derive_builder", - "rustversion", - "time", - "vergen", - "vergen-lib", -] - -[[package]] -name = "vergen-lib" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b07e6010c0f3e59fcb164e0163834597da68d1f864e2b8ca49f74de01e9c166" -dependencies = [ - "anyhow", - "derive_builder", - 
"rustversion", -] - [[package]] name = "version_check" version = "0.9.5" @@ -6195,18 +6327,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.1+wasi-0.2.4" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -6217,11 +6349,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.55" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -6230,9 +6363,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6240,22 +6373,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = 
"5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.105" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] @@ -6275,9 +6408,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.82" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -6324,41 +6457,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.61.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" -dependencies = [ - "windows-collections", - "windows-core 0.61.2", - "windows-future", - "windows-link 0.1.3", - "windows-numerics", -] - -[[package]] -name = "windows-collections" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" -dependencies = [ - "windows-core 0.61.2", -] - -[[package]] -name = "windows-core" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-link 0.1.3", - "windows-result 0.3.4", - 
"windows-strings 0.4.2", -] - [[package]] name = "windows-core" version = "0.62.2" @@ -6367,20 +6465,9 @@ checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link 0.2.1", - "windows-result 0.4.1", - "windows-strings 0.5.1", -] - -[[package]] -name = "windows-future" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", - "windows-threading", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] @@ -6391,7 +6478,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -6402,49 +6489,24 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" -[[package]] -name = "windows-numerics" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", -] - [[package]] name = "windows-registry" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" dependencies = [ - "windows-link 0.2.1", - "windows-result 0.4.1", - 
"windows-strings 0.5.1", -] - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] @@ -6453,16 +6515,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", + "windows-link", ] [[package]] @@ -6471,7 +6524,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -6516,7 +6569,7 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -6556,7 +6609,7 @@ version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link 0.2.1", + "windows-link", "windows_aarch64_gnullvm 0.53.1", "windows_aarch64_msvc 0.53.1", "windows_i686_gnu 0.53.1", @@ -6567,15 +6620,6 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] -[[package]] -name = "windows-threading" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" -dependencies = [ - 
"windows-link 0.1.3", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -6716,9 +6760,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] @@ -6775,7 +6819,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.114", ] [[package]] @@ -6838,9 +6882,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.46.0" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" [[package]] name = "writeable" @@ -6892,28 +6936,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", 
+ "syn 2.0.114", ] [[package]] @@ -6933,7 +6977,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", "synstructure", ] @@ -6973,9 +7017,15 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.114", ] +[[package]] +name = "zmij" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" + [[package]] name = "zstd" version = "0.13.3" diff --git a/Cargo.toml b/Cargo.toml index 52c8d3e0fc..cf690b306c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" rust-version = "1.90" -version = "0.12.8" +version = "0.13.0" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto] @@ -36,45 +36,48 @@ opt-level = 2 [workspace.dependencies] # Workspace crates. 
-miden-node-block-producer = { path = "crates/block-producer", version = "0.12" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.12" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.12" } -miden-node-proto = { path = "crates/proto", version = "0.12" } -miden-node-proto-build = { path = "proto", version = "0.12" } -miden-node-rpc = { path = "crates/rpc", version = "0.12" } -miden-node-store = { path = "crates/store", version = "0.12" } +miden-node-block-producer = { path = "crates/block-producer", version = "0.13" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.13" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.13" } +miden-node-proto = { path = "crates/proto", version = "0.13" } +miden-node-proto-build = { path = "proto", version = "0.13" } +miden-node-rpc = { path = "crates/rpc", version = "0.13" } +miden-node-store = { path = "crates/store", version = "0.13" } miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.12" } -miden-node-validator = { path = "crates/validator", version = "0.12" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.12" } +miden-node-utils = { path = "crates/utils", version = "0.13" } +miden-node-validator = { path = "crates/validator", version = "0.13" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } # miden-base aka protocol dependencies. These should be updated in sync. 
-miden-block-prover = { version = "0.12.4" } -miden-lib = { version = "0.12.4" } -miden-objects = { default-features = false, version = "0.12.4" } -miden-testing = { version = "0.12.4" } -miden-tx = { default-features = false, version = "0.12.4" } -miden-tx-batch-prover = { version = "0.12.4" } +miden-block-prover = { version = "0.13" } +miden-protocol = { default-features = false, version = "0.13" } +miden-standards = { version = "0.13" } +miden-testing = { version = "0.13" } +miden-tx = { default-features = false, version = "0.13" } +miden-tx-batch-prover = { version = "0.13" } # Other miden dependencies. These should align with those expected by miden-base. -miden-air = { features = ["std", "testing"], version = "0.19" } +miden-air = { features = ["std", "testing"], version = "0.20" } +miden-crypto = { default-features = false, version = "0.19" } # External dependencies -anyhow = { version = "1.0" } -assert_matches = { version = "1.5" } -async-trait = { version = "0.1" } -clap = { features = ["derive"], version = "4.5" } -fs-err = { version = "3" } -futures = { version = "0.3" } -hex = { version = "0.4" } -http = { version = "1.3" } -humantime = { version = "2.2" } -indexmap = { version = "2.12" } -itertools = { version = "0.14" } -lru = { default-features = false, version = "0.16" } -pretty_assertions = { version = "1.4" } -prost = { version = "0.14" } -protox = { version = "0.9" } +anyhow = { version = "1.0" } +assert_matches = { version = "1.5" } +async-trait = { version = "0.1" } +clap = { features = ["derive"], version = "4.5" } +fs-err = { version = "3" } +futures = { version = "0.3" } +hex = { version = "0.4" } +http = { version = "1.3" } +humantime = { version = "2.2" } +indexmap = { version = "2.12" } +itertools = { version = "0.14" } +lru = { default-features = false, version = "0.16" } +pretty_assertions = { version = "1.4" } +# breaking change `DecodeError::new` is not exposed anymore +# but is assumed public by some internal dependency +prost = { 
default-features = false, version = "=0.14.1" } +protox = { version = "=0.9.0" } rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } @@ -83,7 +86,7 @@ thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } toml = { version = "0.9" } -tonic = { version = "0.14" } +tonic = { default-features = false, version = "0.14" } tonic-prost = { version = "0.14" } tonic-prost-build = { version = "0.14" } tonic-reflection = { version = "0.14" } diff --git a/Makefile b/Makefile index 7a968862cf..72bdbce492 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,7 @@ help: WARNINGS=RUSTDOCFLAGS="-D warnings" BUILD_PROTO=BUILD_PROTO=1 +CONTAINER_RUNTIME ?= docker # -- linting -------------------------------------------------------------------------------------- @@ -15,6 +16,7 @@ BUILD_PROTO=BUILD_PROTO=1 clippy: ## Runs Clippy with configs cargo clippy --locked --all-targets --all-features --workspace -- -D warnings cargo clippy --locked --all-targets --all-features -p miden-remote-prover -- -D warnings + cargo clippy --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover -- -D warnings .PHONY: fix @@ -90,7 +92,7 @@ check: ## Check all targets and features for errors without code generation .PHONY: build build: ## Builds all crates and re-builds protobuf bindings for proto crates ${BUILD_PROTO} cargo build --locked --workspace - ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features # no-std compatible build + ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover # no-std compatible build # --- installing ---------------------------------------------------------------------------------- @@ 
-113,20 +115,20 @@ install-network-monitor: ## Installs network monitor binary # --- docker -------------------------------------------------------------------------------------- .PHONY: docker-build-node -docker-build-node: ## Builds the Miden node using Docker +docker-build-node: ## Builds the Miden node using Docker (override with CONTAINER_RUNTIME=podman) @CREATED=$$(date) && \ VERSION=$$(cat bin/node/Cargo.toml | grep -m 1 '^version' | cut -d '"' -f 2) && \ COMMIT=$$(git rev-parse HEAD) && \ - docker build --build-arg CREATED="$$CREATED" \ + $(CONTAINER_RUNTIME) build --build-arg CREATED="$$CREATED" \ --build-arg VERSION="$$VERSION" \ --build-arg COMMIT="$$COMMIT" \ -f bin/node/Dockerfile \ -t miden-node-image . .PHONY: docker-run-node -docker-run-node: ## Runs the Miden node as a Docker container - docker volume create miden-db - docker run --name miden-node \ +docker-run-node: ## Runs the Miden node as a Docker container (override with CONTAINER_RUNTIME=podman) + $(CONTAINER_RUNTIME) volume create miden-db + $(CONTAINER_RUNTIME) run --name miden-node \ -p 57291:57291 \ -v miden-db:/db \ -d miden-node-image diff --git a/bin/network-monitor/.env b/bin/network-monitor/.env index c5779257db..ad861da56d 100644 --- a/bin/network-monitor/.env +++ b/bin/network-monitor/.env @@ -3,16 +3,20 @@ MIDEN_MONITOR_PORT=3001 MIDEN_MONITOR_ENABLE_OTEL=true MIDEN_MONITOR_REQUEST_TIMEOUT=10s # rpc checks -MIDEN_MONITOR_RPC_URL=http://0.0.0.0:57291 +MIDEN_MONITOR_RPC_URL=https://rpc.devnet.miden.io/ MIDEN_MONITOR_STATUS_CHECK_INTERVAL=30s +MIDEN_MONITOR_STALE_CHAIN_TIP_THRESHOLD=1m # remote prover checks MIDEN_MONITOR_REMOTE_PROVER_URLS=https://tx-prover.devnet.miden.io/,https://batch-prover.devnet.miden.io/ MIDEN_MONITOR_REMOTE_PROVER_TEST_INTERVAL=2m # faucet checks -MIDEN_MONITOR_FAUCET_URL=http://localhost:8080 +MIDEN_MONITOR_FAUCET_URL=https://faucet-api.devnet.miden.io/ MIDEN_MONITOR_FAUCET_TEST_INTERVAL=2m # network transaction checks 
MIDEN_MONITOR_DISABLE_NTX_SERVICE=false MIDEN_MONITOR_COUNTER_FILEPATH=counter_account.mac MIDEN_MONITOR_WALLET_FILEPATH=wallet_account.mac MIDEN_MONITOR_COUNTER_INCREMENT_INTERVAL=30s +MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT=2m +# explorer checks +MIDEN_MONITOR_EXPLORER_URL=https://scan-backend-devnet-miden.eu-central-8.gateway.fm/graphql diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 772032b27f..64a1f19e14 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -20,10 +20,10 @@ axum = { version = "0.8" } clap = { features = ["env"], workspace = true } hex = { version = "0.4" } humantime = { workspace = true } -miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { features = ["std", "testing"], workspace = true } +miden-protocol = { features = ["std", "testing"], workspace = true } +miden-standards = { workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["std"], workspace = true } rand = { version = "0.9" } diff --git a/bin/network-monitor/README.md b/bin/network-monitor/README.md index 2bab71c902..2affde86de 100644 --- a/bin/network-monitor/README.md +++ b/bin/network-monitor/README.md @@ -30,16 +30,19 @@ miden-network-monitor start --faucet-url http://localhost:8080 --enable-otel - `--rpc-url`: RPC service URL (default: `http://localhost:50051`) - `--remote-prover-urls`: Comma-separated list of remote prover URLs. If omitted or empty, prover tasks are disabled. - `--faucet-url`: Faucet service URL for testing. If omitted, faucet testing is disabled. +- `--explorer-url`: Explorer service GraphQL endpoint. If omitted, explorer checks are disabled. - `--disable-ntx-service`: Disable the network transaction service checks (enabled by default). 
The network transaction service consists of two components: counter increment (sending increment transactions) and counter tracking (monitoring counter value changes). - `--remote-prover-test-interval`: Interval at which to test the remote provers services (default: `2m`) - `--faucet-test-interval`: Interval at which to test the faucet services (default: `2m`) - `--status-check-interval`: Interval at which to check the status of the services (default: `3s`) - `--request-timeout`: Timeout for outgoing requests (default: `10s`) +- `--stale-chain-tip-threshold`: Maximum time without a chain tip update before marking RPC as unhealthy (default: `1m`) - `--port, -p`: Web server port (default: `3000`) - `--enable-otel`: Enable OpenTelemetry tracing - `--wallet-filepath`: Path where the wallet account is located (default: `wallet_account.mac`) - `--counter-filepath`: Path where the network account is located (default: `counter_program.mac`) - `--counter-increment-interval`: Interval at which to send the increment counter transaction (default: `30s`) +- `--counter-latency-timeout`: Maximum time to wait for a counter update after submitting a transaction (default: `2m`) - `--help, -h`: Show help information - `--version, -V`: Show version information @@ -50,16 +53,19 @@ If command-line arguments are not provided, the application falls back to enviro - `MIDEN_MONITOR_RPC_URL`: RPC service URL - `MIDEN_MONITOR_REMOTE_PROVER_URLS`: Comma-separated list of remote prover URLs. If unset or empty, prover tasks are disabled. - `MIDEN_MONITOR_FAUCET_URL`: Faucet service URL for testing. If unset, faucet testing is disabled. +- `MIDEN_MONITOR_EXPLORER_URL`: Explorer service GraphQL endpoint. If unset, explorer checks are disabled. - `MIDEN_MONITOR_DISABLE_NTX_SERVICE`: Set to `true` to disable the network transaction service checks (enabled by default). This affects both counter increment and tracking components. 
- `MIDEN_MONITOR_REMOTE_PROVER_TEST_INTERVAL`: Interval at which to test the remote provers services - `MIDEN_MONITOR_FAUCET_TEST_INTERVAL`: Interval at which to test the faucet services - `MIDEN_MONITOR_STATUS_CHECK_INTERVAL`: Interval at which to check the status of the services - `MIDEN_MONITOR_REQUEST_TIMEOUT`: Timeout for outgoing requests +- `MIDEN_MONITOR_STALE_CHAIN_TIP_THRESHOLD`: Maximum time without a chain tip update before marking RPC as unhealthy - `MIDEN_MONITOR_PORT`: Web server port - `MIDEN_MONITOR_ENABLE_OTEL`: Enable OpenTelemetry tracing - `MIDEN_MONITOR_WALLET_FILEPATH`: Path where the wallet account is located - `MIDEN_MONITOR_COUNTER_FILEPATH`: Path where the network account is located - `MIDEN_MONITOR_COUNTER_INCREMENT_INTERVAL`: Interval at which to send the increment counter transaction +- `MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT`: Maximum time to wait for a counter update after submitting a transaction ## Commands @@ -143,6 +149,7 @@ The monitor application provides real-time status monitoring for the following M ### RPC Service - **Service Health**: Overall RPC service availability and status +- **Stale Chain Tip Detection**: Monitors chain tip progress and marks RPC as unhealthy if the chain tip hasn't changed within the configured threshold (default: 1 minute) - **Version Information**: RPC service version - **Genesis Commitment**: Network genesis commitment (with copy-to-clipboard functionality) - **Store Status**: @@ -151,6 +158,14 @@ The monitor application provides real-time status monitoring for the following M - **Block Producer Status**: - Block producer version and health +### Explorer +- **Service Health**: Explorer availability and freshness of the latest block +- **Latest Block Metadata**: + - Block height and timestamp + - Transactions, nullifiers, notes, and account updates counts + - Block, chain, and proof commitments (shortened display with copy-to-clipboard) +- **Block Delta**: The difference between the explorer's 
block height and the RPC's chain tip. If the difference is greater than a tolerance, a warning is displayed. This check is performed in the frontend. + ### Remote Provers - **Service Health**: Individual remote prover availability and status - **Version Information**: Remote prover service version @@ -175,18 +190,19 @@ The monitor application provides real-time status monitoring for the following M - Transaction and note ID tracking from successful mints - Automated testing on a configurable interval to verify faucet functionality -### Counter Increment Service -- **Service Health**: End-to-end transaction submission for counter increment +### Local Transactions (Counter Increment) +- **Service Health**: End-to-end local transaction submission for counter increment - **Metrics**: - Success/Failure counts for increment transactions - Last TX ID with copy-to-clipboard + - Latency in blocks from submission to observed counter update (with pending measurement tracking) -### Counter Tracking Service -- **Service Health**: Real-time monitoring of counter value changes +### Network Transactions (Counter Tracking) +- **Service Health**: Real-time monitoring of on-chain counter value changes - **Metrics**: - Current network account counter value (queried from RPC periodically) - Expected counter value based on successful increments sent - - Pending increments: How many transactions are queued/unprocessed + - Pending notes: How many transactions are queued/unprocessed - Last updated timestamp ## User Interface @@ -201,6 +217,31 @@ The web dashboard provides a clean, responsive interface with the following feat - **Interactive Elements**: Copy-to-clipboard functionality for genesis commitments, transaction IDs, and note IDs - **Responsive Design**: Optimized for both desktop and mobile viewing +### gRPC-Web Browser Probe + +The dashboard automatically probes RPC and Remote Prover services every 30 seconds using gRPC-Web protocol. 
This tests whether the browser can successfully communicate with these services. + +**What it checks:** +- Browser connectivity to the service endpoint +- CORS configuration (the probe is a real cross-origin request from the browser) +- gRPC-Web protocol handling (proper framing and trailers) +- Basic service availability (calls the `Status` endpoint) + +**Results displayed:** +- **gRPC-Web: OK** / **gRPC-Web: FAILED** status +- Response latency in milliseconds +- Error details (if failed) +- Time since last probe + +**Common failure scenarios:** +- **CORS / Network error**: The service is not configured to accept cross-origin requests from the browser, or the service is unreachable +- **HTTP 4xx/5xx**: The service returned an HTTP error (check server logs) +- **grpc-status non-zero**: The gRPC call failed at the application level + +**Note:** The probe uses the same URLs configured for `--rpc-url` and `--remote-prover-urls`. For the probe to work from a browser, these services must: +1. Have gRPC-Web support enabled (e.g., via Envoy, grpc-web proxy, or native tonic-web) +2. 
Allow CORS requests from the monitor's origin (or use `Access-Control-Allow-Origin: *`) + ## Account Management When the network transaction service is enabled, the monitor manages the necessary Miden accounts: diff --git a/bin/network-monitor/assets/index.css b/bin/network-monitor/assets/index.css index b375f10e10..26213b7173 100644 --- a/bin/network-monitor/assets/index.css +++ b/bin/network-monitor/assets/index.css @@ -383,7 +383,7 @@ body { font-family: "DM Mono", monospace; } -.worker-address { +.worker-name { font-weight: 500; color: #333; } @@ -450,6 +450,24 @@ body { font-weight: 500; } +.metric-value.warning-delta, +.warning-text { + color: #ff8c00; +} + +.warning-text { + font-weight: 500; + font-size: 12px; +} + +.warning-banner { + margin-top: 8px; + padding: 8px 12px; + border-radius: 4px; + background: rgba(255, 85, 0, 0.08); + border-left: 3px solid #ff8c00; +} + .test-metrics.healthy .metric-value { color: #22C55D; } @@ -499,3 +517,100 @@ body { grid-template-columns: repeat(auto-fit, minmax(280px, 1fr)); } } + +/* gRPC-Web Probe Styles */ +.probe-section { + margin-top: 12px; + padding-top: 8px; + border-top: 1px dashed #e0e0e0; +} + +.probe-spinner { + width: 12px; + height: 12px; + border: 2px solid #ccc; + border-top-color: #666; + border-radius: 50%; + animation: spin 0.8s linear infinite; +} + +@keyframes spin { + to { transform: rotate(360deg); } +} + +.probe-result { + margin-top: 8px; + padding: 6px 10px; + border-radius: 4px; + font-size: 11px; + display: flex; + flex-wrap: wrap; + align-items: center; + gap: 8px; +} + +.probe-result.probe-ok { + background-color: rgba(34, 197, 93, 0.1); + border-left: 3px solid #22C55D; +} + +.probe-result.probe-failed { + background-color: rgba(255, 85, 0, 0.1); + border-left: 3px solid #ff5500; +} + +.probe-result.probe-pending { + background-color: rgba(150, 150, 150, 0.1); + border-left: 3px solid #999; +} + +.probe-pending .probe-status-badge { + color: #666; + text-transform: none; +} + 
+.probe-status-badge { + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.probe-ok .probe-status-badge { + color: #22C55D; +} + +.probe-failed .probe-status-badge { + color: #ff5500; +} + +.probe-latency { + font-family: "DM Mono", monospace; + color: #666; +} + +.probe-error { + color: #dc2626; + font-size: 10px; + max-width: 200px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.probe-time { + color: #999; + font-size: 10px; + margin-left: auto; +} + +@media (max-width: 768px) { + .probe-result { + flex-direction: column; + align-items: flex-start; + gap: 4px; + } + + .probe-time { + margin-left: 0; + } +} diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index ffa773abdc..9b66d6c18f 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -45,451 +45,6 @@ - + diff --git a/bin/network-monitor/assets/index.js b/bin/network-monitor/assets/index.js new file mode 100644 index 0000000000..049de239d1 --- /dev/null +++ b/bin/network-monitor/assets/index.js @@ -0,0 +1,798 @@ +// Miden Network Monitor - Frontend JavaScript +// ================================================================================================ + +let statusData = null; +let updateInterval = null; +const EXPLORER_LAG_TOLERANCE = 20; // max allowed block delta vs RPC, roughly 1 minute + +// Store gRPC-Web probe results keyed by service URL +const grpcWebProbeResults = new Map(); + +// gRPC-Web probe implementation +// ================================================================================================ + +/** + * Performs a gRPC-Web probe to the given URL and path. + * This sends a real browser-originated gRPC-Web request to test connectivity, + * CORS configuration, and gRPC-Web protocol handling. 
+ * + * @param {string} baseUrl - The base URL of the service (e.g., "https://prover.example.com:443") + * @param {string} grpcPath - The gRPC method path (e.g., "/remote_prover.ProxyStatusApi/Status") + * @returns {Promise<{ok: boolean, latencyMs: number, error: string|null}>} + */ +async function probeGrpcWeb(baseUrl, grpcPath) { + const startTime = performance.now(); + + // Normalize URL: remove trailing slash from baseUrl + const normalizedUrl = baseUrl.replace(/\/+$/, ''); + const fullUrl = `${normalizedUrl}${grpcPath}`; + + // gRPC-Web frame for google.protobuf.Empty: + // - 1 byte compressed flag = 0x00 (not compressed) + // - 4 bytes big-endian length = 0x00000000 (empty message) + const emptyGrpcWebFrame = new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00]); + + // Build headers - RPC service requires custom Accept header + const headers = { + 'Content-Type': 'application/grpc-web+proto', + 'X-Grpc-Web': '1', + }; + + // The RPC service requires 'application/vnd.miden' in Accept header + // (this is the custom media type used by the Miden gRPC clients) + // The remote prover accepts standard gRPC-Web content types + if (grpcPath.startsWith('/rpc.')) { + headers['Accept'] = 'application/vnd.miden'; + } else { + headers['Accept'] = 'application/grpc-web+proto'; + } + + try { + const response = await fetch(fullUrl, { + method: 'POST', + headers, + body: emptyGrpcWebFrame, + }); + + const latencyMs = Math.round(performance.now() - startTime); + + if (!response.ok) { + return { + ok: false, + latencyMs, + error: `HTTP ${response.status}: ${response.statusText}`, + }; + } + + // Read the response body as bytes + const responseBytes = new Uint8Array(await response.arrayBuffer()); + + // Parse gRPC-Web response to extract grpc-status from trailers + const grpcStatus = parseGrpcWebTrailers(responseBytes); + + if (grpcStatus === '0' || grpcStatus === null) { + // grpc-status 0 means OK; null means no trailer found (might still be OK) + return { ok: true, latencyMs, 
error: null }; + } else { + return { + ok: false, + latencyMs, + error: `grpc-status: ${grpcStatus}`, + }; + } + } catch (err) { + const latencyMs = Math.round(performance.now() - startTime); + + // TypeError: Failed to fetch usually indicates CORS or network error + if (err instanceof TypeError) { + return { + ok: false, + latencyMs, + error: 'CORS / Network error: ' + err.message, + }; + } + + return { + ok: false, + latencyMs, + error: err.message || String(err), + }; + } +} + +/** + * Parses gRPC-Web response bytes to extract the grpc-status from trailers. + * gRPC-Web trailers are sent as a frame with flag 0x80. + * + * @param {Uint8Array} data - The response body bytes + * @returns {string|null} - The grpc-status value, or null if not found + */ +function parseGrpcWebTrailers(data) { + let offset = 0; + + while (offset + 5 <= data.length) { + const flag = data[offset]; + const length = (data[offset + 1] << 24) | + (data[offset + 2] << 16) | + (data[offset + 3] << 8) | + data[offset + 4]; + + offset += 5; + + if (offset + length > data.length) break; + + // Flag 0x80 indicates trailers + if (flag === 0x80) { + const trailerBytes = data.slice(offset, offset + length); + const trailerText = new TextDecoder().decode(trailerBytes); + + // Parse trailer headers (format: "key: value\r\n") + const lines = trailerText.split(/\r?\n/); + for (const line of lines) { + const match = line.match(/^grpc-status:\s*(\d+)/i); + if (match) { + return match[1]; + } + } + } + + offset += length; + } + + return null; +} + +// Interval for periodic gRPC-Web probing +let grpcWebProbeInterval = null; +const GRPC_WEB_PROBE_INTERVAL_MS = 30000; // Probe every 30 seconds + +/** + * Collects all gRPC-Web endpoints that need to be probed from the current status data. 
+ * + * @returns {Array<{serviceKey: string, baseUrl: string, grpcPath: string}>} + */ +function collectGrpcWebEndpoints() { + if (!statusData || !statusData.services) return []; + + const endpoints = []; + + for (const service of statusData.services) { + if (service.details) { + // RPC service + if (service.details.RpcStatus && service.details.RpcStatus.url) { + endpoints.push({ + serviceKey: service.details.RpcStatus.url, + baseUrl: service.details.RpcStatus.url, + grpcPath: '/rpc.Api/Status', + }); + } + // Remote Prover service + if (service.details.RemoteProverStatus && service.details.RemoteProverStatus.url) { + endpoints.push({ + serviceKey: service.details.RemoteProverStatus.url, + baseUrl: service.details.RemoteProverStatus.url, + grpcPath: '/remote_prover.ProxyStatusApi/Status', + }); + } + } + } + + return endpoints; +} + +/** + * Runs gRPC-Web probes for all collected endpoints. + * Results are stored in grpcWebProbeResults and display is updated. + */ +async function runGrpcWebProbes() { + const endpoints = collectGrpcWebEndpoints(); + if (endpoints.length === 0) return; + + // Run all probes in parallel + const probePromises = endpoints.map(async ({ serviceKey, baseUrl, grpcPath }) => { + const result = await probeGrpcWeb(baseUrl, grpcPath); + grpcWebProbeResults.set(serviceKey, { + ...result, + timestamp: Date.now(), + }); + }); + + await Promise.all(probePromises); + + // Re-render to show updated results + updateDisplay(); +} + +/** + * Renders the probe result badge for a service. + * + * @param {string} serviceKey - Unique key for the service + * @returns {string} - HTML string for the probe result + */ +function renderProbeResult(serviceKey) { + const result = grpcWebProbeResults.get(serviceKey); + if (!result) return ''; + + const statusClass = result.ok ? 'probe-ok' : 'probe-failed'; + const statusText = result.ok ? 'OK' : 'FAILED'; + const seconds = Math.floor((Date.now() - result.timestamp) / 1000); + const timeAgo = seconds < 60 ? 
`${seconds}s ago` : seconds < 3600 ? `${Math.floor(seconds / 60)}m ago` : `${Math.floor(seconds / 3600)}h ago`; + const errorDisplay = result.error && result.error.length > 40 ? result.error.substring(0, 40) + '...' : result.error; + + return ` +
+ gRPC-Web: ${statusText} + ${result.latencyMs}ms + ${result.error ? `${errorDisplay}` : ''} + ${timeAgo} +
+ `; +} + +/** + * Renders the gRPC-Web probe result section for a service. + * Shows "Checking..." if no result yet, otherwise shows the probe result. + * + * @param {string} serviceKey - Unique key for the service (the URL) + * @returns {string} - HTML string for the probe result section + */ +function renderGrpcWebProbeSection(serviceKey) { + const result = grpcWebProbeResults.get(serviceKey); + + if (!result) { + return ` +
+
+ + gRPC-Web: Checking... +
+
+ `; + } + + return ` +
+ ${renderProbeResult(serviceKey)} +
+ `; +} + + +const COPY_ICON = ` + + + + +`; + +function renderCopyButton(value, label) { + if (!value) return ''; + const escapedValue = JSON.stringify(value); + return ` + + `; +} + +function formatSuccessRate(successCount, failureCount) { + const total = successCount + failureCount; + if (!total) { + return 'N/A'; + } + + return `${((successCount / total) * 100).toFixed(1)}%`; +} + +async function fetchStatus() { + try { + const response = await fetch('/status'); + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + statusData = await response.json(); + updateDisplay(); + } catch (error) { + console.error('Error fetching status:', error); + showError('Failed to fetch network status: ' + error.message); + } +} + +// Merge Remote Prover status and test entries into a single card per prover. +function mergeProverStatusAndTests(services) { + const testsByName = new Map(); + const merged = []; + const usedTests = new Set(); + + services.forEach(service => { + if (service.details && service.details.RemoteProverTest) { + testsByName.set(service.name, service); + } + }); + + services.forEach(service => { + if (service.details && service.details.RemoteProverStatus) { + const test = testsByName.get(service.name); + if (test) { + usedTests.add(service.name); + } + merged.push({ + ...service, + testDetails: test?.details?.RemoteProverTest ?? null, + testStatus: test?.status ?? null, + testError: test?.error ?? 
null + }); + } else if (!(service.details && service.details.RemoteProverTest)) { + // Non-prover entries pass through unchanged + merged.push(service); + } + }); + + // Add orphaned tests (in case a test arrives before a status) + testsByName.forEach((test, name) => { + if (!usedTests.has(name)) { + merged.push({ + name, + status: test.status, + last_checked: test.last_checked, + error: test.error, + details: null, + testDetails: test.details.RemoteProverTest, + testStatus: test.status, + testError: test.error + }); + } + }); + + return merged; +} + +function updateDisplay() { + if (!statusData) return; + + const container = document.getElementById('status-container'); + const lastUpdated = document.getElementById('last-updated'); + const overallStatus = document.getElementById('overall-status'); + const servicesCount = document.getElementById('services-count'); + + // Update last updated time + const lastUpdateTime = new Date(statusData.last_updated * 1000); + lastUpdated.textContent = lastUpdateTime.toLocaleString(); + + // Group remote prover status + test into single cards + const processedServices = mergeProverStatusAndTests(statusData.services); + const rpcService = processedServices.find(s => s.details && s.details.RpcStatus); + const rpcChainTip = + rpcService?.details?.RpcStatus?.store_status?.chain_tip ?? + rpcService?.details?.RpcStatus?.block_producer_status?.chain_tip ?? + null; + + // Count healthy vs unhealthy services + const healthyServices = processedServices.filter(s => s.status === 'Healthy').length; + const totalServices = processedServices.length; + const allHealthy = healthyServices === totalServices; + + // Update footer + overallStatus.textContent = allHealthy ? 'All Systems Operational' : `${healthyServices}/${totalServices} Services Healthy`; + overallStatus.style.color = allHealthy ? 
'#22C55D' : '#ff5500'; + servicesCount.textContent = `${totalServices} Services`; + + // Generate status cards + const serviceCardsHtml = processedServices.map(service => { + const isHealthy = service.status === 'Healthy'; + const statusColor = isHealthy ? '#22C55D' : '#ff5500'; + const statusIcon = isHealthy ? '✓' : '✗'; + const numOrDash = value => isHealthy ? (value?.toLocaleString?.() ?? value ?? '-') : '-'; + const timeOrDash = ts => { + if (!isHealthy) return '-'; + return ts ? new Date(ts * 1000).toLocaleString() : '-'; + }; + const commitmentOrDash = (value, label) => isHealthy && value + ? ` + ${value.substring(0, 20)}... + ${renderCopyButton(value, label)} + ` + : '-'; + + const explorerStats = service.details?.ExplorerStatus; + const isExplorerService = service.name?.toLowerCase().includes('explorer'); + const deltaBlock = (isHealthy && explorerStats && rpcChainTip !== null) + ? explorerStats.block_number - rpcChainTip + : null; + const deltaWarning = + deltaBlock !== null && Math.abs(deltaBlock) > EXPLORER_LAG_TOLERANCE + ? `Explorer tip is ${Math.abs(deltaBlock)} blocks ${deltaBlock > 0 ? 'ahead' : 'behind'}` + : null; + let explorerWarningHtml = ''; + + let detailsHtml = ''; + if (service.details) { + const details = service.details; + detailsHtml = ` +
+ ${details.RpcStatus ? ` +
Version: ${details.RpcStatus.version}
+ ${details.RpcStatus.genesis_commitment ? ` +
+ Genesis: + 0x${details.RpcStatus.genesis_commitment.substring(0, 20)}... + ${renderCopyButton(details.RpcStatus.genesis_commitment, 'genesis commitment')} +
+ ` : ''} + ${details.RpcStatus.url ? renderGrpcWebProbeSection(details.RpcStatus.url) : ''} + ${details.RpcStatus.store_status ? ` +
+
Store
+
+ Version: + ${details.RpcStatus.store_status.version} +
+
+ Status: + ${details.RpcStatus.store_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.store_status.chain_tip} +
+
+ ` : ''} + ${details.RpcStatus.block_producer_status ? ` +
+
Block Producer
+
+ Version: + ${details.RpcStatus.block_producer_status.version} +
+
+ Status: + ${details.RpcStatus.block_producer_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.block_producer_status.chain_tip} +
+
+ Mempool stats: +
+ Unbatched TXs: + ${details.RpcStatus.block_producer_status.mempool.unbatched_transactions} +
+
+ Proposed Batches: + ${details.RpcStatus.block_producer_status.mempool.proposed_batches} +
+
+ Proven Batches: + ${details.RpcStatus.block_producer_status.mempool.proven_batches} +
+
+
+ ` : ''} + ` : ''} + ${details.RemoteProverStatus ? ` +
+ Prover Status (${details.RemoteProverStatus.url}): +
Version: ${details.RemoteProverStatus.version}
+
+ Supported Proof Type: ${details.RemoteProverStatus.supported_proof_type} +
+ ${details.RemoteProverStatus.workers && details.RemoteProverStatus.workers.length > 0 ? ` +
+ Workers (${details.RemoteProverStatus.workers.length}): + ${details.RemoteProverStatus.workers.map(worker => ` +
+ ${worker.name} - + ${worker.version} - + ${worker.status} +
+ `).join('')} +
+ ` : ''} + ${renderGrpcWebProbeSection(details.RemoteProverStatus.url)} +
+ ` : ''} + ${details.FaucetTest ? ` +
+ Faucet: +
+
+ Success Rate: + ${formatSuccessRate(details.FaucetTest.success_count, details.FaucetTest.failure_count)} +
+
+ Last Response Time: + ${details.FaucetTest.test_duration_ms}ms +
+ ${details.FaucetTest.last_tx_id ? ` +
+ Last TX ID: + ${details.FaucetTest.last_tx_id.substring(0, 16)}...${renderCopyButton(details.FaucetTest.last_tx_id, 'TX ID')} +
+ ` : ''} + ${details.FaucetTest.challenge_difficulty ? ` +
+ Last Challenge Difficulty: + ~${details.FaucetTest.challenge_difficulty} bits +
+ ` : ''} +
+
+ ${details.FaucetTest.faucet_metadata ? ` +
+ Faucet Token Info: +
+
+ Token ID: + ${details.FaucetTest.faucet_metadata.id.substring(0, 16)}...${renderCopyButton(details.FaucetTest.faucet_metadata.id, 'token ID')} +
+
+ Version: + ${details.FaucetTest.faucet_metadata.version || '-'} +
+
+ Current Issuance: + ${details.FaucetTest.faucet_metadata.issuance.toLocaleString()} +
+
+ Max Supply: + ${details.FaucetTest.faucet_metadata.max_supply.toLocaleString()} +
+
+ Decimals: + ${details.FaucetTest.faucet_metadata.decimals} +
+
+ Base Amount: + ${details.FaucetTest.faucet_metadata.base_amount.toLocaleString()} +
+
+ PoW Difficulty: + ${details.FaucetTest.faucet_metadata.pow_load_difficulty} +
+ +
+
+ ` : ''} + ` : ''} + ${details.NtxIncrement ? ` +
+ Local Transactions: +
+
+ Success Rate: + ${formatSuccessRate(details.NtxIncrement.success_count, details.NtxIncrement.failure_count)} +
+ ${details.NtxIncrement.last_latency_blocks !== null && details.NtxIncrement.last_latency_blocks !== undefined ? ` +
+ Latency: + ${details.NtxIncrement.last_latency_blocks} blocks +
+ ` : ''} + ${details.NtxIncrement.last_tx_id ? ` +
+ Last TX ID: + ${details.NtxIncrement.last_tx_id.substring(0, 16)}...${renderCopyButton(details.NtxIncrement.last_tx_id, 'TX ID')} +
+ ` : ''} +
+
+ ` : ''} + ${details.NtxTracking ? ` +
+ Network Transactions: +
+
+ Current Value: + ${details.NtxTracking.current_value ?? '-'} +
+ ${details.NtxTracking.expected_value ? ` +
+ Expected Value: + ${details.NtxTracking.expected_value} +
+ ` : ''} + ${details.NtxTracking.pending_increments !== null && details.NtxTracking.pending_increments !== undefined ? ` +
+ Pending Notes: + ${details.NtxTracking.pending_increments} +
+ ` : ''} + ${details.NtxTracking.last_updated ? ` +
+ Last Updated: + ${new Date(details.NtxTracking.last_updated * 1000).toLocaleString()} +
+ ` : ''} +
+
+ ` : ''} + ${service.testDetails ? ` +
+ Proof Generation Testing (${service.testDetails.proof_type}): +
+
+ Success Rate: + ${formatSuccessRate(service.testDetails.success_count, service.testDetails.failure_count)} +
+
+ Last Response Time: + ${service.testDetails.test_duration_ms}ms +
+
+ Last Proof Size: + ${(service.testDetails.proof_size_bytes / 1024).toFixed(2)} KB +
+
+
+ ` : ''} +
+ `; + } + + // Always render explorer block for explorer services, even if stats are missing. + if (isExplorerService) { + detailsHtml += ` +
+
+ Explorer: +
+ Block Height: + ${explorerStats ? numOrDash(explorerStats.block_number) : '-'} +
+
+ RPC Chain Tip: + ${isHealthy && rpcChainTip !== null ? rpcChainTip : '-'} +
+
+ Block Time: + ${explorerStats ? timeOrDash(explorerStats.timestamp) : '-'} +
+
+ Block Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.block_commitment, 'block commitment') : '-'} +
+
+ Chain Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.chain_commitment, 'chain commitment') : '-'} +
+
+ Proof Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.proof_commitment, 'proof commitment') : '-'} +
+
+ Transactions: + ${explorerStats ? numOrDash(explorerStats.number_of_transactions) : '-'} +
+
+ Nullifiers: + ${explorerStats ? numOrDash(explorerStats.number_of_nullifiers) : '-'} +
+
+ Notes: + ${explorerStats ? numOrDash(explorerStats.number_of_notes) : '-'} +
+
+ Account Updates: + ${explorerStats ? numOrDash(explorerStats.number_of_account_updates) : '-'} +
+
+
+ `; + + if (deltaWarning) { + explorerWarningHtml = ` +
+
+ Explorer vs RPC +
+
${deltaWarning}
+
+ `; + } + } + + return ` +
+
+
${service.name}
+
+ ${statusIcon} ${service.status.toUpperCase()} +
+
+
+ ${detailsHtml} + ${explorerWarningHtml} +
+
+ Last checked: ${new Date(service.last_checked * 1000).toLocaleString()} +
+
+ `; + }).join(''); + + container.innerHTML = serviceCardsHtml; + + // Add refresh button that spans the full grid + container.innerHTML += ` +
+ +
+ `; +} + +function showError(message) { + const container = document.getElementById('status-container'); + container.innerHTML = ` +
+ ${message} +
+
+ +
+ `; +} + +async function copyToClipboard(text, event) { + const button = event.target.closest('.copy-button'); + if (!button) return; + + try { + await navigator.clipboard.writeText(text); + // Show a brief success indicator + const originalContent = button.innerHTML; + button.innerHTML = ''; + button.style.color = '#22C55D'; + + setTimeout(() => { + button.innerHTML = originalContent; + button.style.color = ''; + }, 2000); + } catch (err) { + console.error('Failed to copy to clipboard:', err); + // Show error feedback on button + button.style.color = '#ff5500'; + setTimeout(() => { + button.style.color = ''; + }, 2000); + } +} + +// Initialize on DOM ready +document.addEventListener('DOMContentLoaded', () => { + // Initial load and set up auto-refresh + fetchStatus().then(() => { + // Start gRPC-Web probing after initial status fetch + runGrpcWebProbes(); + grpcWebProbeInterval = setInterval(runGrpcWebProbes, GRPC_WEB_PROBE_INTERVAL_MS); + }); + updateInterval = setInterval(fetchStatus, 10000); // Refresh every 10 seconds +}); + +// Clean up on page unload +window.addEventListener('beforeunload', () => { + if (updateInterval) { + clearInterval(updateInterval); + } + if (grpcWebProbeInterval) { + clearInterval(grpcWebProbeInterval); + } +}); + diff --git a/bin/network-monitor/src/assets/counter_program.masm b/bin/network-monitor/src/assets/counter_program.masm index 60cd146bad..9cd6536f45 100644 --- a/bin/network-monitor/src/assets/counter_program.masm +++ b/bin/network-monitor/src/assets/counter_program.masm @@ -1,21 +1,24 @@ # Counter program for network monitoring with note authentication # Storage layout: -# - Slot 0: counter value (u64) -# - Slot 1: authorized wallet account id as [prefix, suffix, 0, 0] +# - OWNER_SLOT: authorized wallet account id as [prefix, suffix, 0, 0] +# - COUNTER_SLOT: counter value (u64) -use.miden::active_account -use.miden::native_account -use.miden::active_note -use.miden::account_id -use.miden::tx +use miden::core::sys +use 
miden::protocol::active_account +use miden::protocol::native_account +use miden::protocol::active_note +use miden::protocol::account_id +use miden::protocol::tx -use.std::sys + +const COUNTER_SLOT = word("miden::monitor::counter_contract::counter") +const OWNER_SLOT = word("miden::monitor::counter_contract::owner") # Increment function with note authentication # => [] -export.increment - # Ensure the note sender matches the authorized wallet stored in slot 1. - push.1 exec.active_account::get_item +pub proc increment + # Ensure the note sender matches the authorized wallet. + push.OWNER_SLOT[0..2] exec.active_account::get_item # => [owner_prefix, owner_suffix, 0, 0] exec.active_note::get_sender @@ -27,13 +30,13 @@ export.increment assert.err="Note sender not authorized" drop drop # => [] - push.0 exec.active_account::get_item + push.COUNTER_SLOT[0..2] exec.active_account::get_item # => [count, 0, 0, 0] - + push.1 add # => [count+1] - push.0 exec.native_account::set_item + push.COUNTER_SLOT[0..2] exec.native_account::set_item # => [count, 0, 0, 0] dropw @@ -42,8 +45,8 @@ end # Get the counter (no auth required) # => [count] -export.get_count - push.0 exec.active_account::get_item +pub proc get_count + push.COUNTER_SLOT[0..2] exec.active_account::get_item # => [count, 0, 0, 0] exec.sys::truncate_stack diff --git a/bin/network-monitor/src/assets/increment_counter.masm b/bin/network-monitor/src/assets/increment_counter.masm index 76c4bdcb6f..4a835bdd8b 100644 --- a/bin/network-monitor/src/assets/increment_counter.masm +++ b/bin/network-monitor/src/assets/increment_counter.masm @@ -2,7 +2,7 @@ # This script is executed as a note and calls the # `counter_contract::increment` entrypoint. 
-use.external_contract::counter_contract +use external_contract::counter_contract begin call.counter_contract::increment diff --git a/bin/network-monitor/src/commands/start.rs b/bin/network-monitor/src/commands/start.rs index 3f1cbca6b2..4262db445a 100644 --- a/bin/network-monitor/src/commands/start.rs +++ b/bin/network-monitor/src/commands/start.rs @@ -4,7 +4,7 @@ use anyhow::Result; use miden_node_utils::logging::OpenTelemetry; -use tracing::{info, instrument, warn}; +use tracing::{debug, info, instrument, warn}; use crate::COMPONENT; use crate::config::MonitorConfig; @@ -15,7 +15,16 @@ use crate::monitor::tasks::Tasks; /// /// This function initializes all monitoring tasks including RPC status checking, /// remote prover testing, faucet testing, and the web frontend. -#[instrument(target = COMPONENT, name = "start-monitor", skip_all, fields(port = %config.port))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.start_monitor", + skip_all, + level = "info", + fields(port = %config.port), + ret(level = "debug"), + err +)] pub async fn start_monitor(config: MonitorConfig) -> Result<()> { // Load configuration from command-line arguments and environment variables info!("Loaded configuration: {:?}", config); @@ -29,17 +38,28 @@ pub async fn start_monitor(config: MonitorConfig) -> Result<()> { let mut tasks = Tasks::new(); // Initialize the RPC Status endpoint checker task. + debug!(target: COMPONENT, "Initializing RPC status checker"); let rpc_rx = tasks.spawn_rpc_checker(&config).await?; + // Initialize the explorer status checker task. + let explorer_rx = if config.explorer_url.is_some() { + Some(tasks.spawn_explorer_checker(&config).await?) + } else { + None + }; + // Initialize the prover checkers & tests tasks, only if URLs were provided. 
let prover_rxs = if config.remote_prover_urls.is_empty() { + debug!(target: COMPONENT, "No remote prover URLs configured, skipping prover tasks"); Vec::new() } else { + debug!(target: COMPONENT, "Initializing prover checkers and tests"); tasks.spawn_prover_tasks(&config).await? }; // Initialize the faucet testing task. let faucet_rx = if config.faucet_url.is_some() { + debug!(target: COMPONENT, "Initializing faucet testing task"); Some(tasks.spawn_faucet(&config)) } else { warn!("Faucet URL not configured, skipping faucet testing"); @@ -48,19 +68,23 @@ pub async fn start_monitor(config: MonitorConfig) -> Result<()> { // Initialize the counter increment and tracking tasks only if enabled. let (ntx_increment_rx, ntx_tracking_rx) = if config.disable_ntx_service { + debug!(target: COMPONENT, "NTX service disabled, skipping counter increment task"); (None, None) } else { + debug!(target: COMPONENT, "Initializing counter increment task"); let (increment_rx, tracking_rx) = tasks.spawn_ntx_service(&config).await?; (Some(increment_rx), Some(tracking_rx)) }; // Initialize HTTP server. + debug!(target: COMPONENT, "Initializing HTTP server"); let server_state = ServerState { rpc: rpc_rx, provers: prover_rxs, faucet: faucet_rx, ntx_increment: ntx_increment_rx, ntx_tracking: ntx_tracking_rx, + explorer: explorer_rx, }; tasks.spawn_http_server(server_state, &config); diff --git a/bin/network-monitor/src/config.rs b/bin/network-monitor/src/config.rs index fa2af59e25..7443b759f6 100644 --- a/bin/network-monitor/src/config.rs +++ b/bin/network-monitor/src/config.rs @@ -138,6 +138,16 @@ pub struct MonitorConfig { )] pub counter_increment_interval: Duration, + /// Maximum time to wait for the counter update after submitting a transaction. 
+ #[arg( + long = "counter-latency-timeout", + env = "MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT", + default_value = "2m", + value_parser = humantime::parse_duration, + help = "Maximum time to wait for a counter update after submitting a transaction" + )] + pub counter_latency_timeout: Duration, + /// The timeout for the outgoing requests. #[arg( long = "request-timeout", @@ -147,4 +157,25 @@ pub struct MonitorConfig { help = "The timeout for the outgoing requests" )] pub request_timeout: Duration, + + /// The URL of the explorer service. + #[arg( + long = "explorer-url", + env = "MIDEN_MONITOR_EXPLORER_URL", + help = "The URL of the explorer service" + )] + pub explorer_url: Option, + + /// Maximum time without a chain tip update before marking RPC as unhealthy. + /// + /// If the chain tip does not increment within this duration, the RPC service will be + /// marked as unhealthy with a stale chain tip error. + #[arg( + long = "stale-chain-tip-threshold", + env = "MIDEN_MONITOR_STALE_CHAIN_TIP_THRESHOLD", + default_value = "1m", + value_parser = humantime::parse_duration, + help = "Maximum time without a chain tip update before marking RPC as unhealthy" + )] + pub stale_chain_tip_threshold: Duration, } diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index 7bf16618bb..c044267331 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -6,22 +6,21 @@ use std::path::Path; use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::Instant; use anyhow::{Context, Result}; -use miden_lib::AuthScheme; -use miden_lib::account::interface::AccountInterface; -use miden_lib::utils::ScriptBuilder; -use miden_node_proto::clients::{Builder, Rpc, RpcClient}; -use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; +use miden_node_proto::clients::RpcClient; +use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use 
miden_node_proto::generated::transaction::ProvenTransaction; -use miden_objects::account::auth::AuthSecretKey; -use miden_objects::account::{Account, AccountFile, AccountHeader, AccountId}; -use miden_objects::assembly::Library; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::crypto::dsa::rpo_falcon512::SecretKey; -use miden_objects::note::{ +use miden_protocol::account::auth::AuthSecretKey; +use miden_protocol::account::{Account, AccountFile, AccountHeader, AccountId}; +use miden_protocol::assembly::Library; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; +use miden_protocol::note::{ Note, NoteAssets, + NoteAttachment, NoteExecutionHint, NoteInputs, NoteMetadata, @@ -30,38 +29,38 @@ use miden_objects::note::{ NoteTag, NoteType, }; -use miden_objects::transaction::{InputNotes, PartialBlockchain, TransactionArgs}; -use miden_objects::utils::Deserializable; -use miden_objects::{Felt, Word, ZERO}; +use miden_protocol::transaction::{InputNotes, PartialBlockchain, TransactionArgs}; +use miden_protocol::utils::Deserializable; +use miden_protocol::{Felt, Word}; +use miden_standards::account::interface::{AccountInterface, AccountInterfaceExt}; +use miden_standards::code_builder::CodeBuilder; +use miden_standards::note::NetworkAccountTarget; use miden_tx::auth::BasicAuthenticator; use miden_tx::utils::Serializable; use miden_tx::{LocalTransactionProver, TransactionExecutor}; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; -use tokio::sync::watch; +use tokio::sync::{Mutex, watch}; use tracing::{error, info, instrument, warn}; use crate::COMPONENT; use crate::config::MonitorConfig; -use crate::deploy::{MonitorDataStore, get_counter_library}; +use crate::deploy::counter::COUNTER_SLOT_NAME; +use crate::deploy::{MonitorDataStore, create_genesis_aware_rpc_client, get_counter_library}; use crate::status::{ CounterTrackingDetails, IncrementDetails, + PendingLatencyDetails, 
ServiceDetails, ServiceStatus, Status, }; -async fn create_rpc_client(config: &MonitorConfig) -> Result { - Builder::new(config.rpc_url.clone()) - .with_tls() - .context("Failed to configure TLS for RPC client") - .expect("TLS is enabled") - .with_timeout(config.request_timeout) - .without_metadata_version() - .without_metadata_genesis() - .connect::() - .await +#[derive(Debug, Default, Clone)] +pub struct LatencyState { + pending: Option, + pending_started: Option, + last_latency_blocks: Option, } /// Get the genesis block header. @@ -87,51 +86,220 @@ async fn get_genesis_block_header(rpc_client: &mut RpcClient) -> Result Result> { + let request = build_account_request(account_id, false); + let resp = rpc_client.get_account(request).await?.into_inner(); + + let Some(details) = resp.details else { + return Ok(None); + }; + + let storage_details = details.storage_details.context("missing storage details")?; + let storage_header = storage_details.header.context("missing storage header")?; + + Ok(Some(storage_header)) +} + /// Fetch the latest nonce of the given account from RPC. async fn fetch_counter_value( rpc_client: &mut RpcClient, account_id: AccountId, ) -> Result> { - let id_bytes: [u8; 15] = account_id.into(); - let req = miden_node_proto::generated::account::AccountId { id: id_bytes.to_vec() }; - let resp = rpc_client.get_account_details(req).await?.into_inner(); - if let Some(raw) = resp.details { - let account = Account::read_from_bytes(&raw) - .map_err(|e| anyhow::anyhow!("failed to deserialize account details: {e}"))?; + let Some(storage_header) = fetch_account_storage_header(rpc_client, account_id).await? 
else { + return Ok(None); + }; + + let counter_slot = storage_header + .slots + .iter() + .find(|slot| slot.slot_name == COUNTER_SLOT_NAME.as_str()) + .context(format!("counter slot '{}' not found", COUNTER_SLOT_NAME.as_str()))?; - let storage_slot = account.storage().slots().first().expect("storage slot is always value"); - let word = storage_slot.value(); - let value = word.as_elements().last().expect("a word is always 4 elements").as_int(); + // The counter value is stored as a Word, with the actual u64 value in the last element + let slot_value: Word = counter_slot + .commitment + .as_ref() + .context("missing storage slot value")? + .try_into() + .context("failed to convert slot value to word")?; - Ok(Some(value)) + let value = slot_value.as_elements().last().expect("Word has 4 elements").as_int(); + + Ok(Some(value)) +} + +/// Build an account request for the given account ID. +/// +/// If `include_code_and_vault` is true, uses dummy commitments to force the server +/// to return code and vault data (server only returns data when our commitment differs). +fn build_account_request( + account_id: AccountId, + include_code_and_vault: bool, +) -> miden_node_proto::generated::rpc::AccountRequest { + let id_bytes: [u8; 15] = account_id.into(); + let account_id_proto = + miden_node_proto::generated::account::AccountId { id: id_bytes.to_vec() }; + + let (code_commitment, asset_vault_commitment) = if include_code_and_vault { + let dummy: miden_node_proto::generated::primitives::Digest = Word::default().into(); + (Some(dummy), Some(dummy)) } else { - Ok(None) + (None, None) + }; + + miden_node_proto::generated::rpc::AccountRequest { + account_id: Some(account_id_proto), + block_num: None, + details: Some(miden_node_proto::generated::rpc::account_request::AccountDetailRequest { + code_commitment, + asset_vault_commitment, + storage_maps: vec![], + }), } } +/// Fetch an account from RPC and reconstruct the full Account. 
+/// +/// Uses dummy commitments to force the server to return all data (code, vault, storage header). +/// Only supports accounts with value slots; returns an error if storage maps are present. async fn fetch_wallet_account( rpc_client: &mut RpcClient, account_id: AccountId, ) -> Result> { - let id_bytes: [u8; 15] = account_id.into(); - let req = miden_node_proto::generated::account::AccountId { id: id_bytes.to_vec() }; - let resp = rpc_client.get_account_details(req).await; + use miden_protocol::account::AccountCode; + use miden_protocol::asset::AssetVault; - // If the RPC call fails, return None - if resp.is_err() { - return Ok(None); - } + let request = build_account_request(account_id, true); - let Some(account_details) = resp.expect("Previously checked for error").into_inner().details - else { + let response = match rpc_client.get_account(request).await { + Ok(response) => response.into_inner(), + Err(e) => { + warn!(account.id = %account_id, err = %e, "failed to fetch wallet account via RPC"); + return Ok(None); + }, + }; + + let Some(details) = response.details else { + if response.witness.is_some() { + info!( + account.id = %account_id, + "account found on-chain but cannot reconstruct full account from RPC response" + ); + } return Ok(None); }; - let account = Account::read_from_bytes(&account_details) - .map_err(|e| anyhow::anyhow!("failed to deserialize account details: {e}"))?; + let header = details.header.context("missing account header")?; + let nonce: u64 = header.nonce; + + let code = details + .code + .map(|code_bytes| AccountCode::read_from_bytes(&code_bytes)) + .transpose() + .context("failed to deserialize account code")? 
+ .context("server did not return account code")?; + + let vault = match details.vault_details { + Some(vault_details) if vault_details.too_many_assets => { + anyhow::bail!("account {account_id} has too many assets, cannot fetch full account"); + }, + Some(vault_details) => { + let assets: Vec = vault_details + .assets + .into_iter() + .map(TryInto::try_into) + .collect::>() + .context("failed to convert assets")?; + AssetVault::new(&assets).context("failed to create vault")? + }, + None => anyhow::bail!("server did not return asset vault for account {account_id}"), + }; + + let storage_details = details.storage_details.context("missing storage details")?; + let storage = build_account_storage(storage_details)?; + + let account = Account::new(account_id, vault, storage, code, Felt::new(nonce), None) + .context("failed to create account")?; + + // Sanity check: verify reconstructed account matches header commitments + let expected_code_commitment: Word = header + .code_commitment + .context("missing code commitment in header")? + .try_into() + .context("invalid code commitment")?; + let expected_vault_root: Word = header + .vault_root + .context("missing vault root in header")? + .try_into() + .context("invalid vault root")?; + let expected_storage_commitment: Word = header + .storage_commitment + .context("missing storage commitment in header")? 
+ .try_into() + .context("invalid storage commitment")?; + + anyhow::ensure!( + account.code().commitment() == expected_code_commitment, + "code commitment mismatch: rebuilt={:?}, expected={:?}", + account.code().commitment(), + expected_code_commitment + ); + anyhow::ensure!( + account.vault().root() == expected_vault_root, + "vault root mismatch: rebuilt={:?}, expected={:?}", + account.vault().root(), + expected_vault_root + ); + anyhow::ensure!( + account.storage().to_commitment() == expected_storage_commitment, + "storage commitment mismatch: rebuilt={:?}, expected={:?}", + account.storage().to_commitment(), + expected_storage_commitment + ); + + info!(account.id = %account_id, "fetched wallet account from RPC"); Ok(Some(account)) } +/// Build account storage from the storage details returned by the server. +/// +/// This function only supports accounts with value slots. If any storage map slots +/// are encountered, an error is returned since the monitor only uses simple accounts. +fn build_account_storage( + storage_details: miden_node_proto::generated::rpc::AccountStorageDetails, +) -> Result { + use miden_protocol::account::{AccountStorage, StorageSlot}; + + let storage_header = storage_details.header.context("missing storage header")?; + + let mut slots = Vec::new(); + for slot in storage_header.slots { + let slot_name = miden_protocol::account::StorageSlotName::new(slot.slot_name.clone()) + .context("invalid slot name")?; + let value: Word = slot + .commitment + .context("missing slot value")? 
+ .try_into() + .context("invalid slot value")?; + + // slot_type: 0 = Value, 1 = Map + anyhow::ensure!( + slot.slot_type == 0, + "storage map slots are not supported for this account" + ); + + slots.push(StorageSlot::with_value(slot_name, value)); + } + + AccountStorage::new(slots).context("failed to create account storage") +} + async fn setup_increment_task( config: MonitorConfig, rpc_client: &mut RpcClient, @@ -152,7 +320,7 @@ async fn setup_increment_task( .await? .unwrap_or(wallet_account_file.account.clone()); - let AuthSecretKey::RpoFalcon512(secret_key) = wallet_account_file + let AuthSecretKey::Falcon512Rpo(secret_key) = wallet_account_file .auth_secret_keys .first() .expect("wallet account file should have one auth secret key") @@ -207,14 +375,15 @@ async fn setup_increment_task( /// # Returns /// /// This function runs indefinitely, only returning on error. -#[instrument(target = COMPONENT, name = "run-increment-task", skip_all, ret(level = "debug"))] pub async fn run_increment_task( config: MonitorConfig, tx: watch::Sender, expected_counter_value: Arc, + latency_state: Arc>, ) -> Result<()> { // Create RPC client - let mut rpc_client = create_rpc_client(&config).await?; + let mut rpc_client = + create_genesis_aware_rpc_client(&config.rpc_url, config.request_timeout).await?; let ( mut details, @@ -232,7 +401,9 @@ pub async fn run_increment_task( loop { interval.tick().await; - let last_error = match create_and_submit_network_note( + let mut last_error = None; + + match create_and_submit_network_note( &wallet_account, &counter_account, &secret_key, @@ -244,16 +415,34 @@ pub async fn run_increment_task( ) .await { - Ok((tx_id, final_account, _block_height)) => handle_increment_success( - &mut wallet_account, - &final_account, - &mut data_store, - &mut details, - tx_id, - &expected_counter_value, - )?, - Err(e) => Some(handle_increment_failure(&mut details, &e)), - }; + Ok((tx_id, final_account, block_height)) => { + let target_value = 
handle_increment_success( + &mut wallet_account, + &final_account, + &mut data_store, + &mut details, + tx_id, + &expected_counter_value, + )?; + + { + let mut guard = latency_state.lock().await; + guard.pending = Some(PendingLatencyDetails { + submit_height: block_height.as_u32(), + target_value, + }); + guard.pending_started = Some(Instant::now()); + } + }, + Err(e) => { + last_error = Some(handle_increment_failure(&mut details, &e)); + }, + } + + { + let guard = latency_state.lock().await; + details.last_latency_blocks = guard.last_latency_blocks; + } let status = build_increment_status(&details, last_error); send_status(&tx, status)?; @@ -261,6 +450,8 @@ pub async fn run_increment_task( } /// Handle the success path for increment operations. +/// +/// Returns the next expected counter value after a successful increment. fn handle_increment_success( wallet_account: &mut Account, final_account: &AccountHeader, @@ -268,7 +459,7 @@ fn handle_increment_success( details: &mut IncrementDetails, tx_id: String, expected_counter_value: &Arc, -) -> Result> { +) -> Result { let updated_wallet = Account::new( wallet_account.id(), wallet_account.vault().clone(), @@ -284,9 +475,9 @@ fn handle_increment_success( details.last_tx_id = Some(tx_id); // Increment the expected counter value - expected_counter_value.fetch_add(1, Ordering::Relaxed); + let new_expected = expected_counter_value.fetch_add(1, Ordering::Relaxed) + 1; - Ok(None) + Ok(new_expected) } /// Handle the failure path when creating/submitting the network note fails. @@ -298,7 +489,11 @@ fn handle_increment_failure(details: &mut IncrementDetails, error: &anyhow::Erro /// Build a `ServiceStatus` snapshot from the current increment details and last error. 
fn build_increment_status(details: &IncrementDetails, last_error: Option) -> ServiceStatus { - let status = if details.failure_count == 0 { + let status = if last_error.is_some() { + // If the most recent attempt failed, surface the service as unhealthy so the + // dashboard reflects that the increment pipeline is not currently working. + Status::Unhealthy + } else if details.failure_count == 0 { Status::Healthy } else if details.success_count == 0 { Status::Unhealthy @@ -307,7 +502,7 @@ fn build_increment_status(details: &IncrementDetails, last_error: Option }; ServiceStatus { - name: "Counter Increment".to_string(), + name: "Local Transactions".to_string(), status, last_checked: crate::monitor::tasks::current_unix_timestamp_secs(), error: last_error, @@ -339,14 +534,15 @@ fn send_status(tx: &watch::Sender, status: ServiceStatus) -> Resu /// # Returns /// /// This function runs indefinitely, only returning on error. -#[instrument(target = COMPONENT, name = "run-counter-tracking-task", skip_all, ret(level = "debug"))] pub async fn run_counter_tracking_task( config: MonitorConfig, tx: watch::Sender, expected_counter_value: Arc, + latency_state: Arc>, ) -> Result<()> { // Create RPC client - let mut rpc_client = create_rpc_client(&config).await?; + let mut rpc_client = + create_genesis_aware_rpc_client(&config.rpc_url, config.request_timeout).await?; // Load counter account to get the account ID let counter_account = match load_counter_account(&config.counter_filepath) { @@ -358,11 +554,45 @@ pub async fn run_counter_tracking_task( }; let mut details = CounterTrackingDetails::default(); + initialize_counter_tracking_state( + &mut rpc_client, + &counter_account, + &expected_counter_value, + &mut details, + ) + .await; + + let mut poll_interval = tokio::time::interval(config.counter_increment_interval / 2); + + loop { + poll_interval.tick().await; - // Initialize the expected counter value by fetching the current value from the node - match fetch_counter_value(&mut 
rpc_client, counter_account.id()).await { + let last_error = poll_counter_once( + &mut rpc_client, + &counter_account, + &expected_counter_value, + &latency_state, + &mut details, + &config, + ) + .await; + let status = build_tracking_status(&details, last_error); + send_status(&tx, status)?; + } +} + +/// Initialize tracking state by fetching the current counter value from the node. +/// +/// Populates `expected_counter_value` and seeds `details` with the latest observed +/// values so the first poll iteration starts from a consistent snapshot. +async fn initialize_counter_tracking_state( + rpc_client: &mut RpcClient, + counter_account: &Account, + expected_counter_value: &Arc, + details: &mut CounterTrackingDetails, +) { + match fetch_counter_value(rpc_client, counter_account.id()).await { Ok(Some(initial_value)) => { - // Set the expected value to the current value from the node expected_counter_value.store(initial_value, Ordering::Relaxed); details.current_value = Some(initial_value); details.expected_value = Some(initial_value); @@ -370,61 +600,121 @@ pub async fn run_counter_tracking_task( info!("Initialized counter tracking with value: {}", initial_value); }, Ok(None) => { - // Counter doesn't exist yet, initialize to 0 expected_counter_value.store(0, Ordering::Relaxed); warn!("Counter account not found, initializing expected value to 0"); }, Err(e) => { - // Failed to fetch, initialize to 0 but log the error expected_counter_value.store(0, Ordering::Relaxed); error!("Failed to fetch initial counter value, initializing to 0: {:?}", e); }, } +} - let mut poll_interval = tokio::time::interval(config.counter_increment_interval / 2); +/// Poll the counter once, updating details and latency tracking state. +/// +/// Returns a human-readable error string when the poll fails or latency tracking +/// cannot complete; otherwise returns `None`. 
+async fn poll_counter_once( + rpc_client: &mut RpcClient, + counter_account: &Account, + expected_counter_value: &Arc, + latency_state: &Arc>, + details: &mut CounterTrackingDetails, + config: &MonitorConfig, +) -> Option { + let mut last_error = None; + let current_time = crate::monitor::tasks::current_unix_timestamp_secs(); + + match fetch_counter_value(rpc_client, counter_account.id()).await { + Ok(Some(value)) => { + details.current_value = Some(value); + details.last_updated = Some(current_time); + + update_expected_and_pending(details, expected_counter_value, value); + handle_latency_tracking(rpc_client, latency_state, config, value, &mut last_error) + .await; + }, + Ok(None) => { + // Counter value not available, but not an error + }, + Err(e) => { + error!("Failed to fetch counter value: {:?}", e); + last_error = Some(format!("fetch counter value failed: {e}")); + }, + } - loop { - poll_interval.tick().await; + last_error +} - let current_time = crate::monitor::tasks::current_unix_timestamp_secs(); - let last_error = match fetch_counter_value(&mut rpc_client, counter_account.id()).await { - Ok(Some(value)) => { - // Update current value and timestamp - details.current_value = Some(value); - details.last_updated = Some(current_time); - - // Get expected value and calculate pending increments - let expected = expected_counter_value.load(Ordering::Relaxed); - details.expected_value = Some(expected); - - // Calculate how many increments are pending (expected - current) - // Use saturating_sub to avoid negative values if current > expected (shouldn't - // happen normally, but could due to race conditions) - if expected >= value { - details.pending_increments = Some(expected - value); - } else { - // This shouldn't happen, but log it if it does - warn!( - "Expected counter value ({}) is less than current value ({}), setting pending to 0", - expected, value - ); - details.pending_increments = Some(0); - } +/// Update expected and pending counters based on the 
latest observed value. +fn update_expected_and_pending( + details: &mut CounterTrackingDetails, + expected_counter_value: &Arc, + observed_value: u64, +) { + let expected = expected_counter_value.load(Ordering::Relaxed); + details.expected_value = Some(expected); - None - }, - Ok(None) => { - // Counter value not available, but not an error - None - }, - Err(e) => { - error!("Failed to fetch counter value: {:?}", e); - Some(format!("fetch counter value failed: {e}")) - }, - }; + if expected >= observed_value { + details.pending_increments = Some(expected - observed_value); + } else { + warn!( + "Expected counter value ({}) is less than current value ({}), setting pending to 0", + expected, observed_value + ); + details.pending_increments = Some(0); + } +} - let status = build_tracking_status(&details, last_error); - send_status(&tx, status)?; +/// Update latency tracking state, performing RPC as needed while minimizing lock hold time. +/// +/// Populates `last_error` when latency bookkeeping fails or times out. 
+async fn handle_latency_tracking( + rpc_client: &mut RpcClient, + latency_state: &Arc>, + config: &MonitorConfig, + observed_value: u64, + last_error: &mut Option, +) { + let (pending, pending_started) = { + let guard = latency_state.lock().await; + (guard.pending.clone(), guard.pending_started) + }; + + if let Some(pending) = pending { + if observed_value >= pending.target_value { + match fetch_chain_tip(rpc_client).await { + Ok(observed_height) => { + let latency_blocks = observed_height.saturating_sub(pending.submit_height); + let mut guard = latency_state.lock().await; + if guard.pending.as_ref().map(|p| p.target_value) == Some(pending.target_value) + { + guard.last_latency_blocks = Some(latency_blocks); + guard.pending = None; + guard.pending_started = None; + } + }, + Err(e) => { + *last_error = Some(format!("Failed to fetch chain tip for latency calc: {e}")); + }, + } + } else if let Some(started) = pending_started { + if Instant::now().saturating_duration_since(started) >= config.counter_latency_timeout { + warn!( + "Latency measurement timed out after {:?} for target value {}", + config.counter_latency_timeout, pending.target_value + ); + let mut guard = latency_state.lock().await; + if guard.pending.as_ref().map(|p| p.target_value) == Some(pending.target_value) { + guard.pending = None; + guard.pending_started = None; + } + *last_error = Some(format!( + "Timed out after {:?} waiting for counter to reach {}", + config.counter_latency_timeout, pending.target_value + )); + } + } } } @@ -433,14 +723,18 @@ fn build_tracking_status( details: &CounterTrackingDetails, last_error: Option, ) -> ServiceStatus { - let status = if details.current_value.is_some() { + let status = if last_error.is_some() { + // If the latest poll failed, surface the service as unhealthy even if we have + // a previously cached value, so the dashboard shows that tracking is degraded. 
+ Status::Unhealthy + } else if details.current_value.is_some() { Status::Healthy } else { Status::Unknown }; ServiceStatus { - name: "Counter Tracking".to_string(), + name: "Network Transactions".to_string(), status, last_checked: crate::monitor::tasks::current_unix_timestamp_secs(), error: last_error, @@ -458,7 +752,15 @@ fn load_counter_account(file_path: &Path) -> Result { /// Create and submit a network note that targets the counter account. #[allow(clippy::too_many_arguments)] -#[instrument(target = COMPONENT, name = "create-and-submit-network-note", skip_all, ret)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.create_and_submit_network_note", + skip_all, + level = "info", + ret(level = "debug"), + err +)] async fn create_and_submit_network_note( wallet_account: &Account, counter_account: &Account, @@ -470,17 +772,13 @@ async fn create_and_submit_network_note( rng: &mut ChaCha20Rng, ) -> Result<(String, AccountHeader, BlockNumber)> { // Create authenticator for transaction signing - let authenticator = BasicAuthenticator::new(&[AuthSecretKey::RpoFalcon512(secret_key.clone())]); + let authenticator = BasicAuthenticator::new(&[AuthSecretKey::Falcon512Rpo(secret_key.clone())]); - let account_interface = AccountInterface::new( - wallet_account.id(), - vec![AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() }], - wallet_account.code(), - ); + let account_interface = AccountInterface::from_account(wallet_account); let (network_note, note_recipient) = create_network_note(wallet_account, counter_account, increment_script.clone(), rng)?; - let script = account_interface.build_send_notes_script(&[network_note.into()], None, false)?; + let script = account_interface.build_send_notes_script(&[network_note.into()], None)?; // Create transaction executor let executor = TransactionExecutor::new(data_store).with_authenticator(&authenticator); @@ -498,6 +796,8 @@ async fn create_and_submit_network_note( .await 
.context("Failed to execute transaction")?; + let tx_inputs = executed_tx.tx_inputs().to_bytes(); + let final_account = executed_tx.final_account().clone(); // Prove the transaction @@ -507,7 +807,7 @@ async fn create_and_submit_network_note( // Submit the proven transaction let request = ProvenTransaction { transaction: proven_tx.to_bytes(), - transaction_inputs: None, + transaction_inputs: Some(tx_inputs), }; let block_height: BlockNumber = rpc_client @@ -515,7 +815,7 @@ async fn create_and_submit_network_note( .await .context("Failed to submit proven transaction to RPC")? .into_inner() - .block_height + .block_num .into(); info!("Submitted proven transaction to RPC"); @@ -530,7 +830,7 @@ async fn create_and_submit_network_note( fn create_increment_script() -> Result<(NoteScript, Library)> { let library = get_counter_library()?; - let script_builder = ScriptBuilder::new(true) + let script_builder = CodeBuilder::new() .with_dynamically_linked_library(&library) .context("Failed to create script builder with library")?; @@ -552,13 +852,18 @@ fn create_network_note( script: NoteScript, rng: &mut ChaCha20Rng, ) -> Result<(Note, NoteRecipient)> { + // Create the NetworkAccountTarget attachment - this is required for the note to be + // recognized as a network note by the ntx-builder + let target = NetworkAccountTarget::new(counter_account.id(), NoteExecutionHint::Always) + .context("Failed to create NetworkAccountTarget for counter account")?; + let attachment: NoteAttachment = target.into(); + let metadata = NoteMetadata::new( wallet_account.id(), NoteType::Public, - NoteTag::from_account_id(counter_account.id()), - NoteExecutionHint::Always, - ZERO, - )?; + NoteTag::with_account_target(counter_account.id()), + ) + .with_attachment(attachment); let serial_num = Word::new([ Felt::new(rng.random()), @@ -572,3 +877,16 @@ fn create_network_note( let network_note = Note::new(NoteAssets::new(vec![])?, metadata, recipient.clone()); Ok((network_note, recipient)) } + +/// 
Fetch the current chain tip height from RPC status. +async fn fetch_chain_tip(rpc_client: &mut RpcClient) -> Result { + let status = rpc_client.status(()).await?.into_inner(); + + if let Some(block_producer_status) = status.block_producer { + Ok(block_producer_status.chain_tip) + } else if let Some(store_status) = status.store { + Ok(store_status.chain_tip) + } else { + anyhow::bail!("RPC status response did not include a chain tip") + } +} diff --git a/bin/network-monitor/src/deploy/counter.rs b/bin/network-monitor/src/deploy/counter.rs index fa62b15754..a5ab3d3638 100644 --- a/bin/network-monitor/src/deploy/counter.rs +++ b/bin/network-monitor/src/deploy/counter.rs @@ -3,9 +3,7 @@ use std::path::Path; use anyhow::Result; -use miden_lib::testing::account_component::IncrNonceAuthComponent; -use miden_lib::transaction::TransactionKernel; -use miden_objects::account::{ +use miden_protocol::account::{ Account, AccountBuilder, AccountComponent, @@ -14,12 +12,26 @@ use miden_objects::account::{ AccountStorageMode, AccountType, StorageSlot, + StorageSlotName, }; -use miden_objects::{Felt, FieldElement, Word}; +use miden_protocol::utils::sync::LazyLock; +use miden_protocol::{Felt, FieldElement, Word}; +use miden_standards::code_builder::CodeBuilder; +use miden_standards::testing::account_component::IncrNonceAuthComponent; use tracing::instrument; use crate::COMPONENT; +pub static OWNER_SLOT_NAME: LazyLock = LazyLock::new(|| { + StorageSlotName::new("miden::monitor::counter_contract::owner") + .expect("storage slot name should be valid") +}); + +pub static COUNTER_SLOT_NAME: LazyLock = LazyLock::new(|| { + StorageSlotName::new("miden::monitor::counter_contract::counter") + .expect("storage slot name should be valid") +}); + /// Create a counter program account with custom MASM script. 
#[instrument(target = COMPONENT, name = "create-counter-account", skip_all, ret(level = "debug"))] pub fn create_counter_account(owner_account_id: AccountId) -> Result { @@ -31,21 +43,18 @@ pub fn create_counter_account(owner_account_id: AccountId) -> Result { let owner_account_id_prefix = owner_account_id.prefix().as_felt(); let owner_account_id_suffix = owner_account_id.suffix(); - let owner_id_slot = StorageSlot::Value(Word::from([ - Felt::ZERO, - Felt::ZERO, - owner_account_id_suffix, - owner_account_id_prefix, - ])); + let owner_id_slot = StorageSlot::with_value( + OWNER_SLOT_NAME.clone(), + Word::from([Felt::ZERO, Felt::ZERO, owner_account_id_suffix, owner_account_id_prefix]), + ); + + let counter_slot = StorageSlot::with_value(COUNTER_SLOT_NAME.clone(), Word::empty()); - let counter_slot = StorageSlot::Value(Word::empty()); + let component_code = + CodeBuilder::default().compile_component_code("counter::program", script)?; - let account_code = AccountComponent::compile( - script, - TransactionKernel::assembler(), - vec![counter_slot, owner_id_slot], - )? - .with_supports_all_types(); + let account_code = AccountComponent::new(component_code, vec![counter_slot, owner_id_slot])? 
+ .with_supports_all_types(); let incr_nonce_auth: AccountComponent = IncrNonceAuthComponent.into(); diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index d9be433a1e..235905f139 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -8,18 +8,29 @@ use std::sync::Arc; use std::time::Duration; use anyhow::{Context, Result}; -use miden_lib::transaction::TransactionKernel; -use miden_node_proto::clients::{Builder, Rpc, RpcClient}; -use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; +use miden_node_proto::clients::{Builder, RpcClient}; +use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; -use miden_objects::account::{Account, AccountId, PartialAccount, PartialStorage}; -use miden_objects::assembly::{DefaultSourceManager, Library, LibraryPath, Module, ModuleKind}; -use miden_objects::asset::{AssetVaultKey, AssetWitness, PartialVault}; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::crypto::merkle::{MmrPeaks, PartialMmr}; -use miden_objects::note::NoteScript; -use miden_objects::transaction::{AccountInputs, InputNotes, PartialBlockchain, TransactionArgs}; -use miden_objects::{MastForest, Word}; +use miden_protocol::account::{Account, AccountId, PartialAccount, PartialStorage}; +use miden_protocol::assembly::{ + DefaultSourceManager, + Library, + Module, + ModuleKind, + Path as MidenPath, +}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness, PartialVault}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::crypto::merkle::mmr::{MmrPeaks, PartialMmr}; +use miden_protocol::note::NoteScript; +use miden_protocol::transaction::{ + AccountInputs, + InputNotes, + PartialBlockchain, + TransactionArgs, + TransactionKernel, +}; +use miden_protocol::{MastForest, Word}; use miden_tx::auth::BasicAuthenticator; use miden_tx::utils::Serializable; 
use miden_tx::{ @@ -40,6 +51,62 @@ use crate::deploy::wallet::{create_wallet_account, save_wallet_account}; pub mod counter; pub mod wallet; +/// Create an RPC client configured with the correct genesis metadata in the +/// `Accept` header so that write RPCs such as `SubmitProvenTransaction` are +/// accepted by the node. +pub async fn create_genesis_aware_rpc_client( + rpc_url: &Url, + timeout: Duration, +) -> Result { + // First, create a temporary client without genesis metadata to discover the + // genesis block header and its commitment. + let mut rpc: RpcClient = Builder::new(rpc_url.clone()) + .with_tls() + .context("Failed to configure TLS for RPC client")? + .with_timeout(timeout) + .without_metadata_version() + .without_metadata_genesis() + .without_otel_context_injection() + .connect() + .await + .context("Failed to create RPC client for genesis discovery")?; + + let block_header_request = BlockHeaderByNumberRequest { + block_num: Some(BlockNumber::GENESIS.as_u32()), + include_mmr_proof: None, + }; + + let response = rpc + .get_block_header_by_number(block_header_request) + .await + .context("Failed to get genesis block header from RPC")? + .into_inner(); + + let genesis_block_header = response + .block_header + .ok_or_else(|| anyhow::anyhow!("No block header in response"))?; + + let genesis_header: BlockHeader = + genesis_block_header.try_into().context("Failed to convert block header")?; + let genesis_commitment = genesis_header.commitment(); + let genesis = genesis_commitment.to_hex(); + + // Rebuild the client, this time including the required genesis metadata so that + // write RPCs like SubmitProvenTransaction are accepted by the node. + let rpc_client = Builder::new(rpc_url.clone()) + .with_tls() + .context("Failed to configure TLS for RPC client")? 
+ .with_timeout(timeout) + .without_metadata_version() + .with_metadata_genesis(genesis) + .without_otel_context_injection() + .connect() + .await + .context("Failed to connect to RPC server with genesis metadata")?; + + Ok(rpc_client) +} + /// Ensure accounts exist, creating them if they don't. /// /// This function checks if the wallet and counter account files exist. @@ -89,16 +156,8 @@ pub async fn ensure_accounts_exist( /// then saves it to the specified file. #[instrument(target = COMPONENT, name = "deploy-counter-account", skip_all, ret(level = "debug"))] pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> Result<()> { - // Deploy counter account to the network - let mut rpc_client: RpcClient = Builder::new(rpc_url.clone()) - .with_tls() - .context("Failed to configure TLS for RPC client")? - .with_timeout(Duration::from_secs(5)) - .without_metadata_version() - .without_metadata_genesis() - .connect::() - .await - .context("Failed to connect to RPC server")?; + // Deploy counter account to the network using a genesis-aware RPC client. 
+ let mut rpc_client = create_genesis_aware_rpc_client(rpc_url, Duration::from_secs(10)).await?; let block_header_request = BlockHeaderByNumberRequest { block_num: Some(BlockNumber::GENESIS.as_u32()), @@ -115,7 +174,8 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> .block_header .ok_or_else(|| anyhow::anyhow!("No block header in response"))?; - let genesis_header = root_block_header.try_into().context("Failed to convert block header")?; + let genesis_header: BlockHeader = + root_block_header.try_into().context("Failed to convert block header")?; let genesis_chain_mmr = PartialBlockchain::new(PartialMmr::from_peaks(MmrPeaks::default()), Vec::new()) @@ -125,7 +185,7 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> data_store.add_account(counter_account.clone()); let executor: TransactionExecutor<'_, '_, _, BasicAuthenticator> = - TransactionExecutor::new(&data_store); + TransactionExecutor::new(&data_store).with_debug_mode(); let tx_args = TransactionArgs::default(); @@ -139,13 +199,15 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> .await .context("Failed to execute transaction")?; + let transaction_inputs = executed_tx.tx_inputs().to_bytes(); + let prover = LocalTransactionProver::default(); let proven_tx = prover.prove(executed_tx).context("Failed to prove transaction")?; let request = ProvenTransaction { transaction: proven_tx.to_bytes(), - transaction_inputs: None, + transaction_inputs: Some(transaction_inputs), }; rpc_client @@ -157,16 +219,15 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> } pub(crate) fn get_counter_library() -> Result { - let assembler = TransactionKernel::assembler().with_debug_mode(true); + let assembler = TransactionKernel::assembler(); let source_manager = Arc::new(DefaultSourceManager::default()); let script = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/assets/counter_program.masm")); 
- let library_path = LibraryPath::new("external_contract::counter_contract") - .context("Failed to create library path")?; + let library_path = MidenPath::new("external_contract::counter_contract"); let module = Module::parser(ModuleKind::Library) - .parse_str(library_path, script, &source_manager) + .parse_str(library_path, script, source_manager) .map_err(|e| anyhow::anyhow!("Failed to parse module: {e}"))?; assembler @@ -248,7 +309,7 @@ impl DataStore for MonitorDataStore { _account_id: AccountId, _map_root: Word, _map_key: Word, - ) -> Result { + ) -> Result { unimplemented!("Not needed") } @@ -260,12 +321,12 @@ impl DataStore for MonitorDataStore { unimplemented!("Not needed") } - async fn get_vault_asset_witness( + async fn get_vault_asset_witnesses( &self, account_id: AccountId, vault_root: Word, - vault_key: AssetVaultKey, - ) -> Result { + vault_keys: BTreeSet, + ) -> Result, DataStoreError> { let account = self.get_account(account_id)?; if account.vault().root() != vault_root { @@ -275,16 +336,21 @@ impl DataStore for MonitorDataStore { }); } - AssetWitness::new(account.vault().open(vault_key).into()).map_err(|err| { - DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(Box::new(err)), - } - }) + Result::, _>::from_iter(vault_keys.into_iter().map(|vault_key| { + AssetWitness::new(account.vault().open(vault_key).into()).map_err(|err| { + DataStoreError::Other { + error_msg: "failed to open vault asset tree".into(), + source: Some(Box::new(err)), + } + }) + })) } - async fn get_note_script(&self, script_root: Word) -> Result { - Err(DataStoreError::NoteScriptNotFound(script_root)) + async fn get_note_script( + &self, + _script_root: Word, + ) -> Result, DataStoreError> { + Ok(None) } } diff --git a/bin/network-monitor/src/deploy/wallet.rs b/bin/network-monitor/src/deploy/wallet.rs index 704ced5e4d..de687ab6d4 100644 --- a/bin/network-monitor/src/deploy/wallet.rs +++ b/bin/network-monitor/src/deploy/wallet.rs @@ 
-3,12 +3,12 @@ use std::path::Path; use anyhow::Result; -use miden_lib::AuthScheme; -use miden_lib::account::wallets::create_basic_wallet; use miden_node_utils::crypto::get_rpo_random_coin; -use miden_objects::account::auth::AuthSecretKey; -use miden_objects::account::{Account, AccountFile, AccountStorageMode, AccountType}; -use miden_objects::crypto::dsa::rpo_falcon512::SecretKey; +use miden_protocol::account::auth::AuthSecretKey; +use miden_protocol::account::{Account, AccountFile, AccountStorageMode, AccountType}; +use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey; +use miden_standards::AuthScheme; +use miden_standards::account::wallets::create_basic_wallet; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; use tracing::instrument; @@ -22,7 +22,7 @@ use crate::COMPONENT; pub fn create_wallet_account() -> Result<(Account, SecretKey)> { let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() }; + let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; let init_seed: [u8; 32] = rng.random(); let wallet_account = create_basic_wallet( @@ -41,7 +41,7 @@ pub fn save_wallet_account( secret_key: &SecretKey, file_path: &Path, ) -> Result<()> { - let auth_secret_key = AuthSecretKey::RpoFalcon512(secret_key.clone()); + let auth_secret_key = AuthSecretKey::Falcon512Rpo(secret_key.clone()); let account_file = AccountFile::new(account.clone(), vec![auth_secret_key]); account_file.write(file_path)?; Ok(()) diff --git a/bin/network-monitor/src/explorer.rs b/bin/network-monitor/src/explorer.rs new file mode 100644 index 0000000000..f912a62dff --- /dev/null +++ b/bin/network-monitor/src/explorer.rs @@ -0,0 +1,256 @@ +// EXPLORER STATUS CHECKER +// ================================================================================================ + +use std::fmt::{self, Display}; +use 
std::time::Duration; + +use reqwest::Client; +use serde::Serialize; +use tokio::sync::watch; +use tokio::time::MissedTickBehavior; +use tracing::{info, instrument}; +use url::Url; + +use crate::status::{ExplorerStatusDetails, ServiceDetails, ServiceStatus, Status}; +use crate::{COMPONENT, current_unix_timestamp_secs}; + +const LATEST_BLOCK_QUERY: &str = " +query LatestBlock { + blocks(input: { sort_by: timestamp, order_by: desc }, first: 1) { + edges { + node { + block_number + timestamp + number_of_transactions + number_of_nullifiers + number_of_notes + block_commitment + chain_commitment + proof_commitment + number_of_account_updates + } + } + } +} +"; + +#[derive(Serialize, Copy, Clone)] +struct EmptyVariables; + +#[derive(Serialize, Copy, Clone)] +struct GraphqlRequest { + query: &'static str, + variables: V, +} + +const LATEST_BLOCK_REQUEST: GraphqlRequest = GraphqlRequest { + query: LATEST_BLOCK_QUERY, + variables: EmptyVariables, +}; + +/// Runs a task that continuously checks explorer status and updates a watch channel. +/// +/// This function spawns a task that periodically checks the explorer service status +/// and sends updates through a watch channel. +/// +/// # Arguments +/// +/// * `explorer_url` - The URL of the explorer service. +/// * `name` - The name of the explorer. +/// * `status_sender` - The sender for the watch channel. +/// * `status_check_interval` - The interval at which to check the status of the services. +/// +/// # Returns +/// +/// `Ok(())` if the monitoring task runs and completes successfully, or an error if there are +/// connection issues or failures while checking the explorer status. 
+pub async fn run_explorer_status_task( + explorer_url: Url, + name: String, + status_sender: watch::Sender, + status_check_interval: Duration, + request_timeout: Duration, +) { + let mut explorer_client = reqwest::Client::new(); + + let mut interval = tokio::time::interval(status_check_interval); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + loop { + interval.tick().await; + + let current_time = current_unix_timestamp_secs(); + + let status = check_explorer_status( + &mut explorer_client, + explorer_url.clone(), + name.clone(), + current_time, + request_timeout, + ) + .await; + + // Send the status update; exit if no receivers (shutdown signal) + if status_sender.send(status).is_err() { + info!("No receivers for explorer status updates, shutting down"); + return; + } + } +} + +/// Checks the status of the explorer service. +/// +/// This function checks the status of the explorer service. +/// +/// # GraphQL Query +/// +/// See [`LATEST_BLOCK_QUERY`] for the exact query string used. +/// +/// # Arguments +/// +/// * `explorer` - The explorer client. +/// * `name` - The name of the explorer. +/// * `url` - The URL of the explorer. +/// * `current_time` - The current time. +/// +/// # Returns +/// +/// A `ServiceStatus` containing the status of the explorer service. 
+#[instrument(target = COMPONENT, name = "check-status.explorer", skip_all, ret(level = "info"))] +pub(crate) async fn check_explorer_status( + explorer_client: &mut Client, + explorer_url: Url, + name: String, + current_time: u64, + request_timeout: Duration, +) -> ServiceStatus { + let resp = explorer_client + .post(explorer_url.clone()) + .json(&LATEST_BLOCK_REQUEST) + .timeout(request_timeout) + .send() + .await; + + let value = match resp { + Ok(resp) => resp.json::().await, + Err(e) => return unhealthy(&name, current_time, &e), + }; + + let details = match value { + Ok(value) => ExplorerStatusDetails::try_from(value), + Err(e) => return unhealthy(&name, current_time, &e), + }; + + match details { + Ok(details) => ServiceStatus { + name: name.clone(), + status: Status::Healthy, + last_checked: current_time, + error: None, + details: ServiceDetails::ExplorerStatus(details), + }, + Err(e) => unhealthy(&name, current_time, &e), + } +} + +/// Returns an unhealthy service status. +fn unhealthy(name: &str, current_time: u64, err: &impl ToString) -> ServiceStatus { + ServiceStatus { + name: name.to_owned(), + status: Status::Unhealthy, + last_checked: current_time, + error: Some(err.to_string()), + details: ServiceDetails::Error, + } +} + +#[derive(Debug)] +pub enum ExplorerStatusError { + MissingField(String), +} + +impl Display for ExplorerStatusError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ExplorerStatusError::MissingField(field) => write!(f, "missing field: {field}"), + } + } +} + +impl TryFrom for ExplorerStatusDetails { + type Error = ExplorerStatusError; + + fn try_from(value: serde_json::Value) -> Result { + let node = value.pointer("/data/blocks/edges/0/node").ok_or_else(|| { + ExplorerStatusError::MissingField("data.blocks.edges[0].node".to_string()) + })?; + + let block_number = node + .get("block_number") + .and_then(serde_json::Value::as_u64) + .ok_or_else(|| 
ExplorerStatusError::MissingField("block_number".to_string()))?; + let timestamp = node + .get("timestamp") + .and_then(serde_json::Value::as_u64) + .ok_or_else(|| ExplorerStatusError::MissingField("timestamp".to_string()))?; + + let number_of_transactions = node + .get("number_of_transactions") + .and_then(serde_json::Value::as_u64) + .ok_or_else(|| { + ExplorerStatusError::MissingField("number_of_transactions".to_string()) + })?; + let number_of_nullifiers = node + .get("number_of_nullifiers") + .and_then(serde_json::Value::as_u64) + .ok_or_else(|| ExplorerStatusError::MissingField("number_of_nullifiers".to_string()))?; + let number_of_notes = node + .get("number_of_notes") + .and_then(serde_json::Value::as_u64) + .ok_or_else(|| ExplorerStatusError::MissingField("number_of_notes".to_string()))?; + let number_of_account_updates = node + .get("number_of_account_updates") + .and_then(serde_json::Value::as_u64) + .ok_or_else(|| { + ExplorerStatusError::MissingField("number_of_account_updates".to_string()) + })?; + + let block_commitment = node + .get("block_commitment") + .and_then(|v| v.as_str()) + .ok_or_else(|| ExplorerStatusError::MissingField("block_commitment".to_string()))? + .to_string(); + let chain_commitment = node + .get("chain_commitment") + .and_then(|v| v.as_str()) + .ok_or_else(|| ExplorerStatusError::MissingField("chain_commitment".to_string()))? + .to_string(); + let proof_commitment = node + .get("proof_commitment") + .and_then(|v| v.as_str()) + .ok_or_else(|| ExplorerStatusError::MissingField("proof_commitment".to_string()))? 
+ .to_string(); + + Ok(Self { + block_number, + timestamp, + number_of_transactions, + number_of_nullifiers, + number_of_notes, + number_of_account_updates, + block_commitment, + chain_commitment, + proof_commitment, + }) + } +} + +pub(crate) fn initial_explorer_status() -> ServiceStatus { + ServiceStatus { + name: "Explorer".to_string(), + status: Status::Unknown, + last_checked: current_unix_timestamp_secs(), + error: None, + details: ServiceDetails::ExplorerStatus(ExplorerStatusDetails::default()), + } +} diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 5cc0944b21..84c0b0f3ac 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -7,8 +7,8 @@ use std::time::Duration; use anyhow::Context; use hex; -use miden_objects::account::AccountId; -use miden_objects::testing::account_id::ACCOUNT_ID_SENDER; +use miden_protocol::account::AccountId; +use miden_protocol::testing::account_id::ACCOUNT_ID_SENDER; use reqwest::Client; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -88,7 +88,6 @@ pub struct GetMetadataResponse { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. -#[instrument(target = COMPONENT, name = "faucet-test-task", skip_all)] pub async fn run_faucet_test_task( faucet_url: Url, status_sender: watch::Sender, @@ -167,6 +166,15 @@ pub async fn run_faucet_test_task( /// # Returns /// /// The response from the faucet if successful, or an error if the test fails. +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.perform_faucet_test", + skip_all, + level = "info", + ret(level = "debug"), + err +)] async fn perform_faucet_test( client: &Client, faucet_url: &Url, @@ -248,7 +256,15 @@ async fn perform_faucet_test( /// /// The nonce that solves the challenge, or an error if no solution is found within reasonable /// bounds. 
-#[instrument(target = COMPONENT, name = "solve-pow-challenge", skip_all, ret(level = "debug"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.solve_pow_challenge", + skip_all, + level = "info", + ret(level = "debug"), + err +)] fn solve_pow_challenge(challenge: &str, target: u64) -> anyhow::Result { let challenge_bytes = hex::decode(challenge).context("Failed to decode challenge from hex")?; diff --git a/bin/network-monitor/src/frontend.rs b/bin/network-monitor/src/frontend.rs index ba7838e27f..035db669cd 100644 --- a/bin/network-monitor/src/frontend.rs +++ b/bin/network-monitor/src/frontend.rs @@ -25,6 +25,7 @@ pub struct ServerState { pub faucet: Option>, pub ntx_increment: Option>, pub ntx_tracking: Option>, + pub explorer: Option>, } /// Runs the frontend server. @@ -35,12 +36,12 @@ pub struct ServerState { /// /// * `server_state` - The server state containing watch receivers for all services. /// * `config` - The configuration of the network. 
-#[instrument(target = COMPONENT, name = "frontend.serve", skip_all, fields(port = %config.port))] pub async fn serve(server_state: ServerState, config: MonitorConfig) { // build our application with routes let app = Router::new() // Serve embedded assets .route("/assets/index.css", get(serve_css)) + .route("/assets/index.js", get(serve_js)) .route("/assets/favicon.ico", get(serve_favicon)) // Main dashboard route .route("/", get(get_dashboard)) @@ -76,6 +77,11 @@ async fn get_status( // Collect RPC status services.push(server_state.rpc.borrow().clone()); + // Collect explorer status if available + if let Some(explorer_rx) = &server_state.explorer { + services.push(explorer_rx.borrow().clone()); + } + // Collect all remote prover statuses for (prover_status_rx, prover_test_rx) in &server_state.provers { services.push(prover_status_rx.borrow().clone()); @@ -110,6 +116,14 @@ async fn serve_css() -> Response { .into_response() } +async fn serve_js() -> Response { + ( + [(header::CONTENT_TYPE, header::HeaderValue::from_static("text/javascript"))], + include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/index.js")), + ) + .into_response() +} + async fn serve_favicon() -> Response { ( [(header::CONTENT_TYPE, header::HeaderValue::from_static("image/x-icon"))], diff --git a/bin/network-monitor/src/main.rs b/bin/network-monitor/src/main.rs index 2a288f5302..ed0f08cbaf 100644 --- a/bin/network-monitor/src/main.rs +++ b/bin/network-monitor/src/main.rs @@ -12,6 +12,7 @@ pub mod commands; pub mod config; pub mod counter; mod deploy; +pub mod explorer; pub mod faucet; pub mod frontend; mod monitor; diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index 38a3068801..c5b773dc32 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -6,21 +6,27 @@ use std::sync::atomic::AtomicU64; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use anyhow::Result; -use 
miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverProxy, Rpc}; -use tokio::sync::watch; +use miden_node_proto::clients::{ + Builder as ClientBuilder, + RemoteProverProxyStatusClient, + RpcClient, +}; use tokio::sync::watch::Receiver; +use tokio::sync::{Mutex, watch}; use tokio::task::{Id, JoinSet}; use tracing::{debug, instrument}; use crate::COMPONENT; use crate::config::MonitorConfig; -use crate::counter::{run_counter_tracking_task, run_increment_task}; +use crate::counter::{LatencyState, run_counter_tracking_task, run_increment_task}; use crate::deploy::ensure_accounts_exist; +use crate::explorer::{initial_explorer_status, run_explorer_status_task}; use crate::faucet::run_faucet_test_task; use crate::frontend::{ServerState, serve}; use crate::remote_prover::{ProofType, generate_prover_test_payload, run_remote_prover_test_task}; use crate::status::{ ServiceStatus, + StaleChainTracker, check_remote_prover_status, check_rpc_status, run_remote_prover_status_task, @@ -44,11 +50,21 @@ impl Tasks { } /// Spawn the RPC status checker task. 
- #[instrument(target = COMPONENT, name = "tasks.spawn-rpc-checker", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_rpc_checker", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_rpc_checker( &mut self, config: &MonitorConfig, ) -> Result> { + debug!(target: COMPONENT, rpc_url = %config.rpc_url, "Spawning RPC status checker task"); + // Create initial status for RPC service let mut rpc = ClientBuilder::new(config.rpc_url.clone()) .with_tls() @@ -56,37 +72,95 @@ impl Tasks { .with_timeout(config.request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let current_time = current_unix_timestamp_secs(); - let initial_rpc_status = check_rpc_status(&mut rpc, current_time).await; + let mut stale_tracker = StaleChainTracker::new(config.stale_chain_tip_threshold); + let initial_rpc_status = check_rpc_status( + &mut rpc, + config.rpc_url.to_string(), + current_time, + &mut stale_tracker, + ) + .await; // Spawn the RPC checker let (rpc_tx, rpc_rx) = watch::channel(initial_rpc_status); let rpc_url = config.rpc_url.clone(); let status_check_interval = config.status_check_interval; let request_timeout = config.request_timeout; + let stale_chain_tip_threshold = config.stale_chain_tip_threshold; let id = self .handles .spawn(async move { - run_rpc_status_task(rpc_url, rpc_tx, status_check_interval, request_timeout).await; + run_rpc_status_task( + rpc_url, + rpc_tx, + status_check_interval, + request_timeout, + stale_chain_tip_threshold, + ) + .await; }) .id(); self.names.insert(id, "rpc-checker".to_string()); + debug!(target: COMPONENT, "RPC status checker task spawned successfully"); Ok(rpc_rx) } + /// Spawn the explorer status checker task. 
+ #[instrument(target = COMPONENT, name = "tasks.spawn-explorer-checker", skip_all)] + pub async fn spawn_explorer_checker( + &mut self, + config: &MonitorConfig, + ) -> Result> { + let explorer_url = config.explorer_url.clone().expect("Explorer URL exists"); + let name = "Explorer".to_string(); + let status_check_interval = config.status_check_interval; + let request_timeout = config.request_timeout; + let (explorer_status_tx, explorer_status_rx) = watch::channel(initial_explorer_status()); + + let id = self + .handles + .spawn(async move { + run_explorer_status_task( + explorer_url, + name, + explorer_status_tx, + status_check_interval, + request_timeout, + ) + .await; + }) + .id(); + self.names.insert(id, "explorer-checker".to_string()); + + println!("Spawned explorer status checker task"); + + Ok(explorer_status_rx) + } + /// Spawn prover status and test tasks for all configured provers. - #[instrument(target = COMPONENT, name = "tasks.spawn-prover-tasks", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_prover_tasks", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_prover_tasks( &mut self, config: &MonitorConfig, ) -> Result, watch::Receiver)>> { + debug!(target: COMPONENT, prover_count = config.remote_prover_urls.len(), "Spawning prover tasks"); let mut prover_rxs = Vec::new(); for (i, prover_url) in config.remote_prover_urls.iter().enumerate() { - let name = format!("Prover-{}", i + 1); + let name = format!("Remote Prover ({})", i + 1); let mut remote_prover = ClientBuilder::new(prover_url.clone()) .with_tls() @@ -94,7 +168,8 @@ impl Tasks { .with_timeout(config.request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let current_time = current_unix_timestamp_secs(); @@ -189,11 +264,19 @@ impl Tasks { prover_rxs.push((prover_status_rx, prover_test_rx)); } + debug!(target: 
COMPONENT, spawned_provers = prover_rxs.len(), "All prover tasks spawned successfully"); Ok(prover_rxs) } /// Spawn the faucet testing task. - #[instrument(target = COMPONENT, name = "tasks.spawn-faucet", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_faucet", + skip_all, + level = "info", + ret(level = "debug") + )] pub fn spawn_faucet(&mut self, config: &MonitorConfig) -> Receiver { let current_time = current_unix_timestamp_secs(); @@ -230,8 +313,16 @@ impl Tasks { faucet_rx } - /// Spawn the network transaction service checker tasks (increment and tracking). - #[instrument(target = COMPONENT, name = "tasks.spawn-ntx-service", skip_all)] + /// Spawn the network transaction service checker task. + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_ntx_service", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_ntx_service( &mut self, config: &MonitorConfig, @@ -244,10 +335,13 @@ impl Tasks { // Create shared atomic counter for tracking expected counter value let expected_counter_value = Arc::new(AtomicU64::new(0)); + let latency_state = Arc::new(Mutex::new(LatencyState::default())); + let latency_state_for_increment = latency_state.clone(); + let latency_state_for_tracking = latency_state.clone(); // Create initial increment status let initial_increment_status = ServiceStatus { - name: "Counter Increment".to_string(), + name: "Local Transactions".to_string(), status: crate::status::Status::Unknown, last_checked: current_time, error: None, @@ -255,12 +349,13 @@ impl Tasks { success_count: 0, failure_count: 0, last_tx_id: None, + last_latency_blocks: None, }), }; // Create initial tracking status let initial_tracking_status = ServiceStatus { - name: "Counter Tracking".to_string(), + name: "Network Transactions".to_string(), status: crate::status::Status::Unknown, last_checked: current_time, error: None, @@ -281,9 +376,14 @@ impl Tasks { 
let increment_id = self .handles .spawn(async move { - Box::pin(run_increment_task(config_clone, increment_tx, counter_clone)) - .await - .expect("Counter increment task runs indefinitely"); + Box::pin(run_increment_task( + config_clone, + increment_tx, + counter_clone, + latency_state_for_increment, + )) + .await + .expect("Counter increment task runs indefinitely"); }) .id(); self.names.insert(increment_id, "counter-increment".to_string()); @@ -295,9 +395,14 @@ impl Tasks { let tracking_id = self .handles .spawn(async move { - Box::pin(run_counter_tracking_task(config_clone, tracking_tx, counter_clone)) - .await - .expect("Counter tracking task runs indefinitely"); + Box::pin(run_counter_tracking_task( + config_clone, + tracking_tx, + counter_clone, + latency_state_for_tracking, + )) + .await + .expect("Counter tracking task runs indefinitely"); }) .id(); self.names.insert(tracking_id, "counter-tracking".to_string()); @@ -306,7 +411,14 @@ impl Tasks { } /// Spawn the HTTP frontend server. 
- #[instrument(target = COMPONENT, name = "tasks.spawn-frontend", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_http_server", + skip_all, + level = "info", + ret(level = "debug") + )] pub fn spawn_http_server(&mut self, server_state: ServerState, config: &MonitorConfig) { let config = config.clone(); let id = self.handles.spawn(async move { serve(server_state, config).await }).id(); diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index c58b418112..791315d3b8 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -8,10 +8,10 @@ use std::time::Duration; use anyhow::Context; use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverClient}; use miden_node_proto::generated as proto; -use miden_objects::asset::{Asset, FungibleAsset}; -use miden_objects::note::NoteType; -use miden_objects::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; -use miden_objects::transaction::TransactionInputs; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::note::NoteType; +use miden_protocol::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; +use miden_protocol::transaction::TransactionInputs; use miden_testing::{Auth, MockChainBuilder}; use miden_tx::utils::Serializable; use serde::{Deserialize, Serialize}; @@ -87,7 +87,6 @@ pub struct ProverTestDetails { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. 
-#[instrument(target = COMPONENT, name = "remote-prover-test-task", skip_all)] pub async fn run_remote_prover_test_task( prover_url: Url, name: &str, @@ -103,6 +102,7 @@ pub async fn run_remote_prover_test_task( .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() + .without_otel_context_injection() .connect_lazy::(); let mut interval = tokio::time::interval(test_interval); @@ -153,7 +153,14 @@ pub async fn run_remote_prover_test_task( /// # Returns /// /// A `ServiceStatus` containing the results of the proof test. -#[instrument(target = COMPONENT, name = "test-remote-prover", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.test_remote_prover", + skip_all, + level = "info", + ret(level = "debug") +)] async fn test_remote_prover( client: &mut miden_node_proto::clients::RemoteProverClient, name: &str, @@ -256,6 +263,15 @@ fn tonic_status_to_json(status: &tonic::Status) -> String { /// This function creates a mock transaction using `MockChainBuilder` similar to what's done /// in the remote prover tests. The transaction is generated once and can be reused for /// multiple proof test calls. 
+#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.generate_mock_transaction", + skip_all, + level = "info", + ret(level = "debug"), + err +)] pub async fn generate_mock_transaction() -> anyhow::Result { let mut mock_chain_builder = MockChainBuilder::new(); @@ -303,6 +319,14 @@ pub async fn generate_mock_transaction() -> anyhow::Result { // GENERATE TEST REQUEST PAYLOAD // ================================================================================================ +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.generate_prover_test_payload", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn generate_prover_test_payload() -> proto::remote_prover::ProofRequest { proto::remote_prover::ProofRequest { proof_type: proto::remote_prover::ProofType::Transaction.into(), diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index c70ee024de..759fb0ed96 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -5,21 +5,75 @@ use std::time::Duration; -use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverProxy, Rpc}; +use miden_node_proto::clients::{ + Builder as ClientBuilder, + RemoteProverProxyStatusClient, + RpcClient, +}; use miden_node_proto::generated as proto; -use miden_node_proto::generated::block_producer::BlockProducerStatus; -use miden_node_proto::generated::rpc::RpcStatus; -use miden_node_proto::generated::rpc_store::StoreStatus; +use miden_node_proto::generated::rpc::{BlockProducerStatus, RpcStatus, StoreStatus}; use serde::{Deserialize, Serialize}; use tokio::sync::watch; use tokio::time::MissedTickBehavior; -use tracing::{info, instrument}; +use tracing::{debug, info, instrument}; use url::Url; use crate::faucet::FaucetTestDetails; use crate::remote_prover::{ProofType, ProverTestDetails}; use crate::{COMPONENT, current_unix_timestamp_secs}; +// STALE CHAIN TIP TRACKER 
+// ================================================================================================ + +/// Tracks the chain tip and detects when it becomes stale. +/// +/// This struct monitors the chain tip from RPC status responses and determines if the chain +/// has stopped making progress by comparing the time since the last chain tip change against +/// a configurable threshold. +#[derive(Debug)] +pub struct StaleChainTracker { + /// The last observed chain tip from the store. + last_chain_tip: Option, + /// Unix timestamp when the chain tip was last observed to change. + last_chain_tip_update: Option, + /// Maximum time without a chain tip update before marking as stale. + stale_threshold_secs: u64, +} + +impl StaleChainTracker { + /// Creates a new stale chain tracker with the given threshold. + pub fn new(stale_threshold: Duration) -> Self { + Self { + last_chain_tip: None, + last_chain_tip_update: None, + stale_threshold_secs: stale_threshold.as_secs(), + } + } + + /// Updates the tracker with a new chain tip observation and returns whether the chain is + /// stale. + /// + /// The chain is considered stale if the tip hasn't changed for longer than the configured + /// threshold + pub fn update(&mut self, chain_tip: u32, current_time: u64) -> Option { + match self.last_chain_tip { + Some(last_tip) if last_tip == chain_tip => { + if let Some(last_update) = self.last_chain_tip_update { + let elapsed = current_time.saturating_sub(last_update); + if elapsed > self.stale_threshold_secs { + return Some(elapsed); + } + } + }, + _ => { + self.last_chain_tip = Some(chain_tip); + self.last_chain_tip_update = Some(current_time); + }, + } + None + } +} + // STATUS // ================================================================================================ @@ -76,6 +130,17 @@ pub struct IncrementDetails { pub failure_count: u64, /// Last transaction ID (if available). 
pub last_tx_id: Option, + /// Last measured latency in blocks from submission to state update. + pub last_latency_blocks: Option, +} + +/// Details about an in-flight latency measurement. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct PendingLatencyDetails { + /// Block height returned when the transaction was submitted. + pub submit_height: u32, + /// Counter value we expect to see once the transaction is applied. + pub target_value: u64, } /// Details of the counter tracking service. @@ -91,6 +156,20 @@ pub struct CounterTrackingDetails { pub pending_increments: Option, } +/// Details of the explorer service. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ExplorerStatusDetails { + pub block_number: u64, + pub timestamp: u64, + pub number_of_transactions: u64, + pub number_of_nullifiers: u64, + pub number_of_notes: u64, + pub number_of_account_updates: u64, + pub block_commitment: String, + pub chain_commitment: String, + pub proof_commitment: String, +} + /// Details of a service. #[derive(Debug, Clone, Serialize, Deserialize)] pub enum ServiceDetails { @@ -100,6 +179,7 @@ pub enum ServiceDetails { FaucetTest(FaucetTestDetails), NtxIncrement(IncrementDetails), NtxTracking(CounterTrackingDetails), + ExplorerStatus(ExplorerStatusDetails), Error, } @@ -109,6 +189,8 @@ pub enum ServiceDetails { /// service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RpcStatusDetails { + /// The URL of the RPC service (used by the frontend for gRPC-Web probing). + pub url: String, pub version: String, pub genesis_commitment: Option, pub store_status: Option, @@ -134,6 +216,21 @@ pub struct StoreStatusDetails { pub struct BlockProducerStatusDetails { pub version: String, pub status: Status, + /// The block producer's current view of the chain tip height. + pub chain_tip: u32, + /// Mempool statistics for this block producer. + pub mempool: MempoolStatusDetails, +} + +/// Details about the block producer's mempool. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MempoolStatusDetails { + /// Number of transactions currently in the mempool waiting to be batched. + pub unbatched_transactions: u64, + /// Number of batches currently being proven. + pub proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + pub proven_batches: u64, } /// Details of a remote prover service. @@ -154,7 +251,7 @@ pub struct RemoteProverStatusDetails { /// worker service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WorkerStatusDetails { - pub address: String, + pub name: String, pub version: String, pub status: Status, } @@ -186,9 +283,20 @@ impl From for StoreStatusDetails { impl From for BlockProducerStatusDetails { fn from(value: BlockProducerStatus) -> Self { + // We assume all supported nodes expose mempool statistics. + let mempool_stats = value + .mempool_stats + .expect("block producer status must include mempool statistics"); + Self { version: value.version, status: value.status.into(), + chain_tip: value.chain_tip, + mempool: MempoolStatusDetails { + unbatched_transactions: mempool_stats.unbatched_transactions, + proposed_batches: mempool_stats.proposed_batches, + proven_batches: mempool_stats.proven_batches, + }, } } } @@ -199,7 +307,7 @@ impl From for WorkerStatusDetails { proto::remote_prover::WorkerHealthStatus::try_from(value.status).unwrap().into(); Self { - address: value.address, + name: value.name, version: value.version, status, } @@ -224,9 +332,11 @@ impl RemoteProverStatusDetails { } } -impl From for RpcStatusDetails { - fn from(status: RpcStatus) -> Self { +impl RpcStatusDetails { + /// Creates `RpcStatusDetails` from a gRPC `RpcStatus` response and the configured URL. 
+ pub fn from_rpc_status(status: RpcStatus, url: String) -> Self { Self { + url, version: status.version, genesis_commitment: status.genesis_commitment.as_ref().map(|gc| format!("{gc:?}")), store_status: status.store.map(StoreStatusDetails::from), @@ -241,41 +351,51 @@ impl From for RpcStatusDetails { /// Runs a task that continuously checks RPC status and updates a watch channel. /// /// This function spawns a task that periodically checks the RPC service status -/// and sends updates through a watch channel. +/// and sends updates through a watch channel. It also detects stale chain tips +/// and marks the RPC as unhealthy if the chain tip hasn't changed for longer +/// than the configured threshold. /// /// # Arguments /// /// * `rpc_url` - The URL of the RPC service. /// * `status_sender` - The sender for the watch channel. /// * `status_check_interval` - The interval at which to check the status of the services. +/// * `request_timeout` - The timeout for outgoing requests. +/// * `stale_chain_tip_threshold` - Maximum time without a chain tip update before marking as +/// unhealthy. /// /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. 
-#[instrument(target = COMPONENT, name = "rpc-status-task", skip_all)] pub async fn run_rpc_status_task( rpc_url: Url, status_sender: watch::Sender, status_check_interval: Duration, request_timeout: Duration, + stale_chain_tip_threshold: Duration, ) { + let url_str = rpc_url.to_string(); let mut rpc = ClientBuilder::new(rpc_url) .with_tls() .expect("TLS is enabled") .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let mut interval = tokio::time::interval(status_check_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + let mut stale_tracker = StaleChainTracker::new(stale_chain_tip_threshold); + loop { interval.tick().await; let current_time = current_unix_timestamp_secs(); - let status = check_rpc_status(&mut rpc, current_time).await; + let status = + check_rpc_status(&mut rpc, url_str.clone(), current_time, &mut stale_tracker).await; // Send the status update; exit if no receivers (shutdown signal) if status_sender.send(status).is_err() { @@ -287,39 +407,80 @@ pub async fn run_rpc_status_task( /// Checks the status of the RPC service. /// -/// This function checks the status of the RPC service. +/// This function checks the status of the RPC service and detects stale chain tips. +/// If the chain tip hasn't changed for longer than the configured threshold, the RPC +/// is marked as unhealthy. /// /// # Arguments /// /// * `rpc` - The RPC client. +/// * `url` - The URL of the RPC service. /// * `current_time` - The current time. +/// * `stale_tracker` - Tracker for detecting stale chain tips. /// /// # Returns /// /// A `ServiceStatus` containing the status of the RPC service. 
-#[instrument(target = COMPONENT, name = "check-status.rpc", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.check_rpc_status", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn check_rpc_status( rpc: &mut miden_node_proto::clients::RpcClient, + url: String, current_time: u64, + stale_tracker: &mut StaleChainTracker, ) -> ServiceStatus { match rpc.status(()).await { Ok(response) => { let status = response.into_inner(); + let rpc_details = RpcStatusDetails::from_rpc_status(status, url); + + // Check for stale chain tip using the store's chain tip + if let Some(store_status) = &rpc_details.store_status { + if let Some(stale_duration) = + stale_tracker.update(store_status.chain_tip, current_time) + { + debug!( + target: COMPONENT, + chain_tip = store_status.chain_tip, + stale_duration_secs = stale_duration, + "Chain tip is stale" + ); + return ServiceStatus { + name: "RPC".to_string(), + status: Status::Unhealthy, + last_checked: current_time, + error: Some(format!( + "Chain tip {} has not changed for {} seconds", + store_status.chain_tip, stale_duration + )), + details: ServiceDetails::RpcStatus(rpc_details), + }; + } + } ServiceStatus { name: "RPC".to_string(), status: Status::Healthy, last_checked: current_time, error: None, - details: ServiceDetails::RpcStatus(status.into()), + details: ServiceDetails::RpcStatus(rpc_details), } }, - Err(e) => ServiceStatus { - name: "RPC".to_string(), - status: Status::Unhealthy, - last_checked: current_time, - error: Some(e.to_string()), - details: ServiceDetails::Error, + Err(e) => { + debug!(target: COMPONENT, error = %e, "RPC status check failed"); + ServiceStatus { + name: "RPC".to_string(), + status: Status::Unhealthy, + last_checked: current_time, + error: Some(e.to_string()), + details: ServiceDetails::Error, + } }, } } @@ -343,7 +504,6 @@ pub(crate) async fn check_rpc_status( /// /// `Ok(())` if the monitoring task runs and 
completes successfully, or an error if there are /// connection issues or failures while checking the remote prover status. -#[instrument(target = COMPONENT, name = "remote-prover-status-task", skip_all)] pub async fn run_remote_prover_status_task( prover_url: Url, name: String, @@ -358,7 +518,8 @@ pub async fn run_remote_prover_status_task( .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let mut interval = tokio::time::interval(status_check_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); @@ -398,10 +559,17 @@ pub async fn run_remote_prover_status_task( /// # Returns /// /// A `ServiceStatus` containing the status of the remote prover service. -#[instrument(target = COMPONENT, name = "check-status.remote-prover", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.check_remote_prover_status", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn check_remote_prover_status( remote_prover: &mut miden_node_proto::clients::RemoteProverProxyStatusClient, - name: String, + display_name: String, url: String, current_time: u64, ) -> ServiceStatus { @@ -422,19 +590,22 @@ pub(crate) async fn check_remote_prover_status( }; ServiceStatus { - name: format!("Remote Prover ({name})"), + name: display_name.clone(), status: overall_health, last_checked: current_time, error: None, details: ServiceDetails::RemoteProverStatus(remote_prover_details), } }, - Err(e) => ServiceStatus { - name: format!("Remote Prover ({name})"), - status: Status::Unhealthy, - last_checked: current_time, - error: Some(e.to_string()), - details: ServiceDetails::Error, + Err(e) => { + debug!(target: COMPONENT, prover_name = %display_name, error = %e, "Remote prover status check failed"); + ServiceStatus { + name: display_name, + status: Status::Unhealthy, + last_checked: 
current_time, + error: Some(e.to_string()), + details: ServiceDetails::Error, + } }, } } diff --git a/bin/node/.env b/bin/node/.env index 75d0bbbbd0..fc4c2793e3 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -9,6 +9,9 @@ MIDEN_NODE_NTX_PROVER_URL= MIDEN_NODE_STORE_RPC_URL= MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= +MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= +MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true +MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE= diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 38db5e109d..b6ade3b4da 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -21,6 +21,7 @@ tracing-forest = ["miden-node-block-producer/tracing-forest"] anyhow = { workspace = true } clap = { features = ["env", "string"], workspace = true } fs-err = { workspace = true } +hex = { workspace = true } humantime = { workspace = true } miden-node-block-producer = { workspace = true } miden-node-ntx-builder = { workspace = true } @@ -28,14 +29,10 @@ miden-node-rpc = { workspace = true } miden-node-store = { workspace = true } miden-node-utils = { workspace = true } miden-node-validator = { workspace = true } -miden-objects = { workspace = true } +miden-protocol = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } url = { workspace = true } [dev-dependencies] figment = { features = ["env", "test", "toml"], version = "0.10" } miden-node-utils = { features = ["tracing-forest"], workspace = true } - -[build-dependencies] -# Required to inject build metadata. -miden-node-utils = { features = ["vergen"], workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index a6b5cae559..832b0bb8d2 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,8 +1,9 @@ FROM rust:1.90-slim-bullseye AS builder +# Install build dependencies. RocksDB is compiled from source by librocksdb-sys. 
RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y llvm clang bindgen pkg-config libssl-dev libsqlite3-dev ca-certificates && \ + apt-get install -y llvm clang libclang-dev pkg-config libssl-dev libsqlite3-dev ca-certificates && \ rm -rf /var/lib/apt/lists/* WORKDIR /app @@ -13,8 +14,6 @@ COPY ./crates ./crates COPY ./proto ./proto RUN cargo install --path bin/node --locked -RUN rm -rf data accounts && mkdir data accounts -RUN miden-node bundled bootstrap --data-directory ./data --accounts-directory ./accounts FROM debian:bullseye-slim @@ -26,8 +25,6 @@ RUN apt-get update && \ sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /app/accounts accounts -COPY --from=builder /app/data data COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ @@ -47,6 +44,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 -# Start the Miden node # Miden node does not spawn sub-processes, so it can be used as the PID1 -CMD miden-node bundled start --rpc.url http://0.0.0.0:57291 --data-directory ./data +CMD miden-node diff --git a/bin/node/build.rs b/bin/node/build.rs deleted file mode 100644 index ae41be1485..0000000000 --- a/bin/node/build.rs +++ /dev/null @@ -1,9 +0,0 @@ -fn main() { - // Configures environment variables for build metadata intended for extended version - // information. - if let Err(e) = miden_node_utils::version::vergen() { - // Don't let an error here bring down the build. Build metadata will be empty which isn't a - // critical failure. 
- println!("cargo:warning=Failed to embed build metadata: {e:?}"); - } -} diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index 6820a38511..5cfbc78fcc 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; use std::time::Duration; use anyhow::Context; use miden_node_block_producer::BlockProducer; use miden_node_utils::grpc::UrlExt; -use tokio::sync::Barrier; use url::Url; use super::{ENV_BLOCK_PRODUCER_URL, ENV_STORE_BLOCK_PRODUCER_URL}; @@ -12,6 +10,7 @@ use crate::commands::{ BlockProducerConfig, DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, + ENV_VALIDATOR_BLOCK_PRODUCER_URL, duration_to_human_readable_string, }; @@ -27,6 +26,10 @@ pub enum BlockProducerCommand { #[arg(long = "store.url", env = ENV_STORE_BLOCK_PRODUCER_URL)] store_url: Url, + /// The validator's service gRPC url. + #[arg(long = "validator.url", env = ENV_VALIDATOR_BLOCK_PRODUCER_URL)] + validator_url: Url, + #[command(flatten)] block_producer: BlockProducerConfig, @@ -55,6 +58,7 @@ impl BlockProducerCommand { let Self::Start { url, store_url, + validator_url, block_producer, enable_otel: _, grpc_timeout, @@ -64,30 +68,31 @@ impl BlockProducerCommand { url.to_socket().context("Failed to extract socket address from store URL")?; // Runtime validation for protocol constraints - if block_producer.max_batches_per_block > miden_objects::MAX_BATCHES_PER_BLOCK { + if block_producer.max_batches_per_block > miden_protocol::MAX_BATCHES_PER_BLOCK { anyhow::bail!( "max-batches-per-block cannot exceed protocol limit of {}", - miden_objects::MAX_BATCHES_PER_BLOCK + miden_protocol::MAX_BATCHES_PER_BLOCK ); } - if block_producer.max_txs_per_batch > miden_objects::MAX_ACCOUNTS_PER_BATCH { + if block_producer.max_txs_per_batch > miden_protocol::MAX_ACCOUNTS_PER_BATCH { anyhow::bail!( "max-txs-per-batch cannot exceed protocol limit of {}", - miden_objects::MAX_ACCOUNTS_PER_BATCH + 
miden_protocol::MAX_ACCOUNTS_PER_BATCH ); } BlockProducer { block_producer_address, store_url, + validator_url, batch_prover_url: block_producer.batch_prover_url, block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, max_batches_per_block: block_producer.max_batches_per_block, - production_checkpoint: Arc::new(Barrier::new(1)), grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() .await @@ -102,6 +107,8 @@ impl BlockProducerCommand { #[cfg(test)] mod tests { + use std::num::NonZeroUsize; + use url::Url; use super::*; @@ -115,13 +122,15 @@ mod tests { let cmd = BlockProducerCommand::Start { url: dummy_url(), store_url: dummy_url(), + validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, - max_batches_per_block: miden_objects::MAX_BATCHES_PER_BLOCK + 1, // Invalid value + max_batches_per_block: miden_protocol::MAX_BATCHES_PER_BLOCK + 1, // Invalid value + mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: false, grpc_timeout: Duration::from_secs(10), @@ -137,15 +146,17 @@ mod tests { let cmd = BlockProducerCommand::Start { url: dummy_url(), store_url: dummy_url(), + validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), - max_txs_per_batch: miden_objects::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol - * limit - * (should fail) */ + max_txs_per_batch: miden_protocol::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol + * limit + * (should fail) */ max_batches_per_block: 8, + mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: 
false, grpc_timeout: Duration::from_secs(10), diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 71c6f5ac11..28fba84e94 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; use std::path::PathBuf; -use std::sync::Arc; use std::time::Duration; use anyhow::Context; @@ -10,8 +9,10 @@ use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; +use miden_protocol::block::BlockSigner; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; -use tokio::sync::Barrier; use tokio::task::JoinSet; use url::Url; @@ -21,6 +22,8 @@ use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, + ENV_VALIDATOR_INSECURE_SECRET_KEY, + INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, duration_to_human_readable_string, }; @@ -43,7 +46,17 @@ pub enum BundledCommand { accounts_directory: PathBuf, /// Constructs the genesis block from the given toml file. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "FILE")] - genesis_config_file: Option, + genesis_config_file: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, /// Runs all three node components in the same process. @@ -82,6 +95,15 @@ pub enum BundledCommand { value_name = "DURATION" )] grpc_timeout: Duration, + + /// Insecure, hex-encoded validator secret key for development and testing purposes. 
+ #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, } @@ -92,12 +114,14 @@ impl BundledCommand { data_directory, accounts_directory, genesis_config_file, + validator_insecure_secret_key, } => { // Currently the bundled bootstrap is identical to the store's bootstrap. crate::commands::store::StoreCommand::Bootstrap { data_directory, accounts_directory, genesis_config_file, + validator_insecure_secret_key, } .handle() .await @@ -110,9 +134,19 @@ impl BundledCommand { ntx_builder, enable_otel: _, grpc_timeout, + validator_insecure_secret_key, } => { - Self::start(rpc_url, data_directory, ntx_builder, block_producer, grpc_timeout) - .await + let secret_key_bytes = hex::decode(validator_insecure_secret_key)?; + let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; + Self::start( + rpc_url, + data_directory, + ntx_builder, + block_producer, + grpc_timeout, + signer, + ) + .await }, } } @@ -124,8 +158,8 @@ impl BundledCommand { ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, grpc_timeout: Duration, + signer: impl BlockSigner + Send + Sync + 'static, ) -> anyhow::Result<()> { - let should_start_ntb = !ntx_builder.disabled; // Start listening on all gRPC urls so that inter-component connections can be created // before each component is fully started up. // @@ -186,32 +220,28 @@ impl BundledCommand { }) .id(); - // A sync point between the ntb and block-producer components. - let checkpoint = if should_start_ntb { - Barrier::new(2) - } else { - Barrier::new(1) - }; - let checkpoint = Arc::new(checkpoint); + let should_start_ntx_builder = !ntx_builder.disabled; // Start block-producer. The block-producer's endpoint is available after loading completes. 
let block_producer_id = join_set .spawn({ - let checkpoint = Arc::clone(&checkpoint); let store_url = Url::parse(&format!("http://{store_block_producer_address}")) .context("Failed to parse URL")?; + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; async move { BlockProducer { block_producer_address, store_url, + validator_url, batch_prover_url: block_producer.batch_prover_url, block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_batches_per_block: block_producer.max_batches_per_block, max_txs_per_batch: block_producer.max_txs_per_batch, - production_checkpoint: checkpoint, grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() .await @@ -223,10 +253,14 @@ impl BundledCommand { let validator_id = join_set .spawn({ async move { - Validator { address: validator_address, grpc_timeout } - .serve() - .await - .context("failed while serving validator component") + Validator { + address: validator_address, + grpc_timeout, + signer, + } + .serve() + .await + .context("failed while serving validator component") } }) .id(); @@ -238,10 +272,13 @@ impl BundledCommand { .context("Failed to parse URL")?; let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) .context("Failed to parse URL")?; + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; Rpc { listener: grpc_rpc, store_url, block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout, } .serve() @@ -262,7 +299,9 @@ impl BundledCommand { let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) .context("Failed to parse URL")?; - if should_start_ntb { + if should_start_ntx_builder { + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; let id = join_set .spawn(async 
move { let block_producer_url = @@ -271,11 +310,11 @@ impl BundledCommand { NetworkTransactionBuilder::new( store_ntx_builder_url, block_producer_url, + validator_url, ntx_builder.tx_prover_url, - ntx_builder.ticker_interval, - checkpoint, + ntx_builder.script_cache_size, ) - .serve_new() + .run() .await .context("failed while serving ntx builder component") }) diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 90c91ccfc0..7e8fa7e69f 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -1,6 +1,12 @@ +use std::num::NonZeroUsize; use std::time::Duration; -use miden_node_block_producer::{DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH}; +use miden_node_block_producer::{ + DEFAULT_BATCH_INTERVAL, + DEFAULT_BLOCK_INTERVAL, + DEFAULT_MAX_BATCHES_PER_BLOCK, + DEFAULT_MAX_TXS_PER_BATCH, +}; use url::Url; pub mod block_producer; @@ -9,6 +15,10 @@ pub mod rpc; pub mod store; pub mod validator; +/// A predefined, insecure validator key for development purposes. 
+const INSECURE_VALIDATOR_KEY_HEX: &str = + "0101010101010101010101010101010101010101010101010101010101010101"; + const ENV_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_BLOCK_PRODUCER_URL"; const ENV_VALIDATOR_URL: &str = "MIDEN_NODE_VALIDATOR_URL"; const ENV_BATCH_PROVER_URL: &str = "MIDEN_NODE_BATCH_PROVER_URL"; @@ -18,16 +28,19 @@ const ENV_RPC_URL: &str = "MIDEN_NODE_RPC_URL"; const ENV_STORE_RPC_URL: &str = "MIDEN_NODE_STORE_RPC_URL"; const ENV_STORE_NTX_BUILDER_URL: &str = "MIDEN_NODE_STORE_NTX_BUILDER_URL"; const ENV_STORE_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_STORE_BLOCK_PRODUCER_URL"; +const ENV_VALIDATOR_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL"; const ENV_DATA_DIRECTORY: &str = "MIDEN_NODE_DATA_DIRECTORY"; const ENV_ENABLE_OTEL: &str = "MIDEN_NODE_ENABLE_OTEL"; const ENV_GENESIS_CONFIG_FILE: &str = "MIDEN_GENESIS_CONFIG_FILE"; const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; +const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; +const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE"; +const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = "MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; -const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(5); -const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(2); const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); +const DEFAULT_NTX_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap(); // Formats a Duration into a human-readable string for display in clap help text. fn duration_to_human_readable_string(duration: Duration) -> String { @@ -38,7 +51,7 @@ fn duration_to_human_readable_string(duration: Duration) -> String { #[derive(clap::Args)] pub struct NtxBuilderConfig { /// Disable spawning the network transaction builder. 
- #[arg(long = "no-ntb", default_value_t = false)] + #[arg(long = "no-ntx-builder", default_value_t = false)] pub disabled: bool, /// The remote transaction prover's gRPC url, used for the ntx builder. If unset, @@ -48,12 +61,20 @@ pub struct NtxBuilderConfig { /// Interval at which to run the network transaction builder's ticker. #[arg( - long = "ntb.interval", + long = "ntx-builder.interval", default_value = &duration_to_human_readable_string(DEFAULT_NTX_TICKER_INTERVAL), value_parser = humantime::parse_duration, value_name = "DURATION" )] pub ticker_interval: Duration, + + #[arg( + long = "ntx-builder.script-cache-size", + env = ENV_NTX_SCRIPT_CACHE_SIZE, + value_name = "NUM", + default_value_t = DEFAULT_NTX_SCRIPT_CACHE_SIZE + )] + pub script_cache_size: NonZeroUsize, } /// Configuration for the Block Producer component @@ -88,10 +109,29 @@ pub struct BlockProducerConfig { pub block_prover_url: Option, /// The number of transactions per batch. - #[arg(long = "max-txs-per-batch", env = ENV_MAX_TXS_PER_BATCH, value_name = "NUM", default_value_t = DEFAULT_MAX_TXS_PER_BATCH)] + #[arg( + long = "max-txs-per-batch", + env = ENV_MAX_TXS_PER_BATCH, + value_name = "NUM", + default_value_t = DEFAULT_MAX_TXS_PER_BATCH + )] pub max_txs_per_batch: usize, /// Maximum number of batches per block. - #[arg(long = "max-batches-per-block", env = ENV_MAX_BATCHES_PER_BLOCK, value_name = "NUM", default_value_t = DEFAULT_MAX_BATCHES_PER_BLOCK)] + #[arg( + long = "max-batches-per-block", + env = ENV_MAX_BATCHES_PER_BLOCK, + value_name = "NUM", + default_value_t = DEFAULT_MAX_BATCHES_PER_BLOCK + )] pub max_batches_per_block: usize, + + /// Maximum number of uncommitted transactions allowed in the mempool. 
+ #[arg( + long = "mempool.tx-capacity", + default_value_t = miden_node_block_producer::DEFAULT_MEMPOOL_TX_CAPACITY, + env = ENV_MEMPOOL_TX_CAPACITY, + value_name = "NUM" + )] + mempool_tx_capacity: NonZeroUsize, } diff --git a/bin/node/src/commands/rpc.rs b/bin/node/src/commands/rpc.rs index ed05546b3f..643734f378 100644 --- a/bin/node/src/commands/rpc.rs +++ b/bin/node/src/commands/rpc.rs @@ -5,7 +5,7 @@ use miden_node_rpc::Rpc; use miden_node_utils::grpc::UrlExt; use url::Url; -use super::{ENV_BLOCK_PRODUCER_URL, ENV_RPC_URL, ENV_STORE_RPC_URL}; +use super::{ENV_BLOCK_PRODUCER_URL, ENV_RPC_URL, ENV_STORE_RPC_URL, ENV_VALIDATOR_URL}; use crate::commands::{DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, duration_to_human_readable_string}; #[derive(clap::Subcommand)] @@ -25,6 +25,10 @@ pub enum RpcCommand { #[arg(long = "block-producer.url", env = ENV_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Option, + /// The validator's gRPC url. + #[arg(long = "validator.url", env = ENV_VALIDATOR_URL, value_name = "URL")] + validator_url: Url, + /// Enables the exporting of traces for OpenTelemetry. 
/// /// This can be further configured using environment variables as defined in the official @@ -51,6 +55,7 @@ impl RpcCommand { url, store_url, block_producer_url, + validator_url, enable_otel: _, grpc_timeout, } = self; @@ -64,6 +69,7 @@ impl RpcCommand { listener, store_url, block_producer_url, + validator_url, grpc_timeout, } .serve() diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 7cc0fb0408..2105b14530 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -5,6 +5,8 @@ use anyhow::Context; use miden_node_store::Store; use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; use miden_node_utils::grpc::UrlExt; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; use url::Url; use super::{ @@ -17,6 +19,8 @@ use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, + ENV_VALIDATOR_INSECURE_SECRET_KEY, + INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; @@ -38,7 +42,17 @@ pub enum StoreCommand { accounts_directory: PathBuf, /// Use the given configuration file to construct the genesis state from. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "GENESIS_CONFIG")] - genesis_config_file: Option, + genesis_config_file: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, /// Starts the store component. 
@@ -90,9 +104,13 @@ impl StoreCommand { data_directory, accounts_directory, genesis_config_file, - } => { - Self::bootstrap(&data_directory, &accounts_directory, genesis_config_file.as_ref()) - }, + validator_insecure_secret_key, + } => Self::bootstrap( + &data_directory, + &accounts_directory, + &genesis_config_file, + validator_insecure_secret_key, + ), StoreCommand::Start { rpc_url, ntx_builder_url, @@ -164,24 +182,23 @@ impl StoreCommand { fn bootstrap( data_directory: &Path, accounts_directory: &Path, - maybe_genesis_config: Option<&PathBuf>, + genesis_config: &PathBuf, + validator_insecure_secret_key: String, ) -> anyhow::Result<()> { - let config = maybe_genesis_config - .map(|genesis_config| { - let toml_str = fs_err::read_to_string(genesis_config)?; - let config = GenesisConfig::read_toml(toml_str.as_str()) - .context(format!("Read from file: {}", genesis_config.display()))?; - Ok::<_, anyhow::Error>(config) - }) - .transpose()? - .unwrap_or_default(); - - let (genesis_state, secrets) = config.into_state()?; + // Decode the validator key. + let signer = SecretKey::read_from_bytes(&hex::decode(validator_insecure_secret_key)?)?; + + // Read the toml. + let toml_str = fs_err::read_to_string(genesis_config)?; + let config = GenesisConfig::read_toml(toml_str.as_str()) + .context(format!("Read from file: {}", genesis_config.display()))?; + + let (genesis_state, secrets) = config.into_state(signer)?; // Create directories if they do not already exist. for directory in &[accounts_directory, data_directory] { - if directory.exists() { - let is_empty = directory.read_dir()?.next().is_none(); + if fs_err::exists(directory)? 
{ + let is_empty = fs_err::read_dir(directory)?.next().is_none(); // If the directory exists and is empty, we store the files there if !is_empty { anyhow::bail!(format!("{} exists but it is not empty.", directory.display())); diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index 2483317817..f543be3013 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -3,12 +3,16 @@ use std::time::Duration; use anyhow::Context; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; use url::Url; use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, + ENV_VALIDATOR_INSECURE_SECRET_KEY, ENV_VALIDATOR_URL, + INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; @@ -35,17 +39,27 @@ pub enum ValidatorCommand { value_name = "DURATION" )] grpc_timeout: Duration, + + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg(long = "insecure.secret-key", env = ENV_VALIDATOR_INSECURE_SECRET_KEY, value_name = "INSECURE_SECRET_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] + insecure_secret_key: String, }, } impl ValidatorCommand { pub async fn handle(self) -> anyhow::Result<()> { - let Self::Start { url, grpc_timeout, .. } = self; + let Self::Start { + url, grpc_timeout, insecure_secret_key, .. 
+ } = self; let address = url.to_socket().context("Failed to extract socket address from validator URL")?; - Validator { address, grpc_timeout } + let signer = SecretKey::read_from_bytes(hex::decode(insecure_secret_key)?.as_ref())?; + + Validator { address, grpc_timeout, signer } .serve() .await .context("failed while serving validator component") diff --git a/bin/node/src/main.rs b/bin/node/src/main.rs index d7a288beec..be4b0d4ae3 100644 --- a/bin/node/src/main.rs +++ b/bin/node/src/main.rs @@ -4,7 +4,6 @@ use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; -use miden_node_utils::version::LongVersion; mod commands; @@ -12,7 +11,7 @@ mod commands; // ================================================================================================ #[derive(Parser)] -#[command(version, about, long_about = None, long_version = long_version().to_string())] +#[command(version, about, long_about = None)] pub struct Cli { #[command(subcommand)] pub command: Command, @@ -84,22 +83,3 @@ async fn main() -> anyhow::Result<()> { cli.command.execute().await } - -// HELPERS & UTILITIES -// ================================================================================================ - -/// Generates [`LongVersion`] using the metadata generated by build.rs. 
-fn long_version() -> LongVersion { - LongVersion { - version: env!("CARGO_PKG_VERSION"), - sha: option_env!("VERGEN_GIT_SHA").unwrap_or_default(), - branch: option_env!("VERGEN_GIT_BRANCH").unwrap_or_default(), - dirty: option_env!("VERGEN_GIT_DIRTY").unwrap_or_default(), - features: option_env!("VERGEN_CARGO_FEATURES").unwrap_or_default(), - rust_version: option_env!("VERGEN_RUSTC_SEMVER").unwrap_or_default(), - host: option_env!("VERGEN_RUSTC_HOST_TRIPLE").unwrap_or_default(), - target: option_env!("VERGEN_CARGO_TARGET_TRIPLE").unwrap_or_default(), - opt_level: option_env!("VERGEN_CARGO_OPT_LEVEL").unwrap_or_default(), - debug: option_env!("VERGEN_CARGO_DEBUG").unwrap_or_default(), - } -} diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 0bcc98a2ed..85bc355f79 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -31,8 +31,9 @@ clap = { features = ["env"], workspace = true } http = { workspace = true } humantime = { workspace = true } miden-block-prover = { workspace = true } +miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { features = ["std"], workspace = true } +miden-protocol = { features = ["std"], workspace = true } miden-tx = { features = ["std"], workspace = true } miden-tx-batch-prover = { features = ["std"], workspace = true } opentelemetry = { version = "0.31" } @@ -41,7 +42,7 @@ pingora-core = { version = "0.6" } pingora-limits = { version = "0.6" } pingora-proxy = { version = "0.6" } prometheus = { version = "0.14" } -prost = { default-features = false, features = ["derive"], version = "0.14" } +prost = { default-features = false, features = ["derive"], workspace = true } reqwest = { version = "0.12" } semver = { version = "1.0" } serde = { features = ["derive"], version = "1.0" } @@ -59,10 +60,10 @@ tracing-opentelemetry = { version = "0.32" } uuid = { features = ["v4"], version = "1.16" } [dev-dependencies] -miden-lib = { features = ["testing"], 
workspace = true } -miden-objects = { features = ["testing"], workspace = true } -miden-testing = { workspace = true } -miden-tx = { features = ["testing"], workspace = true } +miden-protocol = { features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } +miden-testing = { workspace = true } +miden-tx = { features = ["testing"], workspace = true } [build-dependencies] miden-node-proto-build = { features = ["internal"], workspace = true } diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index 2f4cbf6a73..24a70f7312 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -1,10 +1,10 @@ use miden_block_prover::LocalBlockProver; +use miden_node_proto::BlockProofRequest; use miden_node_utils::ErrorReport; -use miden_objects::MIN_PROOF_SECURITY_LEVEL; -use miden_objects::batch::ProposedBatch; -use miden_objects::block::ProposedBlock; -use miden_objects::transaction::TransactionInputs; -use miden_objects::utils::Serializable; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::ProposedBatch; +use miden_protocol::transaction::TransactionInputs; +use miden_protocol::utils::Serializable; use miden_tx::LocalTransactionProver; use miden_tx_batch_prover::LocalBatchProver; use serde::{Deserialize, Serialize}; @@ -165,24 +165,25 @@ impl ProverRpcApi { )] pub fn prove_block( &self, - proposed_block: ProposedBlock, + proof_request: BlockProofRequest, request_id: &str, ) -> Result, tonic::Status> { let Prover::Block(prover) = &self.prover else { return Err(Status::unimplemented("Block prover is not enabled")); }; + let BlockProofRequest { tx_batches, block_header, block_inputs } = proof_request; - let proven_block = prover + // Record the commitment of the block in the current tracing span. 
+ let block_id = block_header.commitment(); + tracing::Span::current().record("block_id", tracing::field::display(&block_id)); + + let block_proof = prover .try_lock() .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? - .prove(proposed_block) + .prove(tx_batches, block_header, block_inputs) .map_err(internal_error)?; - // Record the commitment of the block in the current tracing span - let block_id = proven_block.commitment(); - tracing::Span::current().record("block_id", tracing::field::display(&block_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proven_block.to_bytes() })) + Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) } } @@ -225,8 +226,8 @@ impl ProverApi for ProverRpcApi { self.prove_batch(proposed_batch, &request_id) }, proto::remote_prover::ProofType::Block => { - let proposed_block = proof_request.try_into().map_err(invalid_argument)?; - self.prove_block(proposed_block, &request_id) + let proof_request = proof_request.try_into().map_err(invalid_argument)?; + self.prove_block(proof_request, &request_id) }, } } @@ -251,13 +252,13 @@ mod test { use std::time::Duration; use miden_node_utils::cors::cors_for_grpc_web_layer; - use miden_objects::asset::{Asset, FungibleAsset}; - use miden_objects::note::NoteType; - use miden_objects::testing::account_id::{ + use miden_protocol::asset::{Asset, FungibleAsset}; + use miden_protocol::note::NoteType; + use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER, }; - use miden_objects::transaction::ProvenTransaction; + use miden_protocol::transaction::ProvenTransaction; use miden_testing::{Auth, MockChainBuilder}; use miden_tx::utils::Serializable; use tokio::net::TcpListener; diff --git a/bin/remote-prover/src/generated/conversions.rs b/bin/remote-prover/src/generated/conversions.rs index 885d1e06b7..e1bdc64069 100644 --- a/bin/remote-prover/src/generated/conversions.rs +++ 
b/bin/remote-prover/src/generated/conversions.rs @@ -1,9 +1,9 @@ // CONVERSIONS // ================================================================================================ -use miden_objects::batch::ProposedBatch; -use miden_objects::block::ProposedBlock; -use miden_objects::transaction::{ProvenTransaction, TransactionInputs}; +use miden_node_proto::BlockProofRequest; +use miden_protocol::batch::ProposedBatch; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; use crate::api::ProofType; @@ -39,11 +39,11 @@ impl TryFrom for ProposedBatch { } } -impl TryFrom for ProposedBlock { +impl TryFrom for BlockProofRequest { type Error = DeserializationError; fn try_from(request: proto::ProofRequest) -> Result { - ProposedBlock::read_from_bytes(&request.payload) + BlockProofRequest::read_from_bytes(&request.payload) } } diff --git a/bin/remote-prover/src/generated/remote_prover.rs b/bin/remote-prover/src/generated/remote_prover.rs index 210b691537..b504804c3e 100644 --- a/bin/remote-prover/src/generated/remote_prover.rs +++ b/bin/remote-prover/src/generated/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. 
#[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/bin/remote-prover/src/proxy/health_check.rs b/bin/remote-prover/src/proxy/health_check.rs index 51192b7747..b583c09827 100644 --- a/bin/remote-prover/src/proxy/health_check.rs +++ b/bin/remote-prover/src/proxy/health_check.rs @@ -49,7 +49,7 @@ impl BackgroundService for LoadBalancerState { if let Err(ref reason) = status_result { error!( err = %reason, - worker.address = worker.address(), + worker.name = worker.name(), "Worker failed health check" ); } diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs index 2e3d532848..81290d73a9 100644 --- a/bin/remote-prover/src/proxy/mod.rs +++ b/bin/remote-prover/src/proxy/mod.rs @@ -352,7 +352,7 @@ impl RequestContext { /// Set the worker that will process the request fn set_worker(&mut self, worker: Worker) { - WORKER_REQUEST_COUNT.with_label_values(&[&worker.address()]).inc(); + WORKER_REQUEST_COUNT.with_label_values(&[&worker.name()]).inc(); self.worker = Some(worker); } } @@ -495,7 +495,7 @@ impl ProxyHttp for LoadBalancer { // Check if there is an available worker if let Some(worker) = self.0.pop_available_worker().await { - debug!("Worker {} picked up the request with ID: {}", worker.address(), request_id); + debug!("Worker {} picked up the request with ID: {}", worker.name(), request_id); ctx.set_worker(worker); break; } @@ -508,7 +508,7 @@ impl ProxyHttp for LoadBalancer { // Set SNI let mut http_peer = HttpPeer::new( - ctx.worker.clone().expect("Failed to get worker").address(), + ctx.worker.clone().expect("Failed to get worker").name(), false, String::new(), ); diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs index bf181c613a..aa418e8cb3 100644 --- a/bin/remote-prover/src/proxy/worker.rs +++ 
b/bin/remote-prover/src/proxy/worker.rs @@ -140,14 +140,14 @@ impl Worker { /// - `Ok(())` if the client was successfully created /// - `Err(RemoteProverError)` if the client creation failed async fn recreate_status_client(&mut self) -> Result<(), RemoteProverError> { - let address = self.address(); - match create_status_client(&address, self.connection_timeout, self.total_timeout).await { + let name = self.name(); + match create_status_client(&name, self.connection_timeout, self.total_timeout).await { Ok(client) => { self.status_client = Some(client); Ok(()) }, Err(err) => { - error!("Failed to recreate status client for worker {}: {}", address, err); + error!("Failed to recreate status client for worker {}: {}", name, err); Err(err) }, } @@ -170,7 +170,7 @@ impl Worker { if self.status_client.is_none() { match self.recreate_status_client().await { Ok(()) => { - info!("Successfully recreated status client for worker {}", self.address()); + info!("Successfully recreated status client for worker {}", self.name()); }, Err(err) => { return Err(err.as_report_context("failed to recreate status client")); @@ -181,7 +181,7 @@ impl Worker { let worker_status = match self.status_client.as_mut().unwrap().status(()).await { Ok(response) => response.into_inner(), Err(e) => { - error!("Failed to check worker status ({}): {}", self.address(), e); + error!("Failed to check worker status ({}): {}", self.name(), e); return Err(e.message().to_string()); }, }; @@ -198,7 +198,7 @@ impl Worker { let worker_supported_proof_type = ProofType::try_from(worker_status.supported_proof_type) .inspect_err(|err| { - error!(%err, address=%self.address(), "Failed to convert worker supported proof type"); + error!(%err, name=%self.name(), "Failed to convert worker supported proof type"); })?; if supported_proof_type != worker_supported_proof_type { @@ -271,8 +271,8 @@ impl Worker { self.is_available } - /// Returns the worker address. 
- pub fn address(&self) -> String { + /// Returns the worker name. + pub fn name(&self) -> String { self.backend.addr.to_string() } @@ -325,7 +325,7 @@ impl Worker { } }, WorkerHealthStatus::Unhealthy { .. } => { - WORKER_UNHEALTHY.with_label_values(&[&self.address()]).inc(); + WORKER_UNHEALTHY.with_label_values(&[&self.name()]).inc(); self.is_available = false; }, } @@ -349,7 +349,7 @@ impl From<&Worker> for ProxyWorkerStatus { fn from(worker: &Worker) -> Self { use miden_remote_prover::generated::remote_prover::WorkerHealthStatus as ProtoWorkerHealthStatus; Self { - address: worker.address(), + name: worker.name(), version: worker.version().to_string(), status: match worker.health_status() { WorkerHealthStatus::Healthy => ProtoWorkerHealthStatus::Healthy, diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index 9c3029a829..b9df84d41d 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -22,14 +22,14 @@ fs-err = { workspace = true } futures = { workspace = true } miden-air = { features = ["testing"], workspace = true } miden-block-prover = { features = ["testing"], workspace = true } -miden-lib = { workspace = true } miden-node-block-producer = { workspace = true } miden-node-proto = { workspace = true } miden-node-store = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { workspace = true } +miden-protocol = { workspace = true } +miden-standards = { workspace = true } rand = { workspace = true } rayon = { version = "1.10" } tokio = { workspace = true } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } url = { workspace = true } diff --git a/bin/stress-test/README.md b/bin/stress-test/README.md index dee33fa9d3..4d8c283c6e 100644 --- a/bin/stress-test/README.md +++ b/bin/stress-test/README.md @@ -23,6 +23,7 @@ The endpoints that you can test are: - `sync_state` - `sync_notes` - `sync_nullifiers` +- `sync_transactions` Most benchmarks accept options to control 
the number of iterations and concurrency level. The `load_state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. @@ -153,5 +154,22 @@ P99.9 request latency: 2.289709ms Average nullifiers per response: 21.0348 ``` +- sync-transactions +``` bash +$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-transactions --accounts 5 --block-range 100 + +Average request latency: 1.61454ms +P50 request latency: 1.439584ms +P95 request latency: 3.195001ms +P99 request latency: 4.068709ms +P99.9 request latency: 6.888542ms +Average transactions per response: 1.547 +Pagination statistics: + Total runs: 10000 + Runs triggering pagination: 9971 + Pagination rate: 99.71% + Average pages per run: 2.00 +``` + ## License This project is [MIT licensed](../../LICENSE). diff --git a/bin/stress-test/src/main.rs b/bin/stress-test/src/main.rs index 62b5ddc6dd..095b04caf1 100644 --- a/bin/stress-test/src/main.rs +++ b/bin/stress-test/src/main.rs @@ -3,7 +3,13 @@ use std::path::PathBuf; use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; use seeding::seed_store; -use store::{bench_sync_notes, bench_sync_nullifiers, bench_sync_state, load_state}; +use store::{ + bench_sync_notes, + bench_sync_nullifiers, + bench_sync_state, + bench_sync_transactions, + load_state, +}; mod seeding; mod store; @@ -58,13 +64,26 @@ pub enum Command { #[derive(Subcommand, Clone, Copy)] pub enum Endpoint { + #[command(name = "sync-nullifiers")] SyncNullifiers { /// Number of prefixes to send in each request. #[arg(short, long, value_name = "PREFIXES", default_value = "10")] prefixes: usize, }, + #[command(name = "sync-state")] SyncState, + #[command(name = "sync-notes")] SyncNotes, + #[command(name = "sync-transactions")] + SyncTransactions { + /// Number of accounts to sync transactions for in each request. 
+ #[arg(short, long, value_name = "ACCOUNTS", default_value = "5")] + accounts: usize, + /// Block range size for each request (number of blocks to query). + #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "100")] + block_range: u32, + }, + #[command(name = "load-state")] LoadState, } @@ -98,6 +117,16 @@ async fn main() { Endpoint::SyncNotes => { bench_sync_notes(data_directory, iterations, concurrency).await; }, + Endpoint::SyncTransactions { accounts, block_range } => { + bench_sync_transactions( + data_directory, + iterations, + concurrency, + accounts, + block_range, + ) + .await; + }, Endpoint::LoadState => { load_state(&data_directory).await; }, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index c35a6dd910..e0fe79338f 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -6,28 +6,24 @@ use std::time::{Duration, Instant}; use metrics::SeedingMetrics; use miden_air::ExecutionProof; use miden_block_prover::LocalBlockProver; -use miden_lib::account::auth::AuthRpoFalcon512; -use miden_lib::account::faucets::BasicFungibleFaucet; -use miden_lib::account::wallets::BasicWallet; -use miden_lib::note::create_p2id_note; -use miden_lib::utils::Serializable; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; -use miden_node_proto::generated::rpc_store::rpc_client::RpcClient; +use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_store::{DataDirectory, GenesisState, Store}; use miden_node_utils::tracing::grpc::OtelInterceptor; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ Account, AccountBuilder, AccountDelta, AccountId, + AccountStorage, AccountStorageMode, AccountType, }; -use miden_objects::asset::{Asset, FungibleAsset, TokenSymbol}; -use 
miden_objects::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; -use miden_objects::block::{ +use miden_protocol::asset::{Asset, FungibleAsset, TokenSymbol}; +use miden_protocol::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; +use miden_protocol::block::{ BlockHeader, BlockInputs, BlockNumber, @@ -35,10 +31,12 @@ use miden_objects::block::{ ProposedBlock, ProvenBlock, }; -use miden_objects::crypto::dsa::rpo_falcon512::{PublicKey, SecretKey}; -use miden_objects::crypto::rand::RpoRandomCoin; -use miden_objects::note::{Note, NoteHeader, NoteId, NoteInclusionProof}; -use miden_objects::transaction::{ +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; +use miden_protocol::crypto::dsa::falcon512_rpo::{PublicKey, SecretKey}; +use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::errors::AssetError; +use miden_protocol::note::{Note, NoteHeader, NoteId, NoteInclusionProof}; +use miden_protocol::transaction::{ InputNote, InputNotes, OrderedTransactionHeaders, @@ -47,7 +45,12 @@ use miden_objects::transaction::{ ProvenTransactionBuilder, TransactionHeader, }; -use miden_objects::{AssetError, Felt, ONE, Word}; +use miden_protocol::utils::Serializable; +use miden_protocol::{Felt, ONE, Word}; +use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::account::faucets::BasicFungibleFaucet; +use miden_standards::account::wallets::BasicWallet; +use miden_standards::note::create_p2id_note; use rand::Rng; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use rayon::prelude::ParallelSlice; @@ -88,7 +91,8 @@ pub async fn seed_store( // generate the faucet account and the genesis state let faucet = create_faucet(); let fee_params = FeeParameters::new(faucet.id(), 0).unwrap(); - let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1); + let signer = EcdsaSecretKey::new(); + let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1, signer); 
Store::bootstrap(genesis_state.clone(), &data_directory).expect("store should bootstrap"); // start the store @@ -245,8 +249,13 @@ async fn apply_block( store_client: &StoreClient, metrics: &mut SeedingMetrics, ) -> ProvenBlock { - let proposed_block = ProposedBlock::new(block_inputs, batches).unwrap(); - let proven_block = LocalBlockProver::new(0).prove_dummy(proposed_block).unwrap(); + let proposed_block = ProposedBlock::new(block_inputs.clone(), batches).unwrap(); + let (header, body) = proposed_block.clone().into_header_and_body().unwrap(); + let block_proof = LocalBlockProver::new(0) + .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) + .unwrap(); + let signature = EcdsaSecretKey::new().sign(header.commitment()); + let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); let block_size: usize = proven_block.to_bytes().len(); let start = Instant::now(); @@ -305,8 +314,8 @@ fn create_note(faucet_id: AccountId, target_id: AccountId, rng: &mut RpoRandomCo faucet_id, target_id, vec![asset], - miden_objects::note::NoteType::Public, - Felt::default(), + miden_protocol::note::NoteType::Public, + miden_protocol::note::NoteAttachment::default(), rng, ) .expect("note creation failed") @@ -319,7 +328,7 @@ fn create_account(public_key: PublicKey, index: u64, storage_mode: AccountStorag AccountBuilder::new(init_seed.try_into().unwrap()) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(storage_mode) - .with_auth_component(AuthRpoFalcon512::new(public_key.into())) + .with_auth_component(AuthFalcon512Rpo::new(public_key.into())) .with_component(BasicWallet) .build() .unwrap() @@ -337,7 +346,7 @@ fn create_faucet() -> Account { .account_type(AccountType::FungibleFaucet) .storage_mode(AccountStorageMode::Private) .with_component(BasicFungibleFaucet::new(token_symbol, 2, Felt::new(u64::MAX)).unwrap()) - .with_auth_component(AuthRpoFalcon512::new(key_pair.public_key().into())) + 
.with_auth_component(AuthFalcon512Rpo::new(key_pair.public_key().into())) .build() .unwrap() } @@ -434,10 +443,11 @@ fn create_emit_note_tx( ) -> ProvenTransaction { let initial_account_hash = faucet.commitment(); - let slot = faucet.storage().get_item(2).unwrap(); + let metadata_slot_name = AccountStorage::faucet_sysdata_slot(); + let slot = faucet.storage().get_item(metadata_slot_name).unwrap(); faucet .storage_mut() - .set_item(0, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) + .set_item(metadata_slot_name, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) .unwrap(); faucet.increment_nonce(ONE).unwrap(); diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index d218265212..fa39303aed 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -2,13 +2,15 @@ use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; use futures::{StreamExt, stream}; -use miden_node_proto::generated::rpc_store::rpc_client::RpcClient; +use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_proto::generated::{self as proto}; use miden_node_store::state::State; use miden_node_utils::tracing::grpc::OtelInterceptor; -use miden_objects::account::AccountId; -use miden_objects::note::{NoteDetails, NoteTag}; -use miden_objects::utils::{Deserializable, Serializable}; +use miden_protocol::account::AccountId; +use miden_protocol::note::{NoteDetails, NoteTag}; +use miden_protocol::utils::{Deserializable, Serializable}; +use rand::Rng; +use rand::seq::SliceRandom; use tokio::fs; use tokio::time::sleep; use tonic::service::interceptor::InterceptedService; @@ -87,10 +89,10 @@ pub async fn sync_state( api_client: &mut RpcClient>, account_ids: Vec, block_num: u32, -) -> (Duration, proto::rpc_store::SyncStateResponse) { +) -> (Duration, proto::rpc::SyncStateResponse) { let note_tags = account_ids .iter() - .map(|id| u32::from(NoteTag::from_account_id(*id))) + .map(|id| 
u32::from(NoteTag::with_account_target(*id))) .collect::>(); let account_ids = account_ids @@ -98,7 +100,7 @@ pub async fn sync_state( .map(|id| proto::account::AccountId { id: id.to_bytes() }) .collect::>(); - let sync_request = proto::rpc_store::SyncStateRequest { block_num, note_tags, account_ids }; + let sync_request = proto::rpc::SyncStateRequest { block_num, note_tags, account_ids }; let start = Instant::now(); let response = api_client.sync_state(sync_request).await.unwrap(); @@ -156,10 +158,10 @@ pub async fn sync_notes( ) -> Duration { let note_tags = account_ids .iter() - .map(|id| u32::from(NoteTag::from_account_id(*id))) + .map(|id| u32::from(NoteTag::with_account_target(*id))) .collect::>(); - let sync_request = proto::rpc_store::SyncNotesRequest { - block_range: Some(proto::rpc_store::BlockRange { block_from: 0, block_to: None }), + let sync_request = proto::rpc::SyncNotesRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), note_tags, }; @@ -280,9 +282,9 @@ pub async fn bench_sync_nullifiers( async fn sync_nullifiers( api_client: &mut RpcClient>, nullifiers_prefixes: Vec, -) -> (Duration, proto::rpc_store::SyncNullifiersResponse) { - let sync_request = proto::rpc_store::SyncNullifiersRequest { - block_range: Some(proto::rpc_store::BlockRange { block_from: 0, block_to: None }), +) -> (Duration, proto::rpc::SyncNullifiersResponse) { + let sync_request = proto::rpc::SyncNullifiersRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), nullifiers: nullifiers_prefixes, prefix_len: 16, }; @@ -292,12 +294,200 @@ async fn sync_nullifiers( (start.elapsed(), response.into_inner()) } +// SYNC TRANSACTIONS +// ================================================================================================ + +/// Sends multiple `sync_transactions` requests to the store and prints the performance. 
+/// +/// Arguments: +/// - `data_directory`: directory that contains the database dump file and the accounts ids dump +/// file. +/// - `iterations`: number of requests to send. +/// - `concurrency`: number of requests to send in parallel. +/// - `accounts_per_request`: number of accounts to sync transactions for in each request. +pub async fn bench_sync_transactions( + data_directory: PathBuf, + iterations: usize, + concurrency: usize, + accounts_per_request: usize, + block_range_size: u32, +) { + // load accounts from the dump file + let accounts_file = data_directory.join(ACCOUNTS_FILENAME); + let accounts = fs::read_to_string(&accounts_file) + .await + .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); + let mut account_ids: Vec = accounts + .lines() + .map(|a| AccountId::from_hex(a).expect("invalid account id")) + .collect(); + // Shuffle once so the cycling iterator starts in a random order. + let mut rng = rand::rng(); + account_ids.shuffle(&mut rng); + let mut account_ids = account_ids.into_iter().cycle(); + + let (store_client, _) = start_store(data_directory).await; + + wait_for_store(&store_client).await.unwrap(); + + // Get the latest block number to determine the range + let status = store_client.clone().status(()).await.unwrap().into_inner(); + let chain_tip = status.chain_tip; + + // each request will have `accounts_per_request` account ids and will query a range of blocks + let request = |_| { + let mut client = store_client.clone(); + let account_batch: Vec = + account_ids.by_ref().take(accounts_per_request).collect(); + + // Pick a random window of size `block_range_size` that fits before `chain_tip`. 
+ let max_start = chain_tip.saturating_sub(block_range_size); + let start_block = rand::rng().random_range(0..=max_start); + let end_block = start_block.saturating_add(block_range_size).min(chain_tip); + + tokio::spawn(async move { + sync_transactions_paginated(&mut client, account_batch, start_block, end_block).await + }) + }; + + // create a stream of tasks to send sync_transactions requests + let results = stream::iter(0..iterations) + .map(request) + .buffer_unordered(concurrency) + .map(|res| res.unwrap()) + .collect::>() + .await; + + let timers_accumulator: Vec = results.iter().map(|r| r.duration).collect(); + let responses: Vec = + results.iter().map(|r| r.response.clone()).collect(); + + print_summary(&timers_accumulator); + + #[allow(clippy::cast_precision_loss)] + let average_transactions_per_response = if responses.is_empty() { + 0.0 + } else { + responses.iter().map(|r| r.transactions.len()).sum::() as f64 + / responses.len() as f64 + }; + println!("Average transactions per response: {average_transactions_per_response}"); + + // Calculate pagination statistics + let total_runs = results.len(); + let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); + #[allow(clippy::cast_precision_loss)] + let pagination_rate = if total_runs > 0 { + (paginated_runs as f64 / total_runs as f64) * 100.0 + } else { + 0.0 + }; + #[allow(clippy::cast_precision_loss)] + let avg_pages = if total_runs > 0 { + results.iter().map(|r| r.pages as f64).sum::() / total_runs as f64 + } else { + 0.0 + }; + + println!("Pagination statistics:"); + println!(" Total runs: {total_runs}"); + println!(" Runs triggering pagination: {paginated_runs}"); + println!(" Pagination rate: {pagination_rate:.2}%"); + println!(" Average pages per run: {avg_pages:.2}"); +} + +/// Sends a single `sync_transactions` request to the store and returns a tuple with: +/// - the elapsed time. +/// - the response. 
+pub async fn sync_transactions( + api_client: &mut RpcClient>, + account_ids: Vec, + block_from: u32, + block_to: u32, +) -> (Duration, proto::rpc::SyncTransactionsResponse) { + let account_ids = account_ids + .iter() + .map(|id| proto::account::AccountId { id: id.to_bytes() }) + .collect::>(); + + let sync_request = proto::rpc::SyncTransactionsRequest { + block_range: Some(proto::rpc::BlockRange { block_from, block_to: Some(block_to) }), + account_ids, + }; + + let start = Instant::now(); + let response = api_client.sync_transactions(sync_request).await.unwrap(); + (start.elapsed(), response.into_inner()) +} + +#[derive(Clone)] +struct SyncTransactionsRun { + duration: Duration, + response: proto::rpc::SyncTransactionsResponse, + pages: usize, +} + +async fn sync_transactions_paginated( + api_client: &mut RpcClient>, + account_ids: Vec, + block_from: u32, + block_to: u32, +) -> SyncTransactionsRun { + let mut total_duration = Duration::default(); + let mut aggregated_records = Vec::new(); + let mut next_block_from = block_from; + let mut target_block_to = block_to; + let mut pages = 0usize; + let mut final_pagination_info = None; + + loop { + if next_block_from > target_block_to { + break; + } + + let (elapsed, response) = + sync_transactions(api_client, account_ids.clone(), next_block_from, target_block_to) + .await; + total_duration += elapsed; + pages += 1; + + let info = response.pagination_info.unwrap_or(proto::rpc::PaginationInfo { + chain_tip: target_block_to, + block_num: target_block_to, + }); + + aggregated_records.extend(response.transactions.into_iter()); + let reached_block = info.block_num; + let chain_tip = info.chain_tip; + final_pagination_info = + Some(proto::rpc::PaginationInfo { chain_tip, block_num: reached_block }); + + if reached_block >= chain_tip { + break; + } + + // Request the remaining range up to the reported chain tip + next_block_from = reached_block; + target_block_to = chain_tip; + } + + SyncTransactionsRun { + duration: 
total_duration, + response: proto::rpc::SyncTransactionsResponse { + pagination_info: final_pagination_info, + transactions: aggregated_records, + }, + pages, + } +} + // LOAD STATE // ================================================================================================ pub async fn load_state(data_directory: &Path) { let start = Instant::now(); - let _state = State::load(data_directory).await.unwrap(); + let (termination_ask, _) = tokio::sync::mpsc::channel(1); + let _state = State::load(data_directory, termination_ask).await.unwrap(); let elapsed = start.elapsed(); // Get database path and run SQL commands to count records diff --git a/clippy.toml b/clippy.toml index 3523592a20..2a5815cec4 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1,32 @@ doc-valid-idents = ["..", "SQLite"] + +disallowed-methods = [ + # Use fs_err functions, so the filename is available in the error message + { path = "std::fs::canonicalize", replacement = "fs_err::canonicalize" }, + { path = "std::fs::copy", replacement = "fs_err::copy" }, + { path = "std::fs::create_dir", replacement = "fs_err::create_dir" }, + { path = "std::fs::create_dir_all", replacement = "fs_err::create_dir_all" }, + { path = "std::fs::exists", replacement = "fs_err::exists" }, + { path = "std::fs::hard_link", replacement = "fs_err::hard_link" }, + { path = "std::fs::metadata", replacement = "fs_err::metadata" }, + { path = "std::fs::read", replacement = "fs_err::read" }, + { path = "std::fs::read_dir", replacement = "fs_err::read_dir" }, + { path = "std::fs::read_link", replacement = "fs_err::read_link" }, + { path = "std::fs::read_to_string", replacement = "fs_err::read_to_string" }, + { path = "std::fs::remove_dir", replacement = "fs_err::remove_dir" }, + { path = "std::fs::remove_dir_all", replacement = "fs_err::remove_dir_all" }, + { path = "std::fs::remove_file", replacement = "fs_err::remove_file" }, + { path = "std::fs::rename", replacement = "fs_err::rename" }, + { path = 
"std::fs::set_permissions", replacement = "fs_err::set_permissions" }, + { path = "std::fs::soft_link", replacement = "fs_err::soft_link" }, + { path = "std::fs::symlink_metadata", replacement = "fs_err::symlink_metadata" }, + { path = "std::fs::write", replacement = "fs_err::write" }, + + # Use fs_err::path::PathExt methods, so the filename is available in the error message + { path = "std::path::Path::canonicalize", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::metadata", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::read_dir", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::read_link", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::symlink_metadata", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::try_exists", reason = "Use fs_err::path::PathExt methods" }, +] diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index eba7c6a13b..e5e5511ad1 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -23,19 +23,19 @@ anyhow = { workspace = true } futures = { workspace = true } itertools = { workspace = true } miden-block-prover = { workspace = true } -miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } -miden-objects = { default-features = true, workspace = true } +miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["batch-prover", "block-prover"], workspace = true } +miden-standards = { workspace = true } miden-tx = { default-features = true, workspace = true } miden-tx-batch-prover = { workspace = true } rand = { version = "0.9" } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream 
= { features = ["net"], workspace = true } -tonic = { features = ["transport"], workspace = true } +tonic = { default-features = true, features = ["transport"], workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } @@ -43,11 +43,11 @@ url = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -miden-lib = { features = ["testing"], workspace = true } miden-node-store = { workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing"], workspace = true } -miden-objects = { default-features = true, features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } miden-tx = { features = ["testing"], workspace = true } pretty_assertions = "1.4" rand_chacha = { default-features = false, version = "0.9" } diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index fb84fd28b6..e3cc714c2a 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -7,8 +7,8 @@ use futures::never::Never; use futures::{FutureExt, TryFutureExt}; use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::MIN_PROOF_SECURITY_LEVEL; -use miden_objects::batch::{BatchId, ProposedBatch, ProvenBatch}; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::{BatchId, ProposedBatch, ProvenBatch}; use miden_remote_prover_client::remote_prover::batch_prover::RemoteBatchProver; use miden_tx_batch_prover::LocalBatchProver; use rand::Rng; diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index 6a5cf53efa..a3a36ec4f0 100644 --- a/crates/block-producer/src/block_builder/mod.rs 
+++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,15 +1,24 @@ use std::ops::{Deref, Range}; use std::sync::Arc; +use anyhow::Context; use futures::FutureExt; -use futures::never::Never; use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::MIN_PROOF_SECURITY_LEVEL; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock}; -use miden_objects::note::NoteHeader; -use miden_objects::transaction::TransactionHeader; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::{OrderedBatches, ProvenBatch}; +use miden_protocol::block::{ + BlockBody, + BlockHeader, + BlockInputs, + BlockNumber, + BlockProof, + ProposedBlock, + ProvenBlock, +}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::note::NoteHeader; +use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; use rand::Rng; use tokio::time::Duration; @@ -19,6 +28,7 @@ use url::Url; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; use crate::store::StoreClient; +use crate::validator::BlockProducerValidatorClient; use crate::{COMPONENT, TelemetryInjectorExt}; // BLOCK BUILDER @@ -36,6 +46,8 @@ pub struct BlockBuilder { pub store: StoreClient, + pub validator: BlockProducerValidatorClient, + /// The prover used to prove a proposed block into a proven block. pub block_prover: BlockProver, } @@ -46,6 +58,7 @@ impl BlockBuilder { /// If the block prover URL is not set, the block builder will use the local block prover. 
pub fn new( store: StoreClient, + validator: BlockProducerValidatorClient, block_prover_url: Option, block_interval: Duration, ) -> Self { @@ -61,17 +74,20 @@ impl BlockBuilder { failure_rate: 0.0, block_prover, store, + validator, } } /// Starts the [`BlockBuilder`], infinitely producing blocks at the configured interval. /// + /// Returns only if there was a fatal, unrecoverable error. + /// /// Block production is sequential and consists of /// /// 1. Pulling the next set of batches from the mempool /// 2. Compiling these batches into the next block /// 3. Proving the block (this is simulated using random sleeps) /// 4. Committing the block to the store - pub async fn run(self, mempool: SharedMempool) { + pub async fn run(self, mempool: SharedMempool) -> anyhow::Result<()> { assert!( self.failure_rate < 1.0 && self.failure_rate.is_sign_positive(), "Failure rate must be a percentage" @@ -86,8 +102,16 @@ impl BlockBuilder { loop { interval.tick().await; - // Errors are handled internally by the block building process. - self.build_block(&mempool).await; + // Exit if a fatal error occurred. + // + // No need for error logging since this is handled inside the function. + if let err @ Err(BuildBlockError::Desync { local_chain_tip, .. }) = + self.build_block(&mempool).await + { + return err.with_context(|| { + format!("fatal error while building block {}", local_chain_tip.child()) + }); + } } } @@ -103,7 +127,7 @@ impl BlockBuilder { /// - A failed stage will emit an error event, and both its own span and the root span will be /// marked as errors. 
#[instrument(parent = None, target = COMPONENT, name = "block_builder.build_block", skip_all)] - async fn build_block(&self, mempool: &SharedMempool) { + async fn build_block(&self, mempool: &SharedMempool) -> Result<(), BuildBlockError> { use futures::TryFutureExt; let selected = Self::select_block(mempool).inspect(SelectedBlock::inject_telemetry).await; @@ -112,8 +136,11 @@ impl BlockBuilder { self.get_block_inputs(selected) .inspect_ok(BlockBatchesAndInputs::inject_telemetry) .and_then(|inputs| self.propose_block(inputs)) - .inspect_ok(ProposedBlock::inject_telemetry) - .and_then(|inputs| self.prove_block(inputs)) + .inspect_ok(|(proposed_block, _)| { + ProposedBlock::inject_telemetry(proposed_block); + }) + .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) + .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) .inspect_ok(ProvenBlock::inject_telemetry) // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot // handle errors after it considers the process complete (which makes sense). @@ -121,10 +148,10 @@ impl BlockBuilder { .and_then(|proven_block| self.commit_block(mempool, proven_block)) // Handle errors by propagating the error to the root span and rolling back the block. .inspect_err(|err| Span::current().set_error(err)) - .or_else(|_err| self.rollback_block(mempool, block_num).never_error()) - // All errors were handled and discarded above, so this is just type juggling - // to drop the result. 
- .unwrap_or_else(|_: Never| ()) + .or_else(|err| async { + self.rollback_block(mempool, block_num).await; + Err(err) + }) .await } @@ -155,7 +182,7 @@ impl BlockBuilder { &self, selected_block: SelectedBlock, ) -> Result { - let SelectedBlock { block_number: _, batches } = selected_block; + let SelectedBlock { block_number, batches } = selected_block; let batch_iter = batches.iter(); @@ -163,9 +190,9 @@ impl BlockBuilder { // Note: .cloned() shouldn't be necessary but not having it produces an odd lifetime // error in BlockProducer::serve. Not sure if there's a better fix. Error: // implementation of `FnOnce` is not general enough - // closure with signature `fn(&InputNoteCommitment) -> miden_objects::note::NoteId` must - // implement `FnOnce<(&InputNoteCommitment,)>` ...but it actually implements - // `FnOnce<(&InputNoteCommitment,)>` + // closure with signature `fn(&InputNoteCommitment) -> miden_protocol::note::NoteId` + // must implement `FnOnce<(&InputNoteCommitment,)>` ...but it actually + // implements `FnOnce<(&InputNoteCommitment,)>` batch .input_notes() .iter() @@ -190,6 +217,21 @@ impl BlockBuilder { .await .map_err(BuildBlockError::GetBlockInputsFailed)?; + // Check that the latest committed block in the store matches our expectations. + // + // Desync can occur since the mempool and store are separate components. One example is if + // the block-producer's apply_block gRPC request times out, rolling back the block locally, + // but the store still committed the block on its end. 
+ let store_chain_tip = inputs.prev_block_header().block_num(); + if store_chain_tip.child() != block_number { + return Err(BuildBlockError::Desync { + local_chain_tip: block_number + .parent() + .expect("block being built always has a parent"), + store_chain_tip, + }); + } + Ok(BlockBatchesAndInputs { batches, inputs }) } @@ -197,31 +239,76 @@ impl BlockBuilder { async fn propose_block( &self, batches_inputs: BlockBatchesAndInputs, - ) -> Result { + ) -> Result<(ProposedBlock, BlockInputs), BuildBlockError> { let BlockBatchesAndInputs { batches, inputs } = batches_inputs; let batches = batches.into_iter().map(Arc::unwrap_or_clone).collect(); - let proposed_block = - ProposedBlock::new(inputs, batches).map_err(BuildBlockError::ProposeBlockFailed)?; + let proposed_block = ProposedBlock::new(inputs.clone(), batches) + .map_err(BuildBlockError::ProposeBlockFailed)?; + + Ok((proposed_block, inputs)) + } + + #[instrument(target = COMPONENT, name = "block_builder.validate_block", skip_all, err)] + async fn validate_block( + &self, + proposed_block: ProposedBlock, + block_inputs: BlockInputs, + ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> + { + // Concurrently build the block and validate it via the validator. + let build_result = tokio::task::spawn_blocking({ + let proposed_block = proposed_block.clone(); + move || proposed_block.into_header_and_body() + }); + let signature = self + .validator + .sign_block(proposed_block.clone()) + .await + .map_err(|err| BuildBlockError::ValidateBlockFailed(err.into()))?; + let (header, body) = build_result + .await + .map_err(|err| BuildBlockError::other(format!("task join error: {err}")))? + .map_err(BuildBlockError::ProposeBlockFailed)?; + + // Verify the signature against the built block to ensure that + // the validator has provided a valid signature for the relevant block. 
+ if !signature.verify(header.commitment(), header.validator_key()) { + return Err(BuildBlockError::InvalidSignature); + } - Ok(proposed_block) + let (ordered_batches, ..) = proposed_block.into_parts(); + Ok((ordered_batches, block_inputs, header, signature, body)) } #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] async fn prove_block( &self, - proposed_block: ProposedBlock, + ordered_batches: OrderedBatches, + block_inputs: BlockInputs, + header: BlockHeader, + signature: Signature, + body: BlockBody, ) -> Result { - let proven_block = self.block_prover.prove(proposed_block).await?; + // Prove block using header and body from validator. + let block_proof = self + .block_prover + .prove(ordered_batches.clone(), header.clone(), block_inputs) + .await?; + self.simulate_proving().await; + // SAFETY: The header and body are assumed valid and consistent with the proof. + let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { return Err(BuildBlockError::SecurityLevelTooLow( proven_block.proof_security_level(), MIN_PROOF_SECURITY_LEVEL, )); } - - self.simulate_proving().await; + // TODO(sergerad): Consider removing this validation. Once block proving is implemented, + // this would be replaced with verifying the proof returned from the prover against + // the block header. 
+ validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; Ok(proven_block) } @@ -388,15 +475,55 @@ impl BlockProver { } #[instrument(target = COMPONENT, skip_all, err)] - async fn prove(&self, proposed_block: ProposedBlock) -> Result { + async fn prove( + &self, + tx_batches: OrderedBatches, + block_header: BlockHeader, + block_inputs: BlockInputs, + ) -> Result { match self { - Self::Local(prover) => { - prover.prove(proposed_block).map_err(BuildBlockError::ProveBlockFailed) - }, + Self::Local(prover) => prover + .prove(tx_batches, block_header, block_inputs) + .map_err(BuildBlockError::ProveBlockFailed), Self::Remote(prover) => prover - .prove(proposed_block) + .prove(tx_batches, block_header, block_inputs) .await .map_err(BuildBlockError::RemoteProverClientError), } } } + +/// Validates that the proven block's transaction headers are consistent with the transactions +/// passed in the proposed block. +/// +/// This expects that transactions from the proposed block and proven block are in the same +/// order, as defined by [`OrderedTransactionHeaders`]. +fn validate_tx_headers( + proven_block: &ProvenBlock, + proposed_txs: &OrderedTransactionHeaders, +) -> Result<(), BuildBlockError> { + if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { + return Err(BuildBlockError::other(format!( + "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", + proven_block.body().transactions().as_slice().len(), + proposed_txs.as_slice().len() + ))); + } + + // Because we checked the length matches we can zip the iterators up. + // We expect the transaction headers to be in the same order. 
+ for (proposed_header, proven_header) in proposed_txs + .as_slice() + .iter() + .zip(proven_block.body().transactions().as_slice()) + { + if proposed_header != proven_header { + return Err(BuildBlockError::other(format!( + "transaction header with id {} does not match header of the transaction in the proposed block", + proposed_header.id() + ))); + } + } + + Ok(()) +} diff --git a/crates/block-producer/src/domain/batch.rs b/crates/block-producer/src/domain/batch.rs index 4a36798c73..592a340434 100644 --- a/crates/block-producer/src/domain/batch.rs +++ b/crates/block-producer/src/domain/batch.rs @@ -1,10 +1,10 @@ use std::collections::HashMap; use std::sync::Arc; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::batch::BatchId; -use miden_objects::transaction::TransactionId; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::batch::BatchId; +use miden_protocol::transaction::TransactionId; use crate::domain::transaction::AuthenticatedTransaction; @@ -16,8 +16,8 @@ use crate::domain::transaction::AuthenticatedTransaction; /// /// [Mempool]: crate::mempool::Mempool /// [BatchBuilder]: crate::batch_builder::BatchBuilder -/// [ProposedBatch]: miden_objects::batch::ProposedBatch -/// [ProvenBatch]: miden_objects::batch::ProvenBatch +/// [ProposedBatch]: miden_protocol::batch::ProposedBatch +/// [ProvenBatch]: miden_protocol::batch::ProvenBatch #[derive(Clone, Debug, PartialEq)] pub(crate) struct SelectedBatch { txs: Vec>, diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 2b1ef3ecdf..5b2ab30b32 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -3,11 +3,11 @@ use std::collections::HashSet; use std::sync::Arc; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::note::Nullifier; -use 
miden_objects::transaction::{OutputNote, ProvenTransaction, TransactionId, TxAccountUpdate}; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{NoteHeader, Nullifier}; +use miden_protocol::transaction::{OutputNote, ProvenTransaction, TransactionId, TxAccountUpdate}; use crate::errors::VerifyTxError; use crate::store::TransactionInputs; @@ -95,7 +95,7 @@ impl AuthenticatedTransaction { self.inner .output_notes() .iter() - .map(miden_objects::transaction::OutputNote::commitment) + .map(miden_protocol::transaction::OutputNote::commitment) } pub fn output_notes(&self) -> impl Iterator + '_ { @@ -119,8 +119,7 @@ impl AuthenticatedTransaction { pub fn unauthenticated_note_commitments(&self) -> impl Iterator + '_ { self.inner .unauthenticated_notes() - .copied() - .map(|header| header.commitment()) + .map(NoteHeader::commitment) .filter(|commitment| !self.notes_authenticated_by_store.contains(commitment)) } @@ -144,7 +143,7 @@ impl AuthenticatedTransaction { /// Short-hand for `Self::new` where the input's are setup to match the transaction's initial /// account state. This covers the account's initial state and nullifiers being set to unspent. 
pub fn from_inner(inner: ProvenTransaction) -> Self { - use miden_objects::Word; + use miden_protocol::Word; let store_account_state = match inner.account_update().initial_state_commitment() { zero if zero == Word::empty() => None, diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 9d55617a22..40c74c99f5 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,14 +1,19 @@ -use miden_block_prover::ProvenBlockError; +use core::error::Error as CoreError; + +use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::note::Nullifier; -use miden_objects::transaction::TransactionId; -use miden_objects::{ProposedBatchError, ProposedBlockError, ProvenBatchError, Word}; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::errors::{ProposedBatchError, ProposedBlockError, ProvenBatchError}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::TransactionId; use miden_remote_prover_client::RemoteProverClientError; use thiserror::Error; use tokio::task::JoinError; +use crate::validator::ValidatorError; + // Block-producer errors // ================================================================================================= @@ -16,15 +21,15 @@ use tokio::task::JoinError; pub enum BlockProducerError { /// A block-producer task completed although it should have ran indefinitely. #[error("task {task} completed unexpectedly")] - TaskFailedSuccessfully { task: &'static str }, + UnexpectedTaskCompletion { task: &'static str }, /// A block-producer task panic'd. - #[error("error joining {task} task")] + #[error("task {task} panic'd")] JoinError { task: &'static str, source: JoinError }, /// A block-producer task reported a transport error. 
- #[error("task {task} had a transport error")] - TonicTransportError { + #[error("task {task} failed")] + TaskError { task: &'static str, source: anyhow::Error, }, @@ -115,7 +120,7 @@ pub enum AddTransactionError { }, #[error("transaction deserialization failed")] - TransactionDeserializationFailed(#[source] miden_objects::utils::DeserializationError), + TransactionDeserializationFailed(#[source] miden_protocol::utils::DeserializationError), #[error( "transaction expired at block height {expired_at} but the block height limit was {limit}" @@ -124,6 +129,9 @@ pub enum AddTransactionError { expired_at: BlockNumber, limit: BlockNumber, }, + + #[error("the mempool is at capacity")] + CapacityExceeded, } impl From for AddTransactionError { @@ -160,7 +168,7 @@ impl From for AddTransactionError { #[grpc(internal)] pub enum SubmitProvenBatchError { #[error("batch deserialization failed")] - Deserialization(#[source] miden_objects::utils::DeserializationError), + Deserialization(#[source] miden_protocol::utils::DeserializationError), } // Batch building errors @@ -202,10 +210,21 @@ pub enum BuildBlockError { StoreApplyBlockFailed(#[source] StoreError), #[error("failed to get block inputs from store")] GetBlockInputsFailed(#[source] StoreError), + #[error( + "Desync detected between block-producer's chain tip {local_chain_tip} and the store's {store_chain_tip}" + )] + Desync { + local_chain_tip: BlockNumber, + store_chain_tip: BlockNumber, + }, #[error("failed to propose block")] ProposeBlockFailed(#[source] ProposedBlockError), + #[error("failed to validate block")] + ValidateBlockFailed(#[source] Box), + #[error("block signature is invalid")] + InvalidSignature, #[error("failed to prove block")] - ProveBlockFailed(#[source] ProvenBlockError), + ProveBlockFailed(#[source] BlockProverError), /// We sometimes randomly inject errors into the batch building process to test our failure /// responses. 
#[error("nothing actually went wrong, failure was injected on purpose")] @@ -214,6 +233,21 @@ pub enum BuildBlockError { RemoteProverClientError(#[source] RemoteProverClientError), #[error("block proof security level is too low: {0} < {1}")] SecurityLevelTooLow(u32, u32), + /// Custom error variant for errors not covered by the other variants. + #[error("{error_msg}")] + Other { + error_msg: Box, + source: Option>, + }, +} + +impl BuildBlockError { + /// Creates a custom error using the [`BuildBlockError::Other`] variant from an + /// error message. + pub fn other(message: impl Into) -> Self { + let message: String = message.into(); + Self::Other { error_msg: message.into(), source: None } + } } // Store errors diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 503e29cc16..36ab9b53d1 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -1,5 +1,6 @@ #![recursion_limit = "256"] use std::num::NonZeroUsize; +use std::time::Duration; #[cfg(test)] pub mod test_utils; @@ -9,6 +10,7 @@ mod block_builder; mod domain; mod mempool; pub mod store; +mod validator; #[cfg(feature = "testing")] pub mod errors; @@ -45,13 +47,34 @@ const SERVER_MEMPOOL_STATE_RETENTION: NonZeroUsize = NonZeroUsize::new(5).unwrap /// This rejects transactions which would likely expire before making it into a block. const SERVER_MEMPOOL_EXPIRATION_SLACK: u32 = 2; +/// The interval at which to update the cached mempool statistics. +const CACHED_MEMPOOL_STATS_UPDATE_INTERVAL: Duration = Duration::from_secs(5); + +/// How often a block is created. +pub const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(3); + +/// How often a batch is created. +pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(1); + +/// The default transaction capacity of the mempool. +/// +/// The value is selected such that all transactions should approximately be processed within one +/// minutes with a block time of 5s. 
+#[allow(clippy::cast_sign_loss, reason = "Both durations are positive")] +pub const DEFAULT_MEMPOOL_TX_CAPACITY: NonZeroUsize = NonZeroUsize::new( + DEFAULT_MAX_BATCHES_PER_BLOCK + * DEFAULT_MAX_TXS_PER_BATCH + * (Duration::from_secs(60).div_duration_f32(DEFAULT_BLOCK_INTERVAL)) as usize, +) +.unwrap(); + const _: () = assert!( - DEFAULT_MAX_BATCHES_PER_BLOCK <= miden_objects::MAX_BATCHES_PER_BLOCK, + DEFAULT_MAX_BATCHES_PER_BLOCK <= miden_protocol::MAX_BATCHES_PER_BLOCK, "Server constraint cannot exceed the protocol's constraint" ); const _: () = assert!( - DEFAULT_MAX_TXS_PER_BATCH <= miden_objects::MAX_ACCOUNTS_PER_BATCH, + DEFAULT_MAX_TXS_PER_BATCH <= miden_protocol::MAX_ACCOUNTS_PER_BATCH, "Server constraint cannot exceed the protocol's constraint" ); diff --git a/crates/block-producer/src/mempool/budget.rs b/crates/block-producer/src/mempool/budget.rs index 05a7432840..0a3669ae12 100644 --- a/crates/block-producer/src/mempool/budget.rs +++ b/crates/block-producer/src/mempool/budget.rs @@ -1,5 +1,5 @@ -use miden_objects::batch::ProvenBatch; -use miden_objects::{ +use miden_protocol::batch::ProvenBatch; +use miden_protocol::{ MAX_ACCOUNTS_PER_BATCH, MAX_INPUT_NOTES_PER_BATCH, MAX_OUTPUT_NOTES_PER_BATCH, @@ -63,7 +63,7 @@ impl BatchBudget { // This type assertion reminds us to update the account check if we ever support // multiple account updates per tx. 
pub(crate) const ACCOUNT_UPDATES_PER_TX: usize = 1; - let _: miden_objects::account::AccountId = tx.account_update().account_id(); + let _: miden_protocol::account::AccountId = tx.account_update().account_id(); let output_notes = tx.output_note_count(); let input_notes = tx.input_note_count(); diff --git a/crates/block-producer/src/mempool/inflight_state/tests.rs b/crates/block-producer/src/mempool/inflight_state/tests.rs index 895f32ce47..e4c72f6573 100644 --- a/crates/block-producer/src/mempool/inflight_state/tests.rs +++ b/crates/block-producer/src/mempool/inflight_state/tests.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use miden_node_utils::ErrorReport; -use miden_objects::Word; +use miden_protocol::Word; use super::*; use crate::test_utils::note::{mock_note, mock_output_note}; diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 9e3bcad166..227b7f51be 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -46,9 +46,9 @@ use std::num::NonZeroUsize; use std::sync::Arc; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_objects::batch::{BatchId, ProvenBatch}; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::transaction::TransactionId; +use miden_protocol::batch::{BatchId, ProvenBatch}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::transaction::TransactionId; use subscription::SubscriptionProvider; use tokio::sync::{Mutex, MutexGuard, mpsc}; use tracing::{instrument, warn}; @@ -58,7 +58,12 @@ use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::{AddTransactionError, VerifyTxError}; use crate::mempool::budget::BudgetStatus; use crate::mempool::nodes::{BlockNode, Node, NodeId, ProposedBatchNode, TransactionNode}; -use crate::{COMPONENT, SERVER_MEMPOOL_EXPIRATION_SLACK, SERVER_MEMPOOL_STATE_RETENTION}; +use crate::{ + COMPONENT, + DEFAULT_MEMPOOL_TX_CAPACITY, + 
SERVER_MEMPOOL_EXPIRATION_SLACK, + SERVER_MEMPOOL_STATE_RETENTION, +}; mod budget; pub use budget::{BatchBudget, BlockBudget}; @@ -70,6 +75,9 @@ mod subscription; #[cfg(test)] mod tests; +// MEMPOOL CONFIGURATION +// ================================================================================================ + #[derive(Clone)] pub struct SharedMempool(Arc>); @@ -100,6 +108,13 @@ pub struct MempoolConfig { /// guarantees that the mempool can verify the data against the additional changes so long as /// the data was authenticated against one of the retained blocks. pub state_retention: NonZeroUsize, + + /// The maximum number of uncommitted transactions allowed in the mempool at once. + /// + /// The mempool will reject transactions once it is at capacity. + /// + /// Transactions in batches and uncommitted blocks _do count_ towards this. + pub tx_capacity: NonZeroUsize, } impl Default for MempoolConfig { @@ -109,10 +124,14 @@ impl Default for MempoolConfig { batch_budget: BatchBudget::default(), expiration_slack: SERVER_MEMPOOL_EXPIRATION_SLACK, state_retention: SERVER_MEMPOOL_STATE_RETENTION, + tx_capacity: DEFAULT_MEMPOOL_TX_CAPACITY, } } } +// SHARED MEMPOOL +// ================================================================================================ + impl SharedMempool { #[instrument(target = COMPONENT, name = "mempool.lock", skip_all)] pub async fn lock(&self) -> MutexGuard<'_, Mempool> { @@ -120,6 +139,9 @@ impl SharedMempool { } } +// MEMPOOL +// ================================================================================================ + #[derive(Clone, Debug)] pub struct Mempool { /// Contains the aggregated state of all transactions, batches and blocks currently inflight in @@ -143,6 +165,9 @@ impl PartialEq for Mempool { } impl Mempool { + // CONSTRUCTORS + // -------------------------------------------------------------------------------------------- + /// Creates a new [`SharedMempool`] with the provided configuration. 
pub fn shared(chain_tip: BlockNumber, config: MempoolConfig) -> SharedMempool { SharedMempool(Arc::new(Mutex::new(Self::new(chain_tip, config)))) @@ -158,6 +183,16 @@ impl Mempool { } } + /// Returns the current chain tip height as seen by the mempool. + /// + /// This reflects the latest committed block that the block producer is aware of. + pub fn chain_tip(&self) -> BlockNumber { + self.chain_tip + } + + // TRANSACTION & BATCH LIFECYCLE + // -------------------------------------------------------------------------------------------- + /// Adds a transaction to the mempool. /// /// Sends a [`MempoolEvent::TransactionAdded`] event to subscribers. @@ -174,6 +209,10 @@ impl Mempool { &mut self, tx: Arc, ) -> Result { + if self.nodes.uncommitted_tx_count() >= self.config.tx_capacity.get() { + return Err(AddTransactionError::CapacityExceeded); + } + self.authentication_staleness_check(tx.authentication_height())?; self.expiration_check(tx.expires_at())?; @@ -541,6 +580,9 @@ impl Mempool { self.inject_telemetry(); } + // EVENTS & SUBSCRIPTIONS + // -------------------------------------------------------------------------------------------- + /// Creates a subscription to [`MempoolEvent`] which will be emitted in the order they occur. /// /// Only emits events which occurred after the current committed block. @@ -557,6 +599,27 @@ impl Mempool { self.subscription.subscribe(chain_tip) } + // STATS & INSPECTION + // -------------------------------------------------------------------------------------------- + + /// Returns the number of transactions currently waiting to be batched. + pub fn unbatched_transactions_count(&self) -> usize { + self.nodes.txs.len() + } + + /// Returns the number of batches currently being proven. + pub fn proposed_batches_count(&self) -> usize { + self.nodes.proposed_batches.len() + } + + /// Returns the number of proven batches waiting for block inclusion. 
+ pub fn proven_batches_count(&self) -> usize { + self.nodes.proven_batches.len() + } + + // INTERNAL HELPERS + // -------------------------------------------------------------------------------------------- + /// Adds mempool stats to the current tracing span. /// /// Note that these are only visible in the OpenTelemetry context, as conventional tracing diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index ff2751ef23..461a836c25 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -1,12 +1,12 @@ use std::collections::{HashMap, VecDeque}; use std::sync::Arc; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::batch::{BatchId, ProvenBatch}; -use miden_objects::block::BlockNumber; -use miden_objects::note::{NoteHeader, Nullifier}; -use miden_objects::transaction::{InputNoteCommitment, TransactionHeader, TransactionId}; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::batch::{BatchId, ProvenBatch}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{NoteHeader, Nullifier}; +use miden_protocol::transaction::{InputNoteCommitment, TransactionHeader, TransactionId}; use crate::domain::batch::SelectedBatch; use crate::domain::transaction::AuthenticatedTransaction; @@ -345,18 +345,26 @@ impl Nodes { pub(super) fn inject_telemetry(&self, span: &tracing::Span) { use miden_node_utils::tracing::OpenTelemetrySpanExt; + span.set_attribute("mempool.transactions.uncommitted", self.uncommitted_tx_count()); span.set_attribute("mempool.transactions.unbatched", self.txs.len()); span.set_attribute("mempool.batches.proposed", self.proposed_batches.len()); span.set_attribute("mempool.batches.proven", self.proven_batches.len()); } + + pub(super) fn uncommitted_tx_count(&self) -> usize { + self.txs.len() + + self.proposed_batches.values().map(|b| b.0.txs().len()).sum::() + + 
self.proven_batches.values().map(|b| b.txs.len()).sum::() + + self.proposed_block.as_ref().map(|b| b.1.txs.len()).unwrap_or_default() + } } #[cfg(test)] mod tests { use std::collections::BTreeMap; - use miden_objects::batch::BatchAccountUpdate; - use miden_objects::transaction::{InputNotes, OrderedTransactionHeaders}; + use miden_protocol::batch::BatchAccountUpdate; + use miden_protocol::transaction::{InputNotes, OrderedTransactionHeaders}; use super::*; use crate::test_utils::MockProvenTxBuilder; diff --git a/crates/block-producer/src/mempool/state.rs b/crates/block-producer/src/mempool/state.rs index b4db41657b..93c16f6b6c 100644 --- a/crates/block-producer/src/mempool/state.rs +++ b/crates/block-producer/src/mempool/state.rs @@ -1,9 +1,9 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::note::Nullifier; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::note::Nullifier; use crate::mempool::nodes::{Node, NodeId}; diff --git a/crates/block-producer/src/mempool/subscription.rs b/crates/block-producer/src/mempool/subscription.rs index 70789bdd8d..6bfbf7eaa0 100644 --- a/crates/block-producer/src/mempool/subscription.rs +++ b/crates/block-producer/src/mempool/subscription.rs @@ -3,8 +3,8 @@ use std::ops::Mul; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::domain::note::NetworkNote; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::transaction::{OutputNote, TransactionId}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::transaction::{OutputNote, TransactionId}; use tokio::sync::mpsc; use crate::domain::transaction::AuthenticatedTransaction; @@ -106,7 +106,10 @@ impl SubscriptionProvider { self.inflight_txs.remove(tx); } - Self::send_event(&mut self.subscription, MempoolEvent::BlockCommitted { header, txs }); + Self::send_event( + 
&mut self.subscription, + MempoolEvent::BlockCommitted { header: Box::new(header), txs }, + ); } pub(super) fn txs_reverted(&mut self, txs: HashSet) { diff --git a/crates/block-producer/src/mempool/tests.rs b/crates/block-producer/src/mempool/tests.rs index 0f41e96601..5cafd0137d 100644 --- a/crates/block-producer/src/mempool/tests.rs +++ b/crates/block-producer/src/mempool/tests.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use miden_objects::Word; -use miden_objects::block::{BlockHeader, BlockNumber}; +use miden_protocol::Word; +use miden_protocol::block::{BlockHeader, BlockNumber}; use pretty_assertions::assert_eq; use serial_test::serial; diff --git a/crates/block-producer/src/mempool/tests/add_transaction.rs b/crates/block-producer/src/mempool/tests/add_transaction.rs index d4ea2d4581..1fc611e4ef 100644 --- a/crates/block-producer/src/mempool/tests/add_transaction.rs +++ b/crates/block-producer/src/mempool/tests/add_transaction.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use assert_matches::assert_matches; -use miden_objects::Word; -use miden_objects::block::BlockHeader; +use miden_protocol::Word; +use miden_protocol::block::BlockHeader; use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::AddTransactionError; diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 39753fe831..8245c1ee6b 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::net::SocketAddr; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; @@ -12,12 +13,12 @@ use miden_node_proto_build::block_producer_api_descriptor; use miden_node_utils::formatting::{format_input_notes, format_output_notes}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::BlockNumber; -use 
miden_objects::transaction::ProvenTransaction; -use miden_objects::utils::serde::Deserializable; +use miden_protocol::batch::ProvenBatch; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::ProvenTransaction; +use miden_protocol::utils::serde::Deserializable; use tokio::net::TcpListener; -use tokio::sync::{Barrier, Mutex}; +use tokio::sync::{Mutex, RwLock}; use tokio_stream::wrappers::{ReceiverStream, TcpListenerStream}; use tonic::Status; use tower_http::trace::TraceLayer; @@ -36,7 +37,8 @@ use crate::errors::{ }; use crate::mempool::{BatchBudget, BlockBudget, Mempool, MempoolConfig, SharedMempool}; use crate::store::StoreClient; -use crate::{COMPONENT, SERVER_NUM_BATCH_BUILDERS}; +use crate::validator::BlockProducerValidatorClient; +use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, SERVER_NUM_BATCH_BUILDERS}; /// The block producer server. /// @@ -49,6 +51,8 @@ pub struct BlockProducer { pub block_producer_address: SocketAddr, /// The address of the store component. pub store_url: Url, + /// The address of the validator component. + pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option, /// The address of the block prover component. @@ -61,17 +65,18 @@ pub struct BlockProducer { pub max_txs_per_batch: usize, /// The maximum number of batches per block. pub max_batches_per_block: usize, - /// Block production only begins after this checkpoint barrier completes. - /// - /// The block-producers gRPC endpoint will be available before this point, so this lets the - /// mempool synchronize its event stream without risking a race condition. - pub production_checkpoint: Arc, /// Server-side timeout for an individual gRPC request. /// /// If the handler takes longer than this duration, the server cancels the call. pub grpc_timeout: Duration, + + /// The maximum number of inflight transactions allowed in the mempool at once. 
+ pub mempool_tx_capacity: NonZeroUsize, } +// BLOCK PRODUCER +// ================================================================================================ + impl BlockProducer { /// Serves the block-producer RPC API, the batch-builder and the block-builder. /// @@ -81,6 +86,7 @@ impl BlockProducer { pub async fn serve(self) -> anyhow::Result<()> { info!(target: COMPONENT, endpoint=?self.block_producer_address, store=%self.store_url, "Initializing server"); let store = StoreClient::new(self.store_url.clone()); + let validator = BlockProducerValidatorClient::new(self.validator_url.clone()); // Retry fetching the chain tip from the store until it succeeds. let mut retries_counter = 0; @@ -118,7 +124,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); let block_builder = - BlockBuilder::new(store.clone(), self.block_prover_url, self.block_interval); + BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, @@ -131,6 +137,7 @@ impl BlockProducer { ..BatchBudget::default() }, block_budget: BlockBudget { batches: self.max_batches_per_block }, + tx_capacity: self.mempool_tx_capacity, ..Default::default() }; let mempool = Mempool::shared(chain_tip, mempool); @@ -143,12 +150,7 @@ impl BlockProducer { // any complete or fail, we can shutdown the rest (somewhat) gracefully. let mut tasks = tokio::task::JoinSet::new(); - // Launch the gRPC server and wait at the checkpoint for any other components to be in sync. - // - // This is used to ensure the ntb can subscribe to the mempool events without playing catch - // up caused by block-production. - // - // This is a temporary work-around until the ntb can resync on the fly. + // Launch the gRPC server. 
let rpc_id = tasks .spawn({ let mempool = mempool.clone(); @@ -159,7 +161,6 @@ impl BlockProducer { } }) .id(); - self.production_checkpoint.wait().await; let batch_builder_id = tasks .spawn({ @@ -173,10 +174,7 @@ impl BlockProducer { let block_builder_id = tasks .spawn({ let mempool = mempool.clone(); - async { - block_builder.run(mempool).await; - Ok(()) - } + async { block_builder.run(mempool).await } }) .id(); @@ -202,13 +200,16 @@ impl BlockProducer { task_result .map_err(|source| BlockProducerError::JoinError { task, source }) .map(|(_, result)| match result { - Ok(_) => Err(BlockProducerError::TaskFailedSuccessfully { task }), - Err(source) => Err(BlockProducerError::TonicTransportError { task, source }), + Ok(_) => Err(BlockProducerError::UnexpectedTaskCompletion { task }), + Err(source) => Err(BlockProducerError::TaskError { task, source }), }) .and_then(|x| x)? } } +// BLOCK PRODUCER RPC SERVER +// ================================================================================================ + /// Serves the block producer's RPC [api](api_server::Api). struct BlockProducerRpcServer { /// The mutex effectively rate limits incoming transactions into the mempool by forcing them @@ -220,98 +221,28 @@ struct BlockProducerRpcServer { mempool: Mutex, store: StoreClient, -} - -#[tonic::async_trait] -impl api_server::Api for BlockProducerRpcServer { - async fn submit_proven_transaction( - &self, - request: tonic::Request, - ) -> Result, Status> - { - self.submit_proven_transaction(request.into_inner()) - .await - .map(tonic::Response::new) - // This Status::from mapping takes care of hiding internal errors. - .map_err(Into::into) - } - - async fn submit_proven_batch( - &self, - request: tonic::Request, - ) -> Result, Status> { - self.submit_proven_batch(request.into_inner()) - .await - .map(tonic::Response::new) - // This Status::from mapping takes care of hiding internal errors. 
- .map_err(Into::into) - } - - #[instrument( - target = COMPONENT, - name = "block_producer.server.status", - skip_all, - err - )] - async fn status( - &self, - _request: tonic::Request<()>, - ) -> Result, Status> { - Ok(tonic::Response::new(proto::block_producer::BlockProducerStatus { - version: env!("CARGO_PKG_VERSION").to_string(), - status: "connected".to_string(), - })) - } - - type MempoolSubscriptionStream = MempoolEventSubscription; - - async fn mempool_subscription( - &self, - request: tonic::Request, - ) -> Result, tonic::Status> { - let chain_tip = BlockNumber::from(request.into_inner().chain_tip); - - let subscription = - self.mempool - .lock() - .await - .lock() - .await - .subscribe(chain_tip) - .map_err(|mempool_tip| { - tonic::Status::invalid_argument(format!( - "Mempool's chain tip {mempool_tip} does not match request's {chain_tip}" - )) - })?; - let subscription = ReceiverStream::new(subscription); - - Ok(tonic::Response::new(MempoolEventSubscription { inner: subscription })) - } -} - -struct MempoolEventSubscription { - inner: ReceiverStream, -} - -impl tokio_stream::Stream for MempoolEventSubscription { - type Item = Result; - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.inner - .poll_next_unpin(cx) - .map(|x| x.map(proto::block_producer::MempoolEvent::from).map(Result::Ok)) - } + /// Cached mempool statistics that are updated periodically to avoid locking the mempool + /// for each status request. 
+ cached_mempool_stats: Arc>, } impl BlockProducerRpcServer { pub fn new(mempool: SharedMempool, store: StoreClient) -> Self { - Self { mempool: Mutex::new(mempool), store } + Self { + mempool: Mutex::new(mempool), + store, + cached_mempool_stats: Arc::new(RwLock::new(MempoolStats::default())), + } } + // SERVER STARTUP + // -------------------------------------------------------------------------------------------- + async fn serve(self, listener: TcpListener, timeout: Duration) -> anyhow::Result<()> { + // Start background task to periodically update cached mempool stats + self.spawn_mempool_stats_updater().await; + let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(block_producer_api_descriptor()) .build_v1() @@ -339,6 +270,43 @@ impl BlockProducerRpcServer { .context("failed to serve block producer API") } + /// Starts a background task that periodically updates the cached mempool statistics. + /// + /// This prevents the need to lock the mempool for each status request. 
+ async fn spawn_mempool_stats_updater(&self) { + let cached_mempool_stats = Arc::clone(&self.cached_mempool_stats); + let mempool = self.mempool.lock().await.clone(); + + tokio::spawn(async move { + let mut interval = tokio::time::interval(CACHED_MEMPOOL_STATS_UPDATE_INTERVAL); + + loop { + interval.tick().await; + + let (chain_tip, unbatched_transactions, proposed_batches, proven_batches) = { + let mempool = mempool.lock().await; + ( + mempool.chain_tip(), + mempool.unbatched_transactions_count() as u64, + mempool.proposed_batches_count() as u64, + mempool.proven_batches_count() as u64, + ) + }; + + let mut cache = cached_mempool_stats.write().await; + *cache = MempoolStats { + chain_tip, + unbatched_transactions, + proposed_batches, + proven_batches, + }; + } + }); + } + + // RPC ENDPOINTS + // -------------------------------------------------------------------------------------------- + #[instrument( target = COMPONENT, name = "block_producer.server.submit_proven_transaction", @@ -348,7 +316,7 @@ impl BlockProducerRpcServer { async fn submit_proven_transaction( &self, request: proto::transaction::ProvenTransaction, - ) -> Result { + ) -> Result { debug!(target: COMPONENT, ?request); let tx = ProvenTransaction::read_from_bytes(&request.transaction) @@ -374,11 +342,13 @@ impl BlockProducerRpcServer { // SAFETY: we assume that the rpc component has verified the transaction proof already. 
let tx = AuthenticatedTransaction::new_unchecked(tx, inputs).map(Arc::new)?; - self.mempool.lock().await.lock().await.add_transaction(tx).map(|block_height| { - proto::block_producer::SubmitProvenTransactionResponse { - block_height: block_height.as_u32(), - } - }) + self.mempool + .lock() + .await + .lock() + .await + .add_transaction(tx) + .map(|block_height| proto::blockchain::BlockNumber { block_num: block_height.as_u32() }) } #[instrument( @@ -390,10 +360,120 @@ impl BlockProducerRpcServer { async fn submit_proven_batch( &self, request: proto::transaction::ProvenTransactionBatch, - ) -> Result { + ) -> Result { let _batch = ProvenBatch::read_from_bytes(&request.encoded) .map_err(SubmitProvenBatchError::Deserialization)?; todo!(); } } + +#[tonic::async_trait] +impl api_server::Api for BlockProducerRpcServer { + type MempoolSubscriptionStream = MempoolEventSubscription; + + async fn submit_proven_transaction( + &self, + request: tonic::Request, + ) -> Result, Status> { + self.submit_proven_transaction(request.into_inner()) + .await + .map(tonic::Response::new) + // This Status::from mapping takes care of hiding internal errors. + .map_err(Into::into) + } + + async fn submit_proven_batch( + &self, + request: tonic::Request, + ) -> Result, Status> { + self.submit_proven_batch(request.into_inner()) + .await + .map(tonic::Response::new) + // This Status::from mapping takes care of hiding internal errors. 
+ .map_err(Into::into) + } + + async fn status( + &self, + _request: tonic::Request<()>, + ) -> Result, Status> { + let mempool_stats = *self.cached_mempool_stats.read().await; + + Ok(tonic::Response::new(proto::rpc::BlockProducerStatus { + version: env!("CARGO_PKG_VERSION").to_string(), + status: "connected".to_string(), + chain_tip: mempool_stats.chain_tip.as_u32(), + mempool_stats: Some(mempool_stats.into()), + })) + } + + async fn mempool_subscription( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + let chain_tip = BlockNumber::from(request.into_inner().chain_tip); + + let subscription = + self.mempool + .lock() + .await + .lock() + .await + .subscribe(chain_tip) + .map_err(|mempool_tip| { + tonic::Status::invalid_argument(format!( + "Mempool's chain tip {mempool_tip} does not match request's {chain_tip}" + )) + })?; + let subscription = ReceiverStream::new(subscription); + + Ok(tonic::Response::new(MempoolEventSubscription { inner: subscription })) + } +} + +// MEMPOOL SUBSCRIPTION +// ================================================================================================ + +struct MempoolEventSubscription { + inner: ReceiverStream, +} + +impl tokio_stream::Stream for MempoolEventSubscription { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner + .poll_next_unpin(cx) + .map(|x| x.map(proto::block_producer::MempoolEvent::from).map(Result::Ok)) + } +} + +// MEMPOOL STATISTICS +// ================================================================================================ + +/// Mempool statistics that are updated periodically to avoid locking the mempool. +#[derive(Clone, Copy, Default)] +struct MempoolStats { + /// The mempool's current view of the chain tip height. + chain_tip: BlockNumber, + /// Number of transactions currently in the mempool waiting to be batched. 
+ unbatched_transactions: u64, + /// Number of batches currently being proven. + proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + proven_batches: u64, +} + +impl From for proto::rpc::MempoolStats { + fn from(stats: MempoolStats) -> Self { + proto::rpc::MempoolStats { + unbatched_transactions: stats.unbatched_transactions, + proposed_batches: stats.proposed_batches, + proven_batches: stats.proven_batches, + } + } +} diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index ad23766137..453512597b 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -5,7 +5,7 @@ use miden_node_proto::generated::{ self as proto, block_producer::api_client as block_producer_client, }; use miden_node_store::{GenesisState, Store}; -use miden_objects::{ +use miden_protocol::{ Digest, account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}, transaction::ProvenTransactionBuilder, @@ -94,6 +94,7 @@ async fn block_producer_startup_is_robust_to_network_failures() { ntx_builder_listener, block_producer_listener, data_directory: dir, + grpc_timeout: std::time::Duration::from_secs(30), } .serve() .await @@ -114,13 +115,34 @@ async fn block_producer_startup_is_robust_to_network_failures() { assert!(response.is_ok()); // kill the store - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; // test: request against block-producer api should fail immediately let response = send_request(block_producer_client.clone(), 1).await; assert!(response.is_err()); // test: restart the store and request should succeed + let store_runtime = restart_store(store_addr, data_directory.path()).await; + let response = send_request(block_producer_client.clone(), 2).await; + assert!(response.is_ok()); + + // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly + shutdown_store(store_runtime).await; +} + +/// Shuts 
down the store runtime properly to allow RocksDB to flush before the temp directory is +/// deleted. +async fn shutdown_store(store_runtime: runtime::Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); +} + +/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. +async fn restart_store( + store_addr: std::net::SocketAddr, + data_directory: &std::path::Path, +) -> runtime::Runtime { let rpc_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") @@ -129,26 +151,29 @@ async fn block_producer_startup_is_robust_to_network_failures() { let block_producer_listener = TcpListener::bind(store_addr) .await .expect("store should bind the block-producer port"); - task::spawn(async move { + let dir = data_directory.to_path_buf(); + let store_runtime = + runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); + store_runtime.spawn(async move { Store { rpc_listener, ntx_builder_listener, block_producer_listener, - data_directory: data_directory.path().to_path_buf(), + data_directory: dir, + grpc_timeout: std::time::Duration::from_secs(30), } .serve() .await .expect("store should start serving"); }); - let response = send_request(block_producer_client.clone(), 2).await; - assert!(response.is_ok()); + store_runtime } /// Creates a dummy transaction and submits it to the block producer. 
async fn send_request( mut client: block_producer_client::ApiClient, i: u8, -) -> Result, tonic::Status> +) -> Result, tonic::Status> { let tx = ProvenTransactionBuilder::new( AccountId::dummy( diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index df2972a8ed..a82a60582d 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -3,17 +3,17 @@ use std::fmt::{Display, Formatter}; use std::num::NonZeroU32; use itertools::Itertools; -use miden_node_proto::clients::{Builder, StoreBlockProducer, StoreBlockProducerClient}; +use miden_node_proto::clients::{Builder, StoreBlockProducerClient}; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::errors::{ConversionError, MissingFieldHelper}; use miden_node_proto::{AccountState, generated as proto}; use miden_node_utils::formatting::format_opt; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; -use miden_objects::note::Nullifier; -use miden_objects::transaction::ProvenTransaction; -use miden_objects::utils::Serializable; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::ProvenTransaction; +use miden_protocol::utils::Serializable; use tracing::{debug, info, instrument}; use url::Url; @@ -65,17 +65,13 @@ impl Display for TransactionInputs { } } -impl TryFrom for TransactionInputs { +impl TryFrom for TransactionInputs { type Error = ConversionError; - fn try_from( - response: proto::block_producer_store::TransactionInputs, - ) -> Result { + fn try_from(response: proto::store::TransactionInputs) -> Result { let AccountState { account_id, account_commitment } = response .account_state - 
.ok_or(proto::block_producer_store::TransactionInputs::missing_field(stringify!( - account_state - )))? + .ok_or(proto::store::TransactionInputs::missing_field(stringify!(account_state)))? .try_into()?; let mut nullifiers = HashMap::new(); @@ -83,7 +79,7 @@ impl TryFrom for TransactionInpu let nullifier = nullifier_record .nullifier .ok_or( - proto::block_producer_store::transaction_inputs::NullifierTransactionInputRecord::missing_field( + proto::store::transaction_inputs::NullifierTransactionInputRecord::missing_field( stringify!(nullifier), ), )? @@ -133,7 +129,8 @@ impl StoreClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { client: store } } @@ -145,7 +142,7 @@ impl StoreClient { .client .clone() .get_block_header_by_number(tonic::Request::new( - proto::shared::BlockHeaderByNumberRequest::default(), + proto::rpc::BlockHeaderByNumberRequest::default(), )) .await? .into_inner() @@ -162,7 +159,7 @@ impl StoreClient { &self, proven_tx: &ProvenTransaction, ) -> Result { - let message = proto::block_producer_store::TransactionInputsRequest { + let message = proto::store::TransactionInputsRequest { account_id: Some(proven_tx.account_id().into()), nullifiers: proven_tx.nullifiers().map(Into::into).collect(), unauthenticated_notes: proven_tx @@ -210,7 +207,7 @@ impl StoreClient { unauthenticated_notes: impl Iterator + Send, reference_blocks: impl Iterator + Send, ) -> Result { - let request = tonic::Request::new(proto::block_producer_store::BlockInputsRequest { + let request = tonic::Request::new(proto::store::BlockInputsRequest { account_ids: updated_accounts.map(Into::into).collect(), nullifiers: created_nullifiers.map(proto::primitives::Digest::from).collect(), unauthenticated_notes: unauthenticated_notes @@ -230,7 +227,7 @@ impl StoreClient { block_references: impl Iterator + Send, note_commitments: impl Iterator + Send, ) -> Result { - let request = 
tonic::Request::new(proto::block_producer_store::BatchInputsRequest { + let request = tonic::Request::new(proto::store::BatchInputsRequest { reference_blocks: block_references.map(|(block_num, _)| block_num.as_u32()).collect(), note_commitments: note_commitments.map(proto::primitives::Digest::from).collect(), }); diff --git a/crates/block-producer/src/test_utils/account.rs b/crates/block-producer/src/test_utils/account.rs index 638fcf9be6..0d1e9100bf 100644 --- a/crates/block-producer/src/test_utils/account.rs +++ b/crates/block-producer/src/test_utils/account.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use std::sync::LazyLock; -use miden_objects::account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}; -use miden_objects::{Hasher, Word}; +use miden_protocol::account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}; +use miden_protocol::{Hasher, Word}; pub static MOCK_ACCOUNTS: LazyLock>> = LazyLock::new(Default::default); diff --git a/crates/block-producer/src/test_utils/batch.rs b/crates/block-producer/src/test_utils/batch.rs index 878b155db9..ecbd215863 100644 --- a/crates/block-producer/src/test_utils/batch.rs +++ b/crates/block-producer/src/test_utils/batch.rs @@ -1,9 +1,9 @@ use std::collections::BTreeMap; -use miden_objects::Word; -use miden_objects::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; -use miden_objects::block::BlockNumber; -use miden_objects::transaction::{ +use miden_protocol::Word; +use miden_protocol::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::{ InputNotes, OrderedTransactionHeaders, ProvenTransaction, @@ -19,7 +19,7 @@ pub trait TransactionBatchConstructor { /// This builds a mocked version of a proven batch for testing purposes which can be useful if /// the batch's details don't need to be correct (e.g. if something else is under test but /// requires a transaction batch). 
If you need an actual valid [`ProvenBatch`], build a - /// [`ProposedBatch`](miden_objects::batch::ProposedBatch) first and convert (without proving) + /// [`ProposedBatch`](miden_protocol::batch::ProposedBatch) first and convert (without proving) /// or prove it into a [`ProvenBatch`]. fn mocked_from_transactions<'tx>(txs: impl IntoIterator) -> Self; diff --git a/crates/block-producer/src/test_utils/mod.rs b/crates/block-producer/src/test_utils/mod.rs index 0695ceadf3..007fb60cb3 100644 --- a/crates/block-producer/src/test_utils/mod.rs +++ b/crates/block-producer/src/test_utils/mod.rs @@ -1,8 +1,8 @@ -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::crypto::rand::{FeltRng, RpoRandomCoin}; -use miden_objects::testing::account_id::AccountIdBuilder; -use miden_objects::transaction::TransactionId; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::crypto::rand::{FeltRng, RpoRandomCoin}; +use miden_protocol::testing::account_id::AccountIdBuilder; +use miden_protocol::transaction::TransactionId; mod proven_tx; @@ -34,7 +34,7 @@ impl Random { } pub fn draw_tx_id(&mut self) -> TransactionId { - self.0.draw_word().into() + TransactionId::from_raw(self.0.draw_word()) } pub fn draw_account_id(&mut self) -> AccountId { diff --git a/crates/block-producer/src/test_utils/note.rs b/crates/block-producer/src/test_utils/note.rs index f632453f1d..6defeac83d 100644 --- a/crates/block-producer/src/test_utils/note.rs +++ b/crates/block-producer/src/test_utils/note.rs @@ -1,6 +1,6 @@ -use miden_lib::testing::note::NoteBuilder; -use miden_objects::note::Note; -use miden_objects::transaction::OutputNote; +use miden_protocol::note::Note; +use miden_protocol::transaction::OutputNote; +use miden_standards::testing::note::NoteBuilder; use rand_chacha::ChaCha20Rng; use rand_chacha::rand_core::SeedableRng; diff --git a/crates/block-producer/src/test_utils/proven_tx.rs b/crates/block-producer/src/test_utils/proven_tx.rs 
index 3a52fa5659..b8d67e9fbe 100644 --- a/crates/block-producer/src/test_utils/proven_tx.rs +++ b/crates/block-producer/src/test_utils/proven_tx.rs @@ -3,18 +3,18 @@ use std::sync::Arc; use itertools::Itertools; use miden_node_utils::fee::test_fee; -use miden_objects::account::AccountId; -use miden_objects::asset::FungibleAsset; -use miden_objects::block::BlockNumber; -use miden_objects::note::{Note, Nullifier}; -use miden_objects::transaction::{ +use miden_protocol::account::AccountId; +use miden_protocol::asset::FungibleAsset; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{Note, Nullifier}; +use miden_protocol::transaction::{ InputNote, OutputNote, ProvenTransaction, ProvenTransactionBuilder, }; -use miden_objects::vm::ExecutionProof; -use miden_objects::{Felt, ONE, Word}; +use miden_protocol::vm::ExecutionProof; +use miden_protocol::{Felt, ONE, Word}; use rand::Rng; use super::MockPrivateAccount; @@ -109,7 +109,7 @@ impl MockProvenTxBuilder { .map(|index| { let nullifier = Word::from([ONE, ONE, ONE, Felt::new(index)]); - Nullifier::from(nullifier) + Nullifier::from_raw(nullifier) }) .collect(); @@ -131,7 +131,7 @@ impl MockProvenTxBuilder { .map(|note_index| { let note = Note::mock_noop(Word::from([0, 0, 0, note_index])); - OutputNote::Header(*note.header()) + OutputNote::Header(note.header().clone()) }) .collect(); diff --git a/crates/block-producer/src/validator/mod.rs b/crates/block-producer/src/validator/mod.rs new file mode 100644 index 0000000000..9844e2d9b3 --- /dev/null +++ b/crates/block-producer/src/validator/mod.rs @@ -0,0 +1,66 @@ +use miden_node_proto::clients::{Builder, ValidatorClient}; +use miden_node_proto::generated as proto; +use miden_protocol::block::ProposedBlock; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use thiserror::Error; +use tracing::{info, instrument}; +use url::Url; + +use crate::COMPONENT; + +// VALIDATOR 
ERROR +// ================================================================================================ + +#[derive(Debug, Error)] +pub enum ValidatorError { + #[error("gRPC transport error: {0}")] + Transport(#[from] tonic::Status), + #[error("signature deserialization failed: {0}")] + Deserialization(#[from] DeserializationError), +} + +// VALIDATOR CLIENT +// ================================================================================================ + +/// Interface to the validator's gRPC API. +/// +/// Essentially just a thin wrapper around the generated gRPC client which improves type safety. +#[derive(Clone, Debug)] +pub struct BlockProducerValidatorClient { + client: ValidatorClient, +} + +impl BlockProducerValidatorClient { + /// Creates a new validator client with a lazy connection. + pub fn new(validator_url: Url) -> Self { + info!(target: COMPONENT, validator_endpoint = %validator_url, "Initializing validator client"); + + let validator = Builder::new(validator_url) + .without_tls() + .without_timeout() + .without_metadata_version() + .without_metadata_genesis() + .with_otel_context_injection() + .connect_lazy::(); + + Self { client: validator } + } + + #[instrument(target = COMPONENT, name = "validator.client.validate_block", skip_all, err)] + pub async fn sign_block( + &self, + proposed_block: ProposedBlock, + ) -> Result { + // Send request and receive response. + let message = proto::blockchain::ProposedBlock { + proposed_block: proposed_block.to_bytes(), + }; + let request = tonic::Request::new(message); + let response = self.client.clone().sign_block(request).await?; + + // Deserialize the signature. 
+ let signature = response.into_inner(); + Signature::read_from_bytes(&signature.signature).map_err(ValidatorError::Deserialization) + } +} diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 7eefab8e49..06ed8eb3bc 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -16,19 +16,23 @@ workspace = true [dependencies] anyhow = { workspace = true } futures = { workspace = true } -lru = { workspace = true } +indexmap = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { default-features = true, workspace = true } +miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["tx-prover"], workspace = true } miden-tx = { default-features = true, workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } +tokio-util = { version = "0.7" } tonic = { workspace = true } tracing = { workspace = true } url = { workspace = true } [dev-dependencies] miden-node-test-macro = { path = "../test-macro" } +miden-node-utils = { features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { workspace = true } rstest = { workspace = true } diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs new file mode 100644 index 0000000000..a130b8079f --- /dev/null +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -0,0 +1,345 @@ +use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::num::NonZeroUsize; + +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_protocol::account::Account; +use 
miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{Note, Nullifier}; +use miden_protocol::transaction::{PartialBlockchain, TransactionId}; +use tracing::instrument; + +use super::ActorShutdownReason; +use super::note_state::{NetworkAccountEffect, NetworkAccountNoteState}; +use crate::COMPONENT; +use crate::actor::inflight_note::InflightNetworkNote; +use crate::builder::ChainState; +use crate::store::{StoreClient, StoreError}; + +// TRANSACTION CANDIDATE +// ================================================================================================ + +/// A candidate network transaction. +/// +/// Contains the data pertaining to a specific network account which can be used to build a network +/// transaction. +#[derive(Clone, Debug)] +pub struct TransactionCandidate { + /// The current inflight state of the account. + pub account: Account, + + /// A set of notes addressed to this network account. + pub notes: Vec, + + /// The latest locally committed block header. + /// + /// This should be used as the reference block during transaction execution. + pub chain_tip_header: BlockHeader, + + /// The chain MMR, which lags behind the tip by one block. + pub chain_mmr: PartialBlockchain, +} + +// NETWORK ACCOUNT STATE +// ================================================================================================ + +/// The current state of a network account. +#[derive(Clone)] +pub struct NetworkAccountState { + /// The network account ID corresponding to the network account this state represents. + account_id: NetworkAccountId, + + /// Component of this state which Contains the committed and inflight account updates as well + /// as available and nullified notes. + account: NetworkAccountNoteState, + + /// Uncommitted transactions which have some impact on the network state. + /// + /// This is tracked so we can commit or revert such transaction effects. 
Transactions _without_ + /// an impact are ignored. + inflight_txs: BTreeMap, + + /// A set of nullifiers which have been registered for the network account. + nullifier_idx: HashSet, +} + +impl NetworkAccountState { + /// Maximum number of attempts to execute a network note. + const MAX_NOTE_ATTEMPTS: usize = 30; + + /// Load's all available network notes from the store, along with the required account states. + #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] + pub async fn load( + account: Account, + account_id: NetworkAccountId, + store: &StoreClient, + block_num: BlockNumber, + ) -> Result { + let notes = store.get_unconsumed_network_notes(account_id, block_num.as_u32()).await?; + let notes = notes + .into_iter() + .map(|note| { + let NetworkNote::SingleTarget(note) = note; + note + }) + .collect::>(); + let account = NetworkAccountNoteState::new(account, notes); + + let state = Self { + account, + account_id, + inflight_txs: BTreeMap::default(), + nullifier_idx: HashSet::default(), + }; + + state.inject_telemetry(); + + Ok(state) + } + + /// Selects the next candidate network transaction. + #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] + pub fn select_candidate( + &mut self, + limit: NonZeroUsize, + chain_state: ChainState, + ) -> Option { + // Remove notes that have failed too many times. + self.account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); + + // Skip empty accounts, and prune them. + // This is how we keep the number of accounts bounded. + if self.account.is_empty() { + return None; + } + + // Select notes from the account that can be consumed or are ready for a retry. + let notes = self + .account + .available_notes(&chain_state.chain_tip_header.block_num()) + .take(limit.get()) + .cloned() + .collect::>(); + + // Skip accounts with no available notes. 
+ if notes.is_empty() { + return None; + } + + let (chain_tip_header, chain_mmr) = chain_state.into_parts(); + TransactionCandidate { + account: self.account.latest_account(), + notes, + chain_tip_header, + chain_mmr, + } + .into() + } + + /// Marks notes of a previously selected candidate as failed. + /// + /// Does not remove the candidate from the in-progress pool. + #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] + pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) { + let nullifiers = notes.iter().map(Note::nullifier).collect::>(); + self.account.fail_notes(nullifiers.as_slice(), block_num); + } + + /// Updates state with the mempool event. + #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] + pub fn mempool_update(&mut self, update: &MempoolEvent) -> Option { + let span = tracing::Span::current(); + span.set_attribute("mempool_event.kind", update.kind()); + + match update { + MempoolEvent::TransactionAdded { + id, + nullifiers, + network_notes, + account_delta, + } => { + // Filter network notes relevant to this account. + let network_notes = filter_by_account_id_and_map_to_single_target( + self.account_id, + network_notes.clone(), + ); + self.add_transaction(*id, nullifiers, &network_notes, account_delta.as_ref()); + }, + MempoolEvent::TransactionsReverted(txs) => { + for tx in txs { + let shutdown_reason = self.revert_transaction(*tx); + if shutdown_reason.is_some() { + return shutdown_reason; + } + } + }, + MempoolEvent::BlockCommitted { txs, .. } => { + for tx in txs { + self.commit_transaction(*tx); + } + }, + } + self.inject_telemetry(); + + // No shutdown, continue running actor. + None + } + + /// Handles a [`MempoolEvent::TransactionAdded`] event. + fn add_transaction( + &mut self, + id: TransactionId, + nullifiers: &[Nullifier], + network_notes: &[SingleTargetNetworkNote], + account_delta: Option<&AccountUpdateDetails>, + ) { + // Skip transactions we already know about. 
+ // + // This can occur since both ntx builder and the mempool might inform us of the same + // transaction. Once when it was submitted to the mempool, and once by the mempool event. + if self.inflight_txs.contains_key(&id) { + return; + } + + let mut tx_impact = TransactionImpact::default(); + if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { + let account_id = update.network_account_id(); + if account_id == self.account_id { + match update { + NetworkAccountEffect::Updated(account_delta) => { + self.account.add_delta(&account_delta); + tx_impact.account_delta = Some(account_id); + }, + NetworkAccountEffect::Created(_) => {}, + } + } + } + for note in network_notes { + assert_eq!( + note.account_id(), + self.account_id, + "note's account ID does not match network account actor's account ID" + ); + tx_impact.notes.insert(note.nullifier()); + self.nullifier_idx.insert(note.nullifier()); + self.account.add_note(note.clone()); + } + for nullifier in nullifiers { + // Ignore nullifiers that aren't network note nullifiers. + if !self.nullifier_idx.contains(nullifier) { + continue; + } + tx_impact.nullifiers.insert(*nullifier); + // We don't use the entry wrapper here because the account must already exist. + let _ = self.account.add_nullifier(*nullifier); + } + + if !tx_impact.is_empty() { + self.inflight_txs.insert(id, tx_impact); + } + } + + /// Handles [`MempoolEvent::BlockCommitted`] events. + fn commit_transaction(&mut self, tx: TransactionId) { + // We only track transactions which have an impact on the network state. + let Some(impact) = self.inflight_txs.remove(&tx) else { + return; + }; + + if let Some(delta_account_id) = impact.account_delta { + if delta_account_id == self.account_id { + self.account.commit_delta(); + } + } + + for nullifier in impact.nullifiers { + if self.nullifier_idx.remove(&nullifier) { + // Its possible for the account to no longer exist if the transaction creating it + // was reverted. 
+ self.account.commit_nullifier(nullifier); + } + } + } + + /// Handles [`MempoolEvent::TransactionsReverted`] events. + fn revert_transaction(&mut self, tx: TransactionId) -> Option { + // We only track transactions which have an impact on the network state. + let Some(impact) = self.inflight_txs.remove(&tx) else { + tracing::debug!("transaction {tx} not found in inflight transactions"); + return None; + }; + + // Revert account creation. + if let Some(account_id) = impact.account_delta { + // Account creation reverted, actor must stop. + if account_id == self.account_id && self.account.revert_delta() { + return Some(ActorShutdownReason::AccountReverted(account_id)); + } + } + + // Revert notes. + for note_nullifier in impact.notes { + if self.nullifier_idx.contains(¬e_nullifier) { + self.account.revert_note(note_nullifier); + self.nullifier_idx.remove(¬e_nullifier); + } + } + + // Revert nullifiers. + for nullifier in impact.nullifiers { + if self.nullifier_idx.contains(&nullifier) { + self.account.revert_nullifier(nullifier); + self.nullifier_idx.remove(&nullifier); + } + } + + None + } + + /// Adds stats to the current tracing span. + /// + /// Note that these are only visible in the OpenTelemetry context, as conventional tracing + /// does not track fields added dynamically. + fn inject_telemetry(&self) { + let span = tracing::Span::current(); + + span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); + span.set_attribute("ntx.state.notes.total", self.nullifier_idx.len()); + } +} + +/// The impact a transaction has on the state. +#[derive(Clone, Default)] +struct TransactionImpact { + /// The network account this transaction added an account delta to. + account_delta: Option, + + /// Network notes this transaction created. + notes: BTreeSet, + + /// Network notes this transaction consumed. 
+ nullifiers: BTreeSet, +} + +impl TransactionImpact { + fn is_empty(&self) -> bool { + self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() + } +} + +/// Filters network notes by account ID and maps them to single target network notes. +fn filter_by_account_id_and_map_to_single_target( + account_id: NetworkAccountId, + notes: Vec, +) -> Vec { + notes + .into_iter() + .filter_map(|note| match note { + NetworkNote::SingleTarget(note) if note.account_id() == account_id => Some(note), + NetworkNote::SingleTarget(_) => None, + }) + .collect::>() +} diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs new file mode 100644 index 0000000000..c53dc96f69 --- /dev/null +++ b/crates/ntx-builder/src/actor/execute.rs @@ -0,0 +1,543 @@ +use std::collections::{BTreeMap, BTreeSet}; +use std::sync::Arc; + +use miden_node_proto::clients::ValidatorClient; +use miden_node_proto::generated::{self as proto}; +use miden_node_utils::lru_cache::LruCache; +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_protocol::Word; +use miden_protocol::account::{ + Account, + AccountId, + AccountStorageHeader, + PartialAccount, + StorageMapWitness, + StorageSlotName, + StorageSlotType, +}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::errors::TransactionInputError; +use miden_protocol::note::{Note, NoteScript}; +use miden_protocol::transaction::{ + AccountInputs, + ExecutedTransaction, + InputNote, + InputNotes, + PartialBlockchain, + ProvenTransaction, + TransactionArgs, + TransactionId, + TransactionInputs, +}; +use miden_protocol::vm::FutureMaybeSend; +use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_tx::auth::UnreachableAuth; +use miden_tx::utils::Serializable; +use miden_tx::{ + DataStore, + DataStoreError, + FailedNote, + LocalTransactionProver, + MastForestStore, + 
+    NoteCheckerError,
+    NoteConsumptionChecker,
+    NoteConsumptionInfo,
+    TransactionExecutor,
+    TransactionExecutorError,
+    TransactionMastStore,
+    TransactionProverError,
+};
+use tokio::sync::Mutex;
+use tokio::task::JoinError;
+use tracing::{Instrument, instrument};
+
+use crate::COMPONENT;
+use crate::actor::account_state::TransactionCandidate;
+use crate::block_producer::BlockProducerClient;
+use crate::store::StoreClient;
+
+#[derive(Debug, thiserror::Error)]
+pub enum NtxError {
+    #[error("note inputs were invalid")]
+    InputNotes(#[source] TransactionInputError),
+    #[error("failed to filter notes")]
+    NoteFilter(#[source] NoteCheckerError),
+    #[error("all notes failed to be executed")]
+    AllNotesFailed(Vec<FailedNote>),
+    #[error("failed to execute transaction")]
+    Execution(#[source] TransactionExecutorError),
+    #[error("failed to prove transaction")]
+    Proving(#[source] TransactionProverError),
+    #[error("failed to submit transaction")]
+    Submission(#[source] tonic::Status),
+    #[error("the ntx task panicked")]
+    Panic(#[source] JoinError),
+}
+
+type NtxResult<T> = Result<T, NtxError>;
+
+// NETWORK TRANSACTION CONTEXT
+// ================================================================================================
+
+/// Provides the context for execution [network transaction candidates](TransactionCandidate).
+#[derive(Clone)]
+pub struct NtxContext {
+    /// TODO(sergerad): Remove block producer client when block proving moved to store.
+    block_producer: BlockProducerClient,
+
+    /// Client for validating transactions via the Validator.
+    validator: ValidatorClient,
+
+    /// The prover to delegate proofs to.
+    ///
+    /// Defaults to local proving if unset. This should be avoided in production as this is
+    /// computationally intensive.
+    prover: Option<RemoteTransactionProver>,
+
+    /// The store client for retrieving note scripts.
+    store: StoreClient,
+
+    /// LRU cache for storing retrieved note scripts to avoid repeated store calls.
+ script_cache: LruCache, +} + +impl NtxContext { + /// Creates a new [`NtxContext`] instance. + pub fn new( + block_producer: BlockProducerClient, + validator: ValidatorClient, + prover: Option, + store: StoreClient, + script_cache: LruCache, + ) -> Self { + Self { + block_producer, + validator, + prover, + store, + script_cache, + } + } + + /// Executes a transaction end-to-end: filtering, executing, proving, and submitted to the block + /// producer. + /// + /// The provided [`TransactionCandidate`] is processed in the following stages: + /// 1. Note filtering – all input notes are checked for consumability. Any notes that cannot be + /// executed are returned as [`FailedNote`]s. + /// 2. Execution – the remaining notes are executed against the account state. + /// 3. Proving – a proof is generated for the executed transaction. + /// 4. Submission – the proven transaction is submitted to the block producer. + /// + /// # Returns + /// + /// On success, returns the [`TransactionId`] of the executed transaction and a list of + /// [`FailedNote`]s representing notes that were filtered out before execution. + /// + /// # Errors + /// + /// Returns an [`NtxError`] if any step of the pipeline fails, including: + /// - Note filtering (e.g., all notes fail consumability checks). + /// - Transaction execution. + /// - Proof generation. + /// - Submission to the network. 
+ #[instrument(target = COMPONENT, name = "ntx.execute_transaction", skip_all, err)] + pub fn execute_transaction( + self, + tx: TransactionCandidate, + ) -> impl FutureMaybeSend)>> { + let TransactionCandidate { + account, + notes, + chain_tip_header, + chain_mmr, + } = tx; + tracing::Span::current().set_attribute("account.id", account.id()); + tracing::Span::current() + .set_attribute("account.id.network_prefix", account.id().prefix().to_string().as_str()); + tracing::Span::current().set_attribute("notes.count", notes.len()); + tracing::Span::current() + .set_attribute("reference_block.number", chain_tip_header.block_num()); + + async move { + Box::pin(async move { + let data_store = NtxDataStore::new( + account, + chain_tip_header, + chain_mmr, + self.store.clone(), + self.script_cache.clone(), + ); + + // Filter notes. + let notes = notes.into_iter().map(Note::from).collect::>(); + let (successful_notes, failed_notes) = + self.filter_notes(&data_store, notes).await?; + + // Execute transaction. + let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; + + // Prove transaction. + let tx_inputs: TransactionInputs = executed_tx.into(); + let proven_tx = Box::pin(self.prove(&tx_inputs)).await?; + + // Validate proven transaction. + self.validate(&proven_tx, &tx_inputs).await?; + + // Submit transaction to block producer. + self.submit(&proven_tx).await?; + + Ok((proven_tx.id(), failed_notes)) + }) + .in_current_span() + .await + .inspect_err(|err| tracing::Span::current().set_error(err)) + } + } + + /// Filters a collection of notes, returning only those that can be successfully executed + /// against the given network account. + /// + /// This function performs a consumability check on each provided note and partitions them into + /// two sets: + /// - Successful notes: notes that can be executed and are returned wrapped in [`InputNotes`]. + /// - Failed notes: notes that cannot be executed. 
+ /// + /// # Guarantees + /// + /// - On success, the returned [`InputNotes`] set is guaranteed to be non-empty. + /// - The original ordering of notes is not preserved if any notes have failed. + /// + /// # Errors + /// + /// Returns an [`NtxError`] if: + /// - The consumability check fails unexpectedly. + /// - All notes fail the check (i.e., no note is consumable). + #[instrument(target = COMPONENT, name = "ntx.execute_transaction.filter_notes", skip_all, err)] + async fn filter_notes( + &self, + data_store: &NtxDataStore, + notes: Vec, + ) -> NtxResult<(InputNotes, Vec)> { + let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = + TransactionExecutor::new(data_store); + let checker = NoteConsumptionChecker::new(&executor); + + match Box::pin(checker.check_notes_consumability( + data_store.account.id(), + data_store.reference_block.block_num(), + notes, + TransactionArgs::default(), + )) + .await + { + Ok(NoteConsumptionInfo { successful, failed, .. }) => { + // Map successful notes to input notes. + let successful = InputNotes::from_unauthenticated_notes(successful) + .map_err(NtxError::InputNotes)?; + + // If none are successful, abort. + if successful.is_empty() { + return Err(NtxError::AllNotesFailed(failed)); + } + + Ok((successful, failed)) + }, + Err(err) => return Err(NtxError::NoteFilter(err)), + } + } + + /// Creates an executes a transaction with the network account and the given set of notes. 
+ #[instrument(target = COMPONENT, name = "ntx.execute_transaction.execute", skip_all, err)] + async fn execute( + &self, + data_store: &NtxDataStore, + notes: InputNotes, + ) -> NtxResult { + let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = + TransactionExecutor::new(data_store); + + Box::pin(executor.execute_transaction( + data_store.account.id(), + data_store.reference_block.block_num(), + notes, + TransactionArgs::default(), + )) + .await + .map_err(NtxError::Execution) + } + + /// Delegates the transaction proof to the remote prover if configured, otherwise performs the + /// proof locally. + #[instrument(target = COMPONENT, name = "ntx.execute_transaction.prove", skip_all, err)] + async fn prove(&self, tx_inputs: &TransactionInputs) -> NtxResult { + if let Some(remote) = &self.prover { + remote.prove(tx_inputs).await + } else { + // Only perform tx inptus clone for local proving. + let tx_inputs = tx_inputs.clone(); + tokio::task::spawn_blocking(move || LocalTransactionProver::default().prove(tx_inputs)) + .await + .map_err(NtxError::Panic)? + } + .map_err(NtxError::Proving) + } + + /// Submits the transaction to the block producer. + #[instrument(target = COMPONENT, name = "ntx.execute_transaction.submit", skip_all, err)] + async fn submit(&self, proven_tx: &ProvenTransaction) -> NtxResult<()> { + self.block_producer + .submit_proven_transaction(proven_tx) + .await + .map_err(NtxError::Submission) + } + + /// Validates the transaction against the Validator. 
+ #[instrument(target = COMPONENT, name = "ntx.execute_transaction.validate", skip_all, err)] + async fn validate( + &self, + proven_tx: &ProvenTransaction, + tx_inputs: &TransactionInputs, + ) -> NtxResult<()> { + let request = proto::transaction::ProvenTransaction { + transaction: proven_tx.to_bytes(), + transaction_inputs: Some(tx_inputs.to_bytes()), + }; + self.validator + .clone() + .submit_proven_transaction(request) + .await + .map_err(NtxError::Submission)?; + Ok(()) + } +} + +// NETWORK TRANSACTION DATA STORE +// ================================================================================================ + +/// A [`DataStore`] implementation which provides transaction inputs for a single account and +/// reference block with LRU caching for note scripts. +/// +/// This implementation includes an LRU (Least Recently Used) cache for note scripts to improve +/// performance by avoiding repeated RPC calls for the same script roots. The cache automatically +/// manages memory usage by evicting least recently used entries when the cache reaches capacity. +/// +/// This is sufficient for executing a network transaction. +struct NtxDataStore { + account: Account, + reference_block: BlockHeader, + chain_mmr: PartialBlockchain, + mast_store: TransactionMastStore, + /// Store client for retrieving note scripts. + store: StoreClient, + /// LRU cache for storing retrieved note scripts to avoid repeated store calls. + script_cache: LruCache, + /// Mapping of storage map roots to storage slot names observed during various calls. + /// + /// The registered slot names are subsequently used to retrieve storage map witnesses from the + /// store. We need this because the store interface (and the underling SMT forest) use storage + /// slot names, but the `DataStore` interface works with tree roots. To get around this problem + /// we populate this map when: + /// - The the native account is loaded (in `get_transaction_inputs()`). 
+ /// - When a foreign account is loaded (in `get_foreign_account_inputs`). + /// + /// The assumption here are: + /// - Once an account is loaded, the mapping between `(account_id, map_root)` and slot names do + /// not change. This is always the case. + /// - New storage slots created during transaction execution will not be accesses in the same + /// transaction. The mechanism for adding new storage slots is not implemented yet, but the + /// plan for it is consistent with this assumption. + /// + /// One nuance worth mentioning: it is possible that there could be a root collision where an + /// account has two storage maps with the same root. In this case, the map will contain only a + /// single entry with the storage slot name that was added last. Thus, technically, requests + /// to the store could be "wrong", but given that two identical maps have identical witnesses + /// this does not cause issues in practice. + storage_slots: Arc>>, +} + +impl NtxDataStore { + /// Creates a new `NtxDataStore` with default cache size. + fn new( + account: Account, + reference_block: BlockHeader, + chain_mmr: PartialBlockchain, + store: StoreClient, + script_cache: LruCache, + ) -> Self { + let mast_store = TransactionMastStore::new(); + mast_store.load_account_code(account.code()); + + Self { + account, + reference_block, + chain_mmr, + mast_store, + store, + script_cache, + storage_slots: Arc::new(Mutex::new(BTreeMap::default())), + } + } + + /// Registers storage map slot names for the given account ID and storage header. + /// + /// These slot names are subsequently used to query for storage map witnesses against the store. 
+ async fn register_storage_map_slots( + &self, + account_id: AccountId, + storage_header: &AccountStorageHeader, + ) { + let mut storage_slots = self.storage_slots.lock().await; + for slot_header in storage_header.slots() { + if let StorageSlotType::Map = slot_header.slot_type() { + storage_slots.insert((account_id, slot_header.value()), slot_header.name().clone()); + } + } + } +} + +impl DataStore for NtxDataStore { + fn get_transaction_inputs( + &self, + account_id: AccountId, + ref_blocks: BTreeSet, + ) -> impl FutureMaybeSend> + { + async move { + if self.account.id() != account_id { + return Err(DataStoreError::AccountNotFound(account_id)); + } + + // The latest supplied reference block must match the current reference block. + match ref_blocks.last().copied() { + Some(reference) if reference == self.reference_block.block_num() => {}, + Some(other) => return Err(DataStoreError::BlockNotFound(other)), + None => return Err(DataStoreError::other("no reference block requested")), + } + + // Register slot names from the native account for later use. + self.register_storage_map_slots(account_id, &self.account.storage().to_header()) + .await; + + let partial_account = PartialAccount::from(&self.account); + Ok((partial_account, self.reference_block.clone(), self.chain_mmr.clone())) + } + } + + fn get_foreign_account_inputs( + &self, + foreign_account_id: AccountId, + ref_block: BlockNumber, + ) -> impl FutureMaybeSend> { + async move { + debug_assert_eq!(ref_block, self.reference_block.block_num()); + + // Get foreign account inputs from store. + let account_inputs = + self.store.get_account_inputs(foreign_account_id, ref_block).await.map_err( + |err| DataStoreError::other_with_source("failed to get account inputs", err), + )?; + + // Register slot names from the foreign account for later use. 
+ self.register_storage_map_slots(foreign_account_id, account_inputs.storage().header()) + .await; + + Ok(account_inputs) + } + } + + fn get_vault_asset_witnesses( + &self, + account_id: AccountId, + _vault_root: Word, + vault_keys: BTreeSet, + ) -> impl FutureMaybeSend, DataStoreError>> { + async move { + let ref_block = self.reference_block.block_num(); + + // Get vault asset witnesses from the store. + let witnesses = self + .store + .get_vault_asset_witnesses(account_id, vault_keys, Some(ref_block)) + .await + .map_err(|err| { + DataStoreError::other_with_source("failed to get vault asset witnesses", err) + })?; + + Ok(witnesses) + } + } + + fn get_storage_map_witness( + &self, + account_id: AccountId, + map_root: Word, + map_key: Word, + ) -> impl FutureMaybeSend> { + async move { + // The slot name that corresponds to the given account ID and map root must have been + // registered during previous calls of this data store. + let storage_slots = self.storage_slots.lock().await; + let Some(slot_name) = storage_slots.get(&(account_id, map_root)) else { + return Err(DataStoreError::other( + "requested storage slot has not been registered", + )); + }; + + let ref_block = self.reference_block.block_num(); + + // Get storage map witness from the store. + let witness = self + .store + .get_storage_map_witness(account_id, slot_name.clone(), map_key, Some(ref_block)) + .await + .map_err(|err| { + DataStoreError::other_with_source("failed to get storage map witness", err) + })?; + + Ok(witness) + } + } + + /// Retrieves a note script by its root hash. + /// + /// This implementation uses the configured RPC client to call the `GetNoteScriptByRoot` + /// endpoint on the RPC server. + fn get_note_script( + &self, + script_root: Word, + ) -> impl FutureMaybeSend, DataStoreError>> { + async move { + // Attempt to retrieve the script from the cache. 
+ if let Some(cached_script) = self.script_cache.get(&script_root).await { + return Ok(Some(cached_script)); + } + + // Retrieve the script from the store. + let maybe_script = + self.store.get_note_script_by_root(script_root).await.map_err(|err| { + DataStoreError::Other { + error_msg: "failed to retrieve note script from store".to_string().into(), + source: Some(err.into()), + } + })?; + // Handle response. + if let Some(script) = maybe_script { + self.script_cache.put(script_root, script.clone()).await; + Ok(Some(script)) + } else { + Ok(None) + } + } + } +} + +impl MastForestStore for NtxDataStore { + fn get( + &self, + procedure_hash: &miden_protocol::Word, + ) -> Option> { + self.mast_store.get(procedure_hash) + } +} diff --git a/crates/ntx-builder/src/actor/inflight_note.rs b/crates/ntx-builder/src/actor/inflight_note.rs new file mode 100644 index 0000000000..23c7d06d72 --- /dev/null +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -0,0 +1,66 @@ +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Note; + +use crate::actor::has_backoff_passed; + +// INFLIGHT NETWORK NOTE +// ================================================================================================ + +/// An unconsumed network note that may have failed to execute. +/// +/// The block number at which the network note was attempted are approximate and may not +/// reflect the exact block number for which the execution attempt failed. The actual block +/// will likely be soon after the number that is recorded here. +#[derive(Debug, Clone)] +pub struct InflightNetworkNote { + note: SingleTargetNetworkNote, + attempt_count: usize, + last_attempt: Option, +} + +impl InflightNetworkNote { + /// Creates a new inflight network note. 
+ pub fn new(note: SingleTargetNetworkNote) -> Self { + Self { + note, + attempt_count: 0, + last_attempt: None, + } + } + + /// Consumes the inflight network note and returns the inner network note. + pub fn into_inner(self) -> SingleTargetNetworkNote { + self.note + } + + /// Returns a reference to the inner network note. + pub fn to_inner(&self) -> &SingleTargetNetworkNote { + &self.note + } + + /// Returns the number of attempts made to execute the network note. + pub fn attempt_count(&self) -> usize { + self.attempt_count + } + + /// Checks if the network note is available for execution. + /// + /// The note is available if the backoff period has passed. + pub fn is_available(&self, block_num: BlockNumber) -> bool { + self.note.can_be_consumed(block_num).unwrap_or(true) + && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) + } + + /// Registers a failed attempt to execute the network note at the specified block number. + pub fn fail(&mut self, block_num: BlockNumber) { + self.last_attempt = Some(block_num); + self.attempt_count += 1; + } +} + +impl From for Note { + fn from(value: InflightNetworkNote) -> Self { + value.into_inner().into() + } +} diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs new file mode 100644 index 0000000000..ae8f63629e --- /dev/null +++ b/crates/ntx-builder/src/actor/mod.rs @@ -0,0 +1,356 @@ +pub mod account_state; +mod execute; +mod inflight_note; +mod note_state; + +use std::sync::Arc; +use std::time::Duration; + +use account_state::{NetworkAccountState, TransactionCandidate}; +use futures::FutureExt; +use miden_node_proto::clients::{Builder, ValidatorClient}; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_utils::ErrorReport; +use miden_node_utils::lru_cache::LruCache; +use miden_protocol::Word; +use miden_protocol::account::{Account, AccountDelta}; +use miden_protocol::block::BlockNumber; +use 
miden_protocol::note::NoteScript; +use miden_protocol::transaction::TransactionId; +use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; +use tokio_util::sync::CancellationToken; +use url::Url; + +use crate::block_producer::BlockProducerClient; +use crate::builder::ChainState; +use crate::store::StoreClient; + +// ACTOR SHUTDOWN REASON +// ================================================================================================ + +/// The reason an actor has shut down. +pub enum ActorShutdownReason { + /// Occurs when the transaction that created the actor is reverted. + AccountReverted(NetworkAccountId), + /// Occurs when an account actor detects failure in the messaging channel used by the + /// coordinator. + EventChannelClosed, + /// Occurs when an account actor detects failure in acquiring the rate-limiting semaphore. + SemaphoreFailed(AcquireError), + /// Occurs when an account actor detects its corresponding cancellation token has been triggered + /// by the coordinator. Cancellation tokens are triggered by the coordinator to initiate + /// graceful shutdown of actors. + Cancelled(NetworkAccountId), +} + +// ACCOUNT ACTOR CONFIG +// ================================================================================================ + +/// Contains miscellaneous resources that are required by all account actors. +#[derive(Clone)] +pub struct AccountActorContext { + /// Client for interacting with the store in order to load account state. + pub store: StoreClient, + /// Address of the block producer gRPC server. + pub block_producer_url: Url, + /// Address of the Validator server. + pub validator_url: Url, + /// Address of the remote prover. If `None`, transactions will be proven locally, which is + // undesirable due to the performance impact. + pub tx_prover_url: Option, + /// The latest chain state that account all actors can rely on. 
A single chain state is shared + /// among all actors. + pub chain_state: Arc>, + /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. + /// This cache is shared across all account actors to maximize cache efficiency. + pub script_cache: LruCache, +} + +// ACCOUNT ORIGIN +// ================================================================================================ + +/// The origin of the account which the actor will use to initialize the account state. +#[derive(Debug)] +pub enum AccountOrigin { + /// Accounts that have just been created by a transaction but have not been committed to the + /// store yet. + Transaction(Box), + /// Accounts that already exist in the store. + Store(NetworkAccountId), +} + +impl AccountOrigin { + /// Returns an [`AccountOrigin::Transaction`] if the account is a network account. + pub fn transaction(delta: &AccountDelta) -> Option { + let account = Account::try_from(delta).ok()?; + if account.is_network() { + Some(AccountOrigin::Transaction(account.clone().into())) + } else { + None + } + } + + /// Returns an [`AccountOrigin::Store`]. + pub fn store(account_id: NetworkAccountId) -> Self { + AccountOrigin::Store(account_id) + } + + /// Returns the [`NetworkAccountId`] of the account. + pub fn id(&self) -> NetworkAccountId { + match self { + AccountOrigin::Transaction(account) => NetworkAccountId::try_from(account.id()) + .expect("actor accounts are always network accounts"), + AccountOrigin::Store(account_id) => *account_id, + } + } +} + +// ACTOR MODE +// ================================================================================================ + +/// The mode of operation that the account actor is currently performing. 
+#[derive(Debug)] +enum ActorMode { + NoViableNotes, + NotesAvailable, + TransactionInflight(TransactionId), +} + +// ACCOUNT ACTOR +// ================================================================================================ + +/// A long-running asynchronous task that handles the complete lifecycle of network transaction +/// processing. Each actor operates independently and is managed by a single coordinator that +/// spawns, monitors, and messages all actors. +/// +/// ## Core Responsibilities +/// +/// - **State Management**: Loads and maintains the current state of network accounts, including +/// available notes, pending transactions, and account commitments. +/// - **Transaction Selection**: Selects viable notes and constructs a [`TransactionCandidate`] +/// based on current chain state. +/// - **Transaction Execution**: Executes selected transactions using either local or remote +/// proving. +/// - **Mempool Integration**: Listens for mempool events to stay synchronized with the network +/// state and adjust behavior based on transaction confirmations. +/// +/// ## Lifecycle +/// +/// 1. **Initialization**: Loads account state from the store or uses provided account data. +/// 2. **Event Loop**: Continuously processes mempool events and executes transactions. +/// 3. **Transaction Processing**: Selects, executes, and proves transactions, and submits them to +/// block producer. +/// 4. **State Updates**: Updates internal state based on mempool events and execution results. +/// 5. **Shutdown**: Terminates gracefully when cancelled or encounters unrecoverable errors. +/// +/// ## Concurrency +/// +/// Each actor runs in its own async task and communicates with other system components through +/// channels and shared state. The actor uses a cancellation token for graceful shutdown +/// coordination. 
+pub struct AccountActor { + origin: AccountOrigin, + store: StoreClient, + mode: ActorMode, + event_rx: mpsc::Receiver>, + cancel_token: CancellationToken, + // TODO(sergerad): Remove block producer when block proving moved to store. + block_producer: BlockProducerClient, + validator: ValidatorClient, + prover: Option, + chain_state: Arc>, + script_cache: LruCache, +} + +impl AccountActor { + /// Constructs a new account actor and corresponding messaging channel with the given + /// configuration. + pub fn new( + origin: AccountOrigin, + actor_context: &AccountActorContext, + event_rx: mpsc::Receiver>, + cancel_token: CancellationToken, + ) -> Self { + let block_producer = BlockProducerClient::new(actor_context.block_producer_url.clone()); + let validator = Builder::new(actor_context.validator_url.clone()) + .without_tls() + .with_timeout(Duration::from_secs(10)) + .without_metadata_version() + .without_metadata_genesis() + .with_otel_context_injection() + .connect_lazy::(); + let prover = actor_context.tx_prover_url.clone().map(RemoteTransactionProver::new); + Self { + origin, + store: actor_context.store.clone(), + mode: ActorMode::NoViableNotes, + event_rx, + cancel_token, + block_producer, + validator, + prover, + chain_state: actor_context.chain_state.clone(), + script_cache: actor_context.script_cache.clone(), + } + } + + /// Runs the account actor, processing events and managing state until a reason to shutdown is + /// encountered. + pub async fn run(mut self, semaphore: Arc) -> ActorShutdownReason { + // Load the account state from the store and set up the account actor state. 
+ let account = { + match self.origin { + AccountOrigin::Store(account_id) => self + .store + .get_network_account(account_id) + .await + .expect("actor should be able to load account") + .expect("actor account should exist"), + AccountOrigin::Transaction(ref account) => *(account.clone()), + } + }; + let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let mut state = + NetworkAccountState::load(account, self.origin.id(), &self.store, block_num) + .await + .expect("actor should be able to load account state"); + + loop { + // Enable or disable transaction execution based on actor mode. + let tx_permit_acquisition = match self.mode { + // Disable transaction execution. + ActorMode::NoViableNotes | ActorMode::TransactionInflight(_) => { + std::future::pending().boxed() + }, + // Enable transaction execution. + ActorMode::NotesAvailable => semaphore.acquire().boxed(), + }; + tokio::select! { + _ = self.cancel_token.cancelled() => { + return ActorShutdownReason::Cancelled(self.origin.id()); + } + // Handle mempool events. + event = self.event_rx.recv() => { + let Some(event) = event else { + return ActorShutdownReason::EventChannelClosed; + }; + // Re-enable transaction execution if the transaction being waited on has been + // added to the mempool. + if let ActorMode::TransactionInflight(awaited_id) = self.mode { + if let MempoolEvent::TransactionAdded { id, .. } = *event { + if id == awaited_id { + self.mode = ActorMode::NotesAvailable; + } + } + } else { + self.mode = ActorMode::NotesAvailable; + } + // Update state. + if let Some(shutdown_reason) = state.mempool_update(event.as_ref()) { + return shutdown_reason; + } + }, + // Execute transactions. + permit = tx_permit_acquisition => { + match permit { + Ok(_permit) => { + // Read the chain state. + let chain_state = self.chain_state.read().await.clone(); + // Find a candidate transaction and execute it. 
+ if let Some(tx_candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX, chain_state) { + self.execute_transactions(&mut state, tx_candidate).await; + } else { + // No transactions to execute, wait for events. + self.mode = ActorMode::NoViableNotes; + } + } + Err(err) => { + return ActorShutdownReason::SemaphoreFailed(err); + } + } + } + } + } + } + + /// Execute a transaction candidate and mark notes as failed as required. + /// + /// Updates the state of the actor based on the execution result. + #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, state, tx_candidate))] + async fn execute_transactions( + &mut self, + state: &mut NetworkAccountState, + tx_candidate: TransactionCandidate, + ) { + let block_num = tx_candidate.chain_tip_header.block_num(); + + // Execute the selected transaction. + let context = execute::NtxContext::new( + self.block_producer.clone(), + self.validator.clone(), + self.prover.clone(), + self.store.clone(), + self.script_cache.clone(), + ); + + let notes = tx_candidate.notes.clone(); + let execution_result = context.execute_transaction(tx_candidate).await; + match execution_result { + // Execution completed without failed notes. + Ok((tx_id, failed)) if failed.is_empty() => { + self.mode = ActorMode::TransactionInflight(tx_id); + }, + // Execution completed with some failed notes. + Ok((tx_id, failed)) => { + let notes = failed.into_iter().map(|note| note.note).collect::>(); + state.notes_failed(notes.as_slice(), block_num); + self.mode = ActorMode::TransactionInflight(tx_id); + }, + // Transaction execution failed. 
+ Err(err) => { + tracing::error!(err = err.as_report(), "network transaction failed"); + self.mode = ActorMode::NoViableNotes; + let notes = + notes.into_iter().map(|note| note.into_inner().into()).collect::>(); + state.notes_failed(notes.as_slice(), block_num); + }, + } + } +} + +// HELPERS +// ================================================================================================ + +/// Checks if the backoff block period has passed. +/// +/// The number of blocks passed since the last attempt must be greater than or equal to +/// e^(0.25 * `attempt_count`) rounded to the nearest integer. +/// +/// This evaluates to the following: +/// - After 1 attempt, the backoff period is 1 block. +/// - After 3 attempts, the backoff period is 2 blocks. +/// - After 10 attempts, the backoff period is 12 blocks. +/// - After 20 attempts, the backoff period is 148 blocks. +/// - etc... +#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] +fn has_backoff_passed( + chain_tip: BlockNumber, + last_attempt: Option, + attempts: usize, +) -> bool { + if attempts == 0 { + return true; + } + // Compute the number of blocks passed since the last attempt. + let blocks_passed = last_attempt + .and_then(|last| chain_tip.checked_sub(last.as_u32())) + .unwrap_or_default(); + + // Compute the exponential backoff threshold: Δ = e^(0.25 * n). + let backoff_threshold = (0.25 * attempts as f64).exp().round() as usize; + + // Check if the backoff period has passed. 
+ blocks_passed.as_usize() > backoff_threshold +} diff --git a/crates/ntx-builder/src/state/account.rs b/crates/ntx-builder/src/actor/note_state.rs similarity index 63% rename from crates/ntx-builder/src/state/account.rs rename to crates/ntx-builder/src/actor/note_state.rs index 56af83b427..87b91fc21a 100644 --- a/crates/ntx-builder/src/state/account.rs +++ b/crates/ntx-builder/src/actor/note_state.rs @@ -1,83 +1,20 @@ use std::collections::{HashMap, VecDeque}; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::note::SingleTargetNetworkNote; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{Account, AccountDelta, AccountId}; -use miden_objects::block::BlockNumber; -use miden_objects::note::{Note, Nullifier}; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{Account, AccountDelta, AccountId}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Nullifier; -// INFLIGHT NETWORK NOTE -// ================================================================================================ - -/// An unconsumed network note that may have failed to execute. -/// -/// The block number at which the network note was attempted are approximate and may not -/// reflect the exact block number for which the execution attempt failed. The actual block -/// will likely be soon after the number that is recorded here. -#[derive(Debug, Clone)] -pub struct InflightNetworkNote { - note: SingleTargetNetworkNote, - attempt_count: usize, - last_attempt: Option, -} - -impl InflightNetworkNote { - /// Creates a new inflight network note. - pub fn new(note: SingleTargetNetworkNote) -> Self { - Self { - note, - attempt_count: 0, - last_attempt: None, - } - } - - /// Consumes the inflight network note and returns the inner network note. 
- pub fn into_inner(self) -> SingleTargetNetworkNote { - self.note - } - - /// Returns a reference to the inner network note. - pub fn to_inner(&self) -> &SingleTargetNetworkNote { - &self.note - } - - /// Returns the number of attempts made to execute the network note. - pub fn attempt_count(&self) -> usize { - self.attempt_count - } - - /// Checks if the network note is available for execution. - /// - /// The note is available if it can be consumed and the backoff period has passed. - pub fn is_available(&self, block_num: BlockNumber) -> bool { - let can_consume = self - .to_inner() - .metadata() - .execution_hint() - .can_be_consumed(block_num) - .unwrap_or(true); - can_consume && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) - } - - /// Registers a failed attempt to execute the network note at the specified block number. - pub fn fail(&mut self, block_num: BlockNumber) { - self.last_attempt = Some(block_num); - self.attempt_count += 1; - } -} - -impl From for Note { - fn from(value: InflightNetworkNote) -> Self { - value.into_inner().into() - } -} +use crate::actor::inflight_note::InflightNetworkNote; // ACCOUNT STATE // ================================================================================================ /// Tracks the state of a network account and its notes. -pub struct AccountState { +#[derive(Clone)] +pub struct NetworkAccountNoteState { /// The committed account state, if any. /// /// Its possible this is `None` if the account creation transaction is still inflight. @@ -93,25 +30,29 @@ pub struct AccountState { nullified_notes: HashMap, } -impl AccountState { - /// Creates a new account state using the given value as the committed state. - pub fn from_committed_account(account: Account) -> Self { - Self { +impl NetworkAccountNoteState { + /// Creates a new account state from the supplied account and notes. 
+ pub fn new(account: Account, notes: Vec) -> Self { + let account_id = NetworkAccountId::try_from(account.id()) + .expect("only network accounts are used for account state"); + + let mut state = Self { committed: Some(account), inflight: VecDeque::default(), available_notes: HashMap::default(), nullified_notes: HashMap::default(), - } - } + }; - /// Creates a new account state where the creating transaction is still inflight. - pub fn from_uncommitted_account(account: Account) -> Self { - Self { - inflight: VecDeque::from([account]), - committed: None, - available_notes: HashMap::default(), - nullified_notes: HashMap::default(), + for note in notes { + // Currently only support single target network notes in NTB. + assert!( + note.account_id() == account_id, + "Notes supplied into account state must match expected account ID" + ); + state.add_note(note); } + + state } /// Returns an iterator over inflight notes that are not currently within their respective @@ -197,6 +138,7 @@ impl AccountState { // in case it's transaction wasn't available in the first place. // It shouldn't happen practically, since we skip them if the // relevant account cannot be retrieved via `fetch`. 
+ let _ = self.nullified_notes.remove(&nullifier); } @@ -256,27 +198,27 @@ pub enum NetworkAccountEffect { } impl NetworkAccountEffect { - pub fn from_protocol(update: AccountUpdateDetails) -> Option { + pub fn from_protocol(update: &AccountUpdateDetails) -> Option { let update = match update { AccountUpdateDetails::Private => return None, AccountUpdateDetails::Delta(update) if update.is_full_state() => { NetworkAccountEffect::Created( - Account::try_from(&update) + Account::try_from(update) .expect("Account should be derivable by full state AccountDelta"), ) }, - AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update), + AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), }; - update.account_id().is_network().then_some(update) + update.protocol_account_id().is_network().then_some(update) } - pub fn prefix(&self) -> NetworkAccountPrefix { + pub fn network_account_id(&self) -> NetworkAccountId { // SAFETY: This is a network account by construction. - self.account_id().try_into().unwrap() + self.protocol_account_id().try_into().unwrap() } - fn account_id(&self) -> AccountId { + fn protocol_account_id(&self) -> AccountId { match self { NetworkAccountEffect::Created(acc) => acc.id(), NetworkAccountEffect::Updated(delta) => delta.id(), @@ -284,44 +226,9 @@ impl NetworkAccountEffect { } } -// HELPERS -// ================================================================================================ - -/// Checks if the backoff block period has passed. -/// -/// The number of blocks passed since the last attempt must be greater than or equal to -/// e^(0.25 * `attempt_count`) rounded to the nearest integer. -/// -/// This evaluates to the following: -/// - After 1 attempt, the backoff period is 1 block. -/// - After 3 attempts, the backoff period is 2 blocks. -/// - After 10 attempts, the backoff period is 12 blocks. -/// - After 20 attempts, the backoff period is 148 blocks. -/// - etc... 
-#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] -fn has_backoff_passed( - chain_tip: BlockNumber, - last_attempt: Option, - attempts: usize, -) -> bool { - if attempts == 0 { - return true; - } - // Compute the number of blocks passed since the last attempt. - let blocks_passed = last_attempt - .and_then(|last| chain_tip.checked_sub(last.as_u32())) - .unwrap_or_default(); - - // Compute the exponential backoff threshold: Δ = e^(0.25 * n). - let backoff_threshold = (0.25 * attempts as f64).exp().round() as usize; - - // Check if the backoff period has passed. - blocks_passed.as_usize() > backoff_threshold -} - #[cfg(test)] mod tests { - use miden_objects::block::BlockNumber; + use miden_protocol::block::BlockNumber; #[rstest::rstest] #[test] @@ -341,9 +248,11 @@ mod tests { #[case] attempt_count: usize, #[case] backoff_should_have_passed: bool, ) { + use crate::actor::has_backoff_passed; + assert_eq!( backoff_should_have_passed, - super::has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) + has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) ); } } diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index a29b61295c..ce4d7b9c6a 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -1,16 +1,12 @@ use std::time::Duration; use futures::{TryStream, TryStreamExt}; -use miden_node_proto::clients::{ - BlockProducer, - BlockProducerClient as InnerBlockProducerClient, - Builder, -}; +use miden_node_proto::clients::{BlockProducerClient as InnerBlockProducerClient, Builder}; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::generated::{self as proto}; use miden_node_utils::FlattenResult; -use miden_objects::block::BlockNumber; -use miden_objects::transaction::ProvenTransaction; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::ProvenTransaction; use 
miden_tx::utils::Serializable; use tokio_stream::StreamExt; use tonic::Status; @@ -40,18 +36,21 @@ impl BlockProducerClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { client: block_producer } } - #[instrument(target = COMPONENT, name = "block_producer.client.submit_proven_transaction", skip_all, err)] + + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.submit_proven_transaction", skip_all, err)] pub async fn submit_proven_transaction( &self, - proven_tx: ProvenTransaction, + proven_tx: &ProvenTransaction, ) -> Result<(), Status> { let request = proto::transaction::ProvenTransaction { transaction: proven_tx.to_bytes(), - transaction_inputs: None, + transaction_inputs: None, /* Transaction inputs are only required for Validator + * transaction re-execution. */ }; self.client.clone().submit_proven_transaction(request).await?; @@ -59,7 +58,7 @@ impl BlockProducerClient { Ok(()) } - #[instrument(target = COMPONENT, name = "block_producer.client.subscribe_to_mempool", skip_all, err)] + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.subscribe_to_mempool", skip_all, err)] pub async fn subscribe_to_mempool_with_retry( &self, chain_tip: BlockNumber, @@ -68,9 +67,9 @@ impl BlockProducerClient { loop { match self.subscribe_to_mempool(chain_tip).await { Err(err) if err.code() == tonic::Code::Unavailable => { - // exponential backoff with base 500ms and max 30s + // Exponential backoff with base 500ms and max 30s. 
let backoff = Duration::from_millis(500) - .saturating_mul(1 << retry_counter) + .saturating_mul(1 << retry_counter.min(6)) .min(Duration::from_secs(30)); tracing::warn!( diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs new file mode 100644 index 0000000000..8b789779f7 --- /dev/null +++ b/crates/ntx-builder/src/builder.rs @@ -0,0 +1,294 @@ +use std::num::NonZeroUsize; +use std::sync::Arc; + +use anyhow::Context; +use futures::TryStreamExt; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_utils::lru_cache::LruCache; +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::merkle::mmr::PartialMmr; +use miden_protocol::note::NoteScript; +use miden_protocol::transaction::PartialBlockchain; +use tokio::sync::{RwLock, mpsc}; +use url::Url; + +use crate::MAX_IN_PROGRESS_TXS; +use crate::actor::{AccountActorContext, AccountOrigin}; +use crate::block_producer::BlockProducerClient; +use crate::coordinator::Coordinator; +use crate::store::StoreClient; + +// CONSTANTS +// ================================================================================================= + +/// The maximum number of blocks to keep in memory while tracking the chain tip. +const MAX_BLOCK_COUNT: usize = 4; + +// CHAIN STATE +// ================================================================================================ + +/// Contains information about the chain that is relevant to the [`NetworkTransactionBuilder`] and +/// all account actors managed by the [`Coordinator`] +#[derive(Debug, Clone)] +pub struct ChainState { + /// The current tip of the chain. + pub chain_tip_header: BlockHeader, + /// A partial representation of the latest state of the chain. + pub chain_mmr: PartialBlockchain, +} + +impl ChainState { + /// Constructs a new instance of [`ChainState`]. 
+    fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self {
+        let chain_mmr = PartialBlockchain::new(chain_mmr, [])
+            .expect("partial blockchain should build from partial mmr");
+        Self { chain_tip_header, chain_mmr }
+    }
+
+    /// Consumes the chain state and returns the chain tip header and the partial blockchain as a
+    /// tuple.
+    pub fn into_parts(self) -> (BlockHeader, PartialBlockchain) {
+        (self.chain_tip_header, self.chain_mmr)
+    }
+}
+
+// NETWORK TRANSACTION BUILDER
+// ================================================================================================
+
+/// Network transaction builder component.
+///
+/// The network transaction builder is in charge of building transactions that consume notes
+/// against network accounts. These notes are identified and communicated by the block producer.
+/// The service maintains a list of unconsumed notes and periodically executes and proves
+/// transactions that consume them (reaching out to the store to retrieve state as necessary).
+///
+/// The builder manages the tasks for every network account on the chain through the coordinator.
+pub struct NetworkTransactionBuilder {
+    /// Address of the store gRPC server.
+    store_url: Url,
+    /// Address of the block producer gRPC server.
+    block_producer_url: Url,
+    /// Address of the Validator server.
+    validator_url: Url,
+    /// Address of the remote prover. If `None`, transactions will be proven locally, which is
+    /// undesirable due to the performance impact.
+    tx_prover_url: Option<Url>,
+    /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls.
+    /// This cache is shared across all account actors.
+    script_cache: LruCache<Word, NoteScript>,
+    /// Coordinator for managing actor tasks.
+    coordinator: Coordinator,
+}
+
+impl NetworkTransactionBuilder {
+    /// Channel capacity for account loading.
+    const ACCOUNT_CHANNEL_CAPACITY: usize = 1_000;
+
+    /// Creates a new instance of the network transaction builder.
+ pub fn new( + store_url: Url, + block_producer_url: Url, + validator_url: Url, + tx_prover_url: Option, + script_cache_size: NonZeroUsize, + ) -> Self { + let script_cache = LruCache::new(script_cache_size); + let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); + Self { + store_url, + block_producer_url, + validator_url, + tx_prover_url, + script_cache, + coordinator, + } + } + + /// Runs the network transaction builder until a fatal error occurs. + pub async fn run(mut self) -> anyhow::Result<()> { + let store = StoreClient::new(self.store_url.clone()); + let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); + + // Loop until we successfully subscribe. + // + // The mempool rejects our subscription if we don't have the same view of the chain aka + // if our chain tip does not match the mempools. This can occur if a new block is committed + // _after_ we fetch the chain tip from the store but _before_ our subscription request is + // handled. + // + // This is a hack-around for https://github.com/0xMiden/miden-node/issues/1566. + let (chain_tip_header, chain_mmr, mut mempool_events) = loop { + let (chain_tip_header, chain_mmr) = store + .get_latest_blockchain_data_with_retry() + .await? + .expect("store should contain a latest block"); + + match block_producer + .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) + .await + { + Ok(subscription) => break (chain_tip_header, chain_mmr, subscription), + Err(status) if status.code() == tonic::Code::InvalidArgument => { + tracing::error!(err=%status, "mempool subscription failed due to desync, trying again"); + }, + Err(err) => return Err(err).context("failed to subscribe to mempool events"), + } + }; + + // Create chain state that will be updated by the coordinator and read by actors. 
+ let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + + let actor_context = AccountActorContext { + block_producer_url: self.block_producer_url.clone(), + validator_url: self.validator_url.clone(), + tx_prover_url: self.tx_prover_url.clone(), + chain_state: chain_state.clone(), + store: store.clone(), + script_cache: self.script_cache.clone(), + }; + + // Spawn a background task to load network accounts from the store. + // Accounts are sent through a channel in batches and processed in the main event loop. + let (account_tx, mut account_rx) = + mpsc::channel::(Self::ACCOUNT_CHANNEL_CAPACITY); + let account_loader_store = store.clone(); + let mut account_loader_handle = tokio::spawn(async move { + account_loader_store + .stream_network_account_ids(account_tx) + .await + .context("failed to load network accounts from store") + }); + + // Main loop which manages actors and passes mempool events to them. + loop { + tokio::select! { + // Handle actor result. + result = self.coordinator.next() => { + result?; + }, + // Handle mempool events. + event = mempool_events.try_next() => { + let event = event + .context("mempool event stream ended")? + .context("mempool event stream failed")?; + + self.handle_mempool_event( + event.into(), + &actor_context, + chain_state.clone(), + ).await?; + }, + // Handle account batches loaded from the store. + // Once all accounts are loaded, the channel closes and this branch + // becomes inactive (recv returns None and we stop matching). + Some(account_id) = account_rx.recv() => { + self.handle_loaded_account(account_id, &actor_context).await?; + }, + // Handle account loader task completion/failure. + // If the task fails, we abort since the builder would be in a degraded state + // where existing notes against network accounts won't be processed. 
+ result = &mut account_loader_handle => { + result + .context("account loader task panicked") + .flatten()?; + + tracing::info!("account loading from store completed"); + account_loader_handle = tokio::spawn(std::future::pending()); + }, + } + } + } + + /// Handles a batch of account IDs loaded from the store by spawning actors for them. + #[tracing::instrument( + name = "ntx.builder.handle_loaded_accounts", + skip(self, account_id, actor_context) + )] + async fn handle_loaded_account( + &mut self, + account_id: NetworkAccountId, + actor_context: &AccountActorContext, + ) -> Result<(), anyhow::Error> { + self.coordinator + .spawn_actor(AccountOrigin::store(account_id), actor_context) + .await?; + Ok(()) + } + + /// Handles mempool events by sending them to actors via the coordinator and/or spawning new + /// actors as required. + #[tracing::instrument( + name = "ntx.builder.handle_mempool_event", + skip(self, event, actor_context, chain_state) + )] + async fn handle_mempool_event( + &mut self, + event: Arc, + actor_context: &AccountActorContext, + chain_state: Arc>, + ) -> Result<(), anyhow::Error> { + match event.as_ref() { + MempoolEvent::TransactionAdded { account_delta, .. } => { + // Handle account deltas in case an account is being created. + if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { + // Handle account deltas for network accounts only. + if let Some(network_account) = AccountOrigin::transaction(delta) { + // Spawn new actors if a transaction creates a new network account + let is_creating_account = delta.is_full_state(); + if is_creating_account { + self.coordinator.spawn_actor(network_account, actor_context).await?; + } + } + } + self.coordinator.send_targeted(&event).await?; + Ok(()) + }, + // Update chain state and broadcast. 
+ MempoolEvent::BlockCommitted { header, txs } => { + self.update_chain_tip(header.as_ref().clone(), chain_state).await; + self.coordinator.broadcast(event.clone()).await; + + // All transactions pertaining to predating events should now be available through + // the store. So we can now drain them. + for tx_id in txs { + self.coordinator.drain_predating_events(tx_id); + } + Ok(()) + }, + // Broadcast to all actors. + MempoolEvent::TransactionsReverted(txs) => { + self.coordinator.broadcast(event.clone()).await; + + // Reverted predating transactions need not be processed. + for tx_id in txs { + self.coordinator.drain_predating_events(tx_id); + } + Ok(()) + }, + } + } + + /// Updates the chain tip and MMR block count. + /// + /// Blocks in the MMR are pruned if the block count exceeds the maximum. + async fn update_chain_tip(&mut self, tip: BlockHeader, chain_state: Arc>) { + // Lock the chain state. + let mut chain_state = chain_state.write().await; + + // Update MMR which lags by one block. + let mmr_tip = chain_state.chain_tip_header.clone(); + chain_state.chain_mmr.add_block(&mmr_tip, true); + + // Set the new tip. + chain_state.chain_tip_header = tip; + + // Keep MMR pruned. 
+ let pruned_block_height = + (chain_state.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) + as u32; + chain_state.chain_mmr.prune_to(..pruned_block_height.into()); + } +} diff --git a/crates/ntx-builder/src/builder/mod.rs b/crates/ntx-builder/src/builder/mod.rs deleted file mode 100644 index ac65f63efb..0000000000 --- a/crates/ntx-builder/src/builder/mod.rs +++ /dev/null @@ -1,199 +0,0 @@ -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; - -use anyhow::Context; -use futures::TryStreamExt; -use miden_node_proto::domain::account::NetworkAccountPrefix; -use miden_node_utils::ErrorReport; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; -use tokio::sync::Barrier; -use tokio::time; -use url::Url; - -use crate::MAX_IN_PROGRESS_TXS; -use crate::block_producer::BlockProducerClient; -use crate::store::StoreClient; - -// NETWORK TRANSACTION BUILDER -// ================================================================================================ - -/// Network transaction builder component. -/// -/// The network transaction builder is in in charge of building transactions that consume notes -/// against network accounts. These notes are identified and communicated by the block producer. -/// The service maintains a list of unconsumed notes and periodically executes and proves -/// transactions that consume them (reaching out to the store to retrieve state as necessary). -pub struct NetworkTransactionBuilder { - /// Address of the store gRPC server. - store_url: Url, - /// Address of the block producer gRPC server. - block_producer_url: Url, - /// Address of the remote prover. If `None`, transactions will be proven locally, which is - /// undesirable due to the perofmrance impact. - tx_prover_url: Option, - /// Interval for checking pending notes and executing network transactions. - ticker_interval: Duration, - /// A checkpoint used to sync start-up process with the block-producer. 
- /// - /// This informs the block-producer when we have subscribed to mempool events and that it is - /// safe to begin block-production. - bp_checkpoint: Arc, -} - -impl NetworkTransactionBuilder { - /// Creates a new instance of the network transaction builder. - pub fn new( - store_url: Url, - block_producer_url: Url, - tx_prover_url: Option, - ticker_interval: Duration, - bp_checkpoint: Arc, - ) -> Self { - Self { - store_url, - block_producer_url, - tx_prover_url, - ticker_interval, - bp_checkpoint, - } - } - - pub async fn serve_new(self) -> anyhow::Result<()> { - let store = StoreClient::new(self.store_url); - let block_producer = BlockProducerClient::new(self.block_producer_url); - - let mut state = crate::state::State::load(store.clone()) - .await - .context("failed to load ntx state")?; - - let mut mempool_events = block_producer - .subscribe_to_mempool_with_retry(state.chain_tip()) - .await - .context("failed to subscribe to mempool events")?; - - // Unlock the block-producer's block production. The block-producer is prevented from - // producing blocks until we have subscribed to mempool events. - // - // This is a temporary work-around until the ntb can resync on the fly. - self.bp_checkpoint.wait().await; - - let prover = self.tx_prover_url.map(RemoteTransactionProver::new); - - let mut interval = tokio::time::interval(self.ticker_interval); - interval.set_missed_tick_behavior(time::MissedTickBehavior::Skip); - - // Tracks network transaction tasks until they are submitted to the mempool. - // - // We also map the task ID to the network account so we can mark it as failed if it doesn't - // get submitted. - let mut inflight = JoinSet::new(); - let mut inflight_idx = HashMap::new(); - - let context = crate::transaction::NtxContext { - block_producer: block_producer.clone(), - prover, - store, - }; - - loop { - tokio::select! 
{ - _next = interval.tick() => { - if inflight.len() > MAX_IN_PROGRESS_TXS { - tracing::info!("At maximum network tx capacity, skipping"); - continue; - } - - let Some(candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX) else { - tracing::debug!("No candidate network transaction available"); - continue; - }; - - let network_account_prefix = NetworkAccountPrefix::try_from(candidate.account.id()) - .expect("all accounts managed by NTB are network accounts"); - let indexed_candidate = (network_account_prefix, candidate.chain_tip_header.block_num()); - let task_id = inflight.spawn({ - let context = context.clone(); - context.execute_transaction(candidate) - }).id(); - - // SAFETY: This is definitely a network account. - inflight_idx.insert(task_id, indexed_candidate); - }, - event = mempool_events.try_next() => { - let event = event - .context("mempool event stream ended")? - .context("mempool event stream failed")?; - state.mempool_update(event).await.context("failed to update state")?; - }, - completed = inflight.join_next_with_id() => { - // Grab the task ID and associated network account reference. - let task_id = match &completed { - Ok((task_id, _)) => *task_id, - Err(join_handle) => join_handle.id(), - }; - // SAFETY: both inflights should have the same set. - let (candidate, block_num) = inflight_idx.remove(&task_id).unwrap(); - - match completed { - // Some notes failed. - Ok((_, Ok(failed))) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(candidate, notes.as_slice(), block_num); - }, - // Transaction execution failed. - Ok((_, Err((notes, err)))) => { - tracing::warn!(err=err.as_report(), "network transaction failed"); - // Always mark notes as failed. They can get retried eventually. - state.notes_failed(candidate, notes.as_slice(), block_num); - - state.candidate_failed(candidate); - }, - // Unexpected error occurred. 
- Err(err) => { - tracing::warn!(err=err.as_report(), "network transaction panicked"); - state.candidate_failed(candidate); - } - } - } - } - } - } -} - -/// A wrapper arounnd tokio's [`JoinSet`](tokio::task::JoinSet) which returns pending instead of -/// [`None`] if its empty. -/// -/// This makes it much more convenient to use in a `select!`. -struct JoinSet(tokio::task::JoinSet); - -impl JoinSet -where - T: 'static, -{ - fn new() -> Self { - Self(tokio::task::JoinSet::new()) - } - - fn spawn(&mut self, task: F) -> tokio::task::AbortHandle - where - F: Future, - F: Send + 'static, - T: Send, - { - self.0.spawn(task) - } - - async fn join_next_with_id(&mut self) -> Result<(tokio::task::Id, T), tokio::task::JoinError> { - if self.0.is_empty() { - std::future::pending().await - } else { - // Cannot be None as its not empty. - self.0.join_next_with_id().await.unwrap() - } - } - - fn len(&self) -> usize { - self.0.len() - } -} diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs new file mode 100644 index 0000000000..285cee47af --- /dev/null +++ b/crates/ntx-builder/src/coordinator.rs @@ -0,0 +1,292 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use anyhow::Context; +use indexmap::IndexMap; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_proto::domain::note::NetworkNote; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::transaction::TransactionId; +use tokio::sync::mpsc::error::SendError; +use tokio::sync::{Semaphore, mpsc}; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; + +use crate::actor::{AccountActor, AccountActorContext, AccountOrigin, ActorShutdownReason}; + +// ACTOR HANDLE +// ================================================================================================ + +/// Handle to account actors that are spawned by the coordinator. 
+#[derive(Clone)] +struct ActorHandle { + event_tx: mpsc::Sender>, + cancel_token: CancellationToken, +} + +impl ActorHandle { + fn new(event_tx: mpsc::Sender>, cancel_token: CancellationToken) -> Self { + Self { event_tx, cancel_token } + } +} + +// COORDINATOR +// ================================================================================================ + +/// Coordinator for managing [`AccountActor`] instances, tasks, and associated communication. +/// +/// The `Coordinator` is the central orchestrator of the network transaction builder system. +/// It manages the lifecycle of account actors. Each actor is responsible for handling transactions +/// for a specific network account. The coordinator provides the following core +/// functionality: +/// +/// ## Actor Management +/// - Spawns new [`AccountActor`] instances for network accounts as needed. +/// - Maintains a registry of active actors with their communication channels. +/// - Gracefully handles actor shutdown and cleanup when actors complete or fail. +/// - Monitors actor tasks through a join set to detect completion or errors. +/// +/// ## Event Broadcasting +/// - Distributes mempool events to all account actors. +/// - Handles communication failures by canceling disconnected actors. +/// - Maintains reliable message delivery through dedicated channels per actor. +/// +/// ## Resource Management +/// - Controls transaction concurrency across all network accounts using a semaphore. +/// - Prevents resource exhaustion by limiting simultaneous transaction processing. +/// +/// The coordinator operates in an event-driven manner: +/// 1. Network accounts are registered and actors spawned as needed. +/// 2. Mempool events are broadcast to all active actors. +/// 3. Actor completion/failure events are monitored and handled. +/// 4. Failed or completed actors are cleaned up from the registry. 
+pub struct Coordinator { + /// Mapping of network account IDs to their respective message channels and cancellation + /// tokens. + /// + /// This registry serves as the primary directory for communicating with active account actors. + /// When actors are spawned, they register their communication channel here. When events need + /// to be broadcast, this registry is used to locate the appropriate actors. The registry is + /// automatically cleaned up when actors complete their execution. + actor_registry: HashMap, + + /// Join set for managing actor tasks and monitoring their completion status. + /// + /// This join set allows the coordinator to wait for actor task completion and handle + /// different shutdown scenarios. When an actor task completes (either successfully or + /// due to an error), the corresponding entry is removed from the actor registry. + actor_join_set: JoinSet, + + /// Semaphore for controlling the maximum number of concurrent transactions across all network + /// accounts. + /// + /// This shared semaphore prevents the system from becoming overwhelmed by limiting the total + /// number of transactions that can be processed simultaneously across all account actors. + /// Each actor must acquire a permit from this semaphore before processing a transaction, + /// ensuring fair resource allocation and system stability under load. + semaphore: Arc, + + /// Cache of events received from the mempool that predate corresponding network accounts. + /// Grouped by network account ID to allow targeted event delivery to actors upon creation. + predating_events: HashMap>>, +} + +impl Coordinator { + /// Maximum number of messages of the message channel for each actor. + const ACTOR_CHANNEL_SIZE: usize = 100; + + /// Creates a new coordinator with the specified maximum number of inflight transactions + /// and shared script cache. 
+ pub fn new(max_inflight_transactions: usize) -> Self { + Self { + actor_registry: HashMap::new(), + actor_join_set: JoinSet::new(), + semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), + predating_events: HashMap::new(), + } + } + + /// Spawns a new actor to manage the state of the provided network account. + /// + /// This method creates a new [`AccountActor`] instance for the specified account origin + /// and adds it to the coordinator's management system. The actor will be responsible for + /// processing transactions and managing state for the network account. + #[tracing::instrument(name = "ntx.builder.spawn_actor", skip(self, origin, actor_context))] + pub async fn spawn_actor( + &mut self, + origin: AccountOrigin, + actor_context: &AccountActorContext, + ) -> Result<(), SendError>> { + let account_id = origin.id(); + + // If an actor already exists for this account ID, something has gone wrong. + if let Some(handle) = self.actor_registry.remove(&account_id) { + tracing::error!("account actor already exists for account: {}", account_id); + handle.cancel_token.cancel(); + } + + let (event_tx, event_rx) = mpsc::channel(Self::ACTOR_CHANNEL_SIZE); + let cancel_token = tokio_util::sync::CancellationToken::new(); + let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); + let handle = ActorHandle::new(event_tx, cancel_token); + + // Run the actor. + let semaphore = self.semaphore.clone(); + self.actor_join_set.spawn(Box::pin(actor.run(semaphore))); + + // Send the new actor any events that contain notes that predate account creation. + if let Some(predating_events) = self.predating_events.remove(&account_id) { + for event in predating_events.values() { + Self::send(&handle, event.clone()).await?; + } + } + + self.actor_registry.insert(account_id, handle); + tracing::info!("created actor for account: {}", account_id); + Ok(()) + } + + /// Broadcasts a mempool event to all active account actors. 
+ /// + /// This method distributes the provided event to every actor currently registered + /// with the coordinator. Each actor will receive the event through its dedicated + /// message channel and can process it accordingly. + /// + /// If an actor fails to receive the event, it will be canceled. + pub async fn broadcast(&mut self, event: Arc) { + tracing::debug!( + actor_count = self.actor_registry.len(), + "broadcasting event to all actors" + ); + + let mut failed_actors = Vec::new(); + + // Send event to all actors. + for (account_id, handle) in &self.actor_registry { + if let Err(err) = Self::send(handle, event.clone()).await { + tracing::error!("failed to send event to actor {}: {}", account_id, err); + failed_actors.push(*account_id); + } + } + // Remove failed actors from registry and cancel them. + for account_id in failed_actors { + let handle = + self.actor_registry.remove(&account_id).expect("actor found in send loop above"); + handle.cancel_token.cancel(); + } + } + + /// Waits for the next actor to complete and processes the shutdown reason. + /// + /// This method monitors the join set for actor task completion and handles + /// different shutdown scenarios appropriately. It's designed to be called + /// in a loop to continuously monitor and manage actor lifecycles. + /// + /// If no actors are currently running, this method will wait indefinitely until + /// new actors are spawned. This prevents busy-waiting when the coordinator is idle. + pub async fn next(&mut self) -> anyhow::Result<()> { + let actor_result = self.actor_join_set.join_next().await; + match actor_result { + Some(Ok(shutdown_reason)) => match shutdown_reason { + ActorShutdownReason::Cancelled(account_id) => { + // Do not remove the actor from the registry, as it may be re-spawned. + // The coordinator should always remove actors immediately after cancellation. 
+ tracing::info!("account actor cancelled: {}", account_id); + Ok(()) + }, + ActorShutdownReason::AccountReverted(account_id) => { + tracing::info!("account reverted: {}", account_id); + self.actor_registry.remove(&account_id); + Ok(()) + }, + ActorShutdownReason::EventChannelClosed => { + anyhow::bail!("event channel closed"); + }, + ActorShutdownReason::SemaphoreFailed(err) => Err(err).context("semaphore failed"), + }, + Some(Err(err)) => { + tracing::error!(err = %err, "actor task failed"); + Ok(()) + }, + None => { + // There are no actors to wait for. Wait indefinitely until actors are spawned. + std::future::pending().await + }, + } + } + + /// Sends a mempool event to all network account actors that are found in the corresponding + /// transaction's notes. + /// + /// Caches the mempool event for each network account found in the transaction's notes that does + /// not currently have a corresponding actor. If an actor does not exist for the account, it is + /// assumed that the account has not been created on the chain yet. + /// + /// Cached events will be fed to the corresponding actor when the account creation transaction + /// is processed. + pub async fn send_targeted( + &mut self, + event: &Arc, + ) -> Result<(), SendError>> { + let mut target_actors = HashMap::new(); + if let MempoolEvent::TransactionAdded { id, network_notes, account_delta, .. } = + event.as_ref() + { + // We need to inform the account if it was updated. This lets it know that its own + // transaction has been applied, and in the future also resolves race conditions with + // external network transactions (once these are allowed). 
+ if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { + let account_id = delta.id(); + if account_id.is_network() { + let network_account_id = + account_id.try_into().expect("account is network account"); + if let Some(actor) = self.actor_registry.get(&network_account_id) { + target_actors.insert(network_account_id, actor); + } + } + } + + // Determine target actors for each note. + for note in network_notes { + let NetworkNote::SingleTarget(note) = note; + let network_account_id = note.account_id(); + if let Some(actor) = self.actor_registry.get(&network_account_id) { + // Register actor as target. + target_actors.insert(network_account_id, actor); + } else { + // Cache event for every note that doesn't have a corresponding actor. + self.predating_events + .entry(network_account_id) + .or_default() + .insert(*id, event.clone()); + } + } + } + // Send event to target actors. + for actor in target_actors.values() { + Self::send(actor, event.clone()).await?; + } + Ok(()) + } + + /// Removes any cached events for a given transaction ID from all account caches. + pub fn drain_predating_events(&mut self, tx_id: &TransactionId) { + // Remove the transaction from all account caches. + // This iterates over all predating events which is fine because the count is expected to be + // low. + self.predating_events.retain(|_, account_events| { + account_events.shift_remove(tx_id); + // Remove entries for accounts with no more cached events. + !account_events.is_empty() + }); + } + + /// Helper function to send an event to a single account actor. 
+ async fn send( + handle: &ActorHandle, + event: Arc, + ) -> Result<(), SendError>> { + handle.event_tx.send(event).await + } +} diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index a9a28781cf..62088ce6cc 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -1,10 +1,10 @@ use std::num::NonZeroUsize; +mod actor; mod block_producer; mod builder; -mod state; +mod coordinator; mod store; -mod transaction; pub use builder::NetworkTransactionBuilder; diff --git a/crates/ntx-builder/src/state/mod.rs b/crates/ntx-builder/src/state/mod.rs deleted file mode 100644 index fbdb8ea563..0000000000 --- a/crates/ntx-builder/src/state/mod.rs +++ /dev/null @@ -1,487 +0,0 @@ -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}; -use std::num::NonZeroUsize; - -use account::{AccountState, InflightNetworkNote, NetworkAccountEffect}; -use anyhow::Context; -use miden_node_proto::domain::account::NetworkAccountPrefix; -use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; -use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::account::Account; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::{Note, Nullifier}; -use miden_objects::transaction::{PartialBlockchain, TransactionId}; -use tracing::instrument; - -use crate::COMPONENT; -use crate::store::{StoreClient, StoreError}; - -mod account; - -// CONSTANTS -// ================================================================================================= - -/// The maximum number of blocks to keep in memory while tracking the chain tip. -const MAX_BLOCK_COUNT: usize = 4; - -/// A candidate network transaction. -/// -/// Contains the data pertaining to a specific network account which can be used to build a network -/// transaction. 
-#[derive(Clone)] -pub struct TransactionCandidate { - /// The current inflight state of the account. - pub account: Account, - - /// A set of notes addressed to this network account. - pub notes: Vec, - - /// The latest locally committed block header. - /// - /// This should be used as the reference block during transaction execution. - pub chain_tip_header: BlockHeader, - - /// The chain MMR, which lags behind the tip by one block. - pub chain_mmr: PartialBlockchain, -} - -/// Holds the state of the network transaction builder. -/// -/// It tracks inflight transactions, and their impact on network-related state. -pub struct State { - /// The latest committed block header. - chain_tip_header: BlockHeader, - - /// The chain MMR, which lags behind the tip by one block. - chain_mmr: PartialBlockchain, - - /// Tracks all network accounts with inflight state. - /// - /// This is network account deltas, network notes and their nullifiers. - accounts: HashMap, - - /// A rotating queue of all tracked network accounts. - /// - /// This is used to select the next transaction's account. - /// - /// Note that this _always_ includes _all_ network accounts. Filtering out accounts that aren't - /// viable is handled within the select method itself. - queue: VecDeque, - - /// Network accounts which have been selected but whose transaction has not yet completed. - /// - /// This locks these accounts so they cannot be selected. - in_progress: HashSet, - - /// Uncommitted transactions which have a some impact on the network state. - /// - /// This is tracked so we can commit or revert such transaction effects. Transactions _without_ - /// an impact are ignored. - inflight_txs: BTreeMap, - - /// A mapping of network note's to their account. - nullifier_idx: BTreeMap, - - /// gRPC client used to retrieve the network account state from the store. - store: StoreClient, -} - -impl State { - /// Maximum number of attempts to execute a network note. 
- const MAX_NOTE_ATTEMPTS: usize = 1; - - /// Load's all available network notes from the store, along with the required account states. - #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] - pub async fn load(store: StoreClient) -> Result { - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? - .expect("store should contain a latest block"); - - let chain_mmr = PartialBlockchain::new(chain_mmr, []) - .expect("PartialBlockchain should build from latest partial MMR"); - - let mut state = Self { - chain_tip_header, - chain_mmr, - store, - accounts: HashMap::default(), - queue: VecDeque::default(), - in_progress: HashSet::default(), - inflight_txs: BTreeMap::default(), - nullifier_idx: BTreeMap::default(), - }; - - let notes = state.store.get_unconsumed_network_notes().await?; - for note in notes { - // Currently only support single target network notes in NTB. - if let NetworkNote::SingleTarget(note) = note { - let prefix = note.account_prefix(); - // Ignore notes which don't target an existing account. - if let Some(account) = state.fetch_account(prefix).await? { - account.add_note(note); - } - } - } - state.inject_telemetry(); - - Ok(state) - } - - /// Selects the next candidate network transaction. - /// - /// Note that this marks the candidate account as in-progress and that it cannot be selected - /// again until either: - /// - /// - it has been marked as failed if the transaction failed, or - /// - the transaction was submitted successfully, indicated by the associated mempool event - /// being submitted - #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] - pub fn select_candidate(&mut self, limit: NonZeroUsize) -> Option { - // Loop through the account queue until we find one that is selectable. - // - // Since the queue contains _all_ accounts, including unselectable accounts, we limit our - // search to once through the entire queue. 
- // - // There are smarter ways of doing this, but this should scale more than well enough for a - // long time. - for _ in 0..self.queue.len() { - // This is a rotating queue. - let candidate = self.queue.pop_front().unwrap(); - self.queue.push_back(candidate); - - // Skip accounts which are already in-progress. - if self.in_progress.contains(&candidate) { - continue; - } - - let account = self.accounts.get_mut(&candidate).expect("queue account must be tracked"); - - // Remove notes that have failed too many times. - account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); - - // Skip empty accounts, and prune them. - // This is how we keep the number of accounts bounded. - if account.is_empty() { - // We don't need to prune the inflight transactions because if the account is empty, - // then it would have no inflight txs. - self.accounts.remove(&candidate); - // We know this account is the backmost one since we just rotated it there. - self.queue.pop_back(); - continue; - } - - // Select notes from the account that can be consumed or are ready for a retry. - let notes = account - .available_notes(&self.chain_tip_header.block_num()) - .take(limit.get()) - .cloned() - .collect::>(); - - // Skip accounts with no available notes. - if notes.is_empty() { - continue; - } - - self.in_progress.insert(candidate); - return TransactionCandidate { - account: account.latest_account(), - notes, - chain_tip_header: self.chain_tip_header.clone(), - chain_mmr: self.chain_mmr.clone(), - } - .into(); - } - self.inject_telemetry(); - - None - } - - /// The latest block number the state knows of. - pub fn chain_tip(&self) -> BlockNumber { - self.chain_tip_header.block_num() - } - - /// Updates the chain tip and MMR block count. - /// - /// Blocks in the MMR are pruned if the block count exceeds the maximum. - fn update_chain_tip(&mut self, tip: BlockHeader) { - // Update MMR which lags by one block. - self.chain_mmr.add_block(self.chain_tip_header.clone(), true); - - // Set the new tip. 
- self.chain_tip_header = tip; - - // Keep MMR pruned. - let pruned_block_height = - (self.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) as u32; - self.chain_mmr.prune_to(..pruned_block_height.into()); - } - - /// Marks notes of a previously selected candidate as failed. - /// - /// Does not remove the candidate from the in-progress pool. - #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] - pub fn notes_failed( - &mut self, - candidate: NetworkAccountPrefix, - notes: &[Note], - block_num: BlockNumber, - ) { - if let Some(account) = self.accounts.get_mut(&candidate) { - let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - account.fail_notes(nullifiers.as_slice(), block_num); - } else { - tracing::error!(account.prefix=%candidate, "failed network notes have no local account state"); - } - } - - /// Marks a previously selected candidate account as failed, allowing it to be available for - /// selection again. - /// - /// All notes in the candidate will be marked as failed. - #[instrument(target = COMPONENT, name = "ntx.state.candidate_failed", skip_all)] - pub fn candidate_failed(&mut self, candidate: NetworkAccountPrefix) { - self.in_progress.remove(&candidate); - - self.inject_telemetry(); - } - - /// Updates state with the mempool event. - #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] - pub async fn mempool_update(&mut self, update: MempoolEvent) -> anyhow::Result<()> { - let span = tracing::Span::current(); - span.set_attribute("mempool_event.kind", update.kind()); - - match update { - // Note: this event will get triggered by normal user transactions, as well as our - // network transactions. The mempool does not distinguish between the two. 
- MempoolEvent::TransactionAdded { - id, - nullifiers, - network_notes, - account_delta, - } => { - let network_notes = network_notes - .into_iter() - .filter_map(|note| match note { - NetworkNote::SingleTarget(note) => Some(note), - NetworkNote::MultiTarget(_) => None, - }) - .collect::>(); - self.add_transaction(id, nullifiers, network_notes, account_delta).await?; - }, - MempoolEvent::BlockCommitted { header, txs } => { - anyhow::ensure!( - header.prev_block_commitment() == self.chain_tip_header.commitment(), - "New block's parent commitment {} does not match local chain tip {}", - header.prev_block_commitment(), - self.chain_tip_header.commitment() - ); - self.update_chain_tip(header); - for tx in txs { - self.commit_transaction(tx); - } - }, - MempoolEvent::TransactionsReverted(txs) => { - for tx in txs { - self.revert_transaction(tx); - } - }, - } - self.inject_telemetry(); - - Ok(()) - } - - /// Handles a [`MempoolEvent::TransactionAdded`] event. - /// - /// Note that this will include our own network transactions as well as user submitted - /// transactions. - /// - /// This updates the state of network accounts affected by this transaction. Account state - /// may be loaded from the store if it isn't already known locally. This would be the case if - /// the network account has no inflight state changes. - async fn add_transaction( - &mut self, - id: TransactionId, - nullifiers: Vec, - network_notes: Vec, - account_delta: Option, - ) -> anyhow::Result<()> { - // Skip transactions we already know about. - // - // This can occur since both ntx builder and the mempool might inform us of the same - // transaction. Once when it was submitted to the mempool, and once by the mempool event. 
- if self.inflight_txs.contains_key(&id) { - return Ok(()); - } - - let mut tx_impact = TransactionImpact::default(); - if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { - let prefix = update.prefix(); - - match update { - NetworkAccountEffect::Created(account) => { - let account_state = AccountState::from_uncommitted_account(account); - self.accounts.insert(prefix, account_state); - self.queue.push_back(prefix); - }, - NetworkAccountEffect::Updated(account_delta) => { - self.fetch_account(prefix) - .await - .context("failed to load account")? - .context("account with delta not found")? - .add_delta(&account_delta); - }, - } - - // If this account was in-progress, then it should no longer be as this update is the - // result of our own network transaction. - self.in_progress.remove(&prefix); - tx_impact.account_delta = Some(prefix); - } - for note in network_notes { - let prefix = note.account_prefix(); - tx_impact.notes.insert(note.nullifier()); - - // Skip and ignore nullifier if note targets a non-existent network account - let Some(account) = self.fetch_account(prefix).await? else { - tracing::warn!("could not fetch account from network: {:?}", prefix); - continue; - }; - - account.add_note(note.clone()); - self.nullifier_idx.insert(note.nullifier(), prefix); - } - for nullifier in nullifiers { - // Ignore nullifiers that aren't network note nullifiers. - let Some(account) = self.nullifier_idx.get(&nullifier) else { - continue; - }; - tx_impact.nullifiers.insert(nullifier); - // We don't use the entry wrapper here because the account must already exist. - let _res = self - .accounts - .get_mut(account) - .expect("nullifier account must exist") - .add_nullifier(nullifier); - } - - if !tx_impact.is_empty() { - self.inflight_txs.insert(id, tx_impact); - } - - Ok(()) - } - - /// Handles [`MempoolEvent::BlockCommitted`] events. 
- fn commit_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(prefix) = impact.account_delta { - self.accounts.get_mut(&prefix).unwrap().commit_delta(); - } - - for nullifier in impact.nullifiers { - let prefix = self.nullifier_idx.remove(&nullifier).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(&prefix) { - account.commit_nullifier(nullifier); - } - } - } - - /// Handles [`MempoolEvent::TransactionsReverted`] events. - fn revert_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(prefix) = impact.account_delta { - // We need to remove the account if this transaction created the account. - if self.accounts.get_mut(&prefix).unwrap().revert_delta() { - self.accounts.remove(&prefix); - } - } - - for note in impact.notes { - let prefix = self.nullifier_idx.remove(¬e).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(&prefix) { - account.revert_note(note); - } - } - - for nullifier in impact.nullifiers { - let prefix = self.nullifier_idx.get(&nullifier).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(prefix) { - account.revert_nullifier(nullifier); - } - } - } - - /// Returns the current inflight account, loading it from the store if it isn't present locally. - /// - /// Returns `None` if the account is unknown. 
- async fn fetch_account( - &mut self, - prefix: NetworkAccountPrefix, - ) -> Result, StoreError> { - match self.accounts.entry(prefix) { - Entry::Occupied(occupied_entry) => Ok(Some(occupied_entry.into_mut())), - Entry::Vacant(vacant_entry) => { - let Some(account) = self.store.get_network_account(prefix).await? else { - return Ok(None); - }; - - self.queue.push_back(prefix); - let entry = vacant_entry.insert(AccountState::from_committed_account(account)); - - Ok(Some(entry)) - }, - } - } - - /// Adds stats to the current tracing span. - /// - /// Note that these are only visible in the OpenTelemetry context, as conventional tracing - /// does not track fields added dynamically. - fn inject_telemetry(&self) { - let span = tracing::Span::current(); - - span.set_attribute("ntx.state.accounts.total", self.accounts.len()); - span.set_attribute("ntx.state.accounts.in_progress", self.in_progress.len()); - span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); - span.set_attribute("ntx.state.notes.total", self.nullifier_idx.len()); - } -} - -/// The impact a transaction has on the state. -#[derive(Default)] -struct TransactionImpact { - /// The network account this transaction added an account delta to. - account_delta: Option, - - /// Network notes this transaction created. - notes: BTreeSet, - - /// Network notes this transaction consumed. 
- nullifiers: BTreeSet, -} - -impl TransactionImpact { - fn is_empty(&self) -> bool { - self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() - } -} diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 9222752769..ac94f20b72 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -1,17 +1,32 @@ +use std::collections::BTreeSet; +use std::ops::RangeInclusive; use std::time::Duration; -use miden_node_proto::clients::{Builder, StoreNtxBuilder, StoreNtxBuilderClient}; -use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; +use miden_node_proto::domain::account::{AccountDetails, AccountResponse, NetworkAccountId}; use miden_node_proto::domain::note::NetworkNote; use miden_node_proto::errors::ConversionError; +use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; -use miden_objects::Word; -use miden_objects::account::Account; -use miden_objects::block::BlockHeader; -use miden_objects::crypto::merkle::{Forest, MmrPeaks, PartialMmr}; -use miden_objects::note::NoteScript; -use miden_tx::utils::Deserializable; +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_protocol::Word; +use miden_protocol::account::{ + Account, + AccountCode, + AccountId, + PartialAccount, + PartialStorage, + StorageMapWitness, + StorageSlotName, +}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness, PartialVault}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks, PartialMmr}; +use miden_protocol::crypto::merkle::smt::SmtProof; +use miden_protocol::note::NoteScript; +use miden_protocol::transaction::AccountInputs; +use miden_tx::utils::{Deserializable, Serializable}; use thiserror::Error; use tracing::{info, instrument}; use url::Url; @@ -39,7 +54,8 @@ impl StoreClient { 
.without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { inner: store } } @@ -55,7 +71,7 @@ impl StoreClient { Err(StoreError::GrpcClientError(err)) => { // Exponential backoff with base 500ms and max 30s. let backoff = Duration::from_millis(500) - .saturating_mul(1 << retry_counter) + .saturating_mul(1 << retry_counter.min(6)) .min(Duration::from_secs(30)); tracing::warn!( @@ -104,43 +120,12 @@ impl StoreClient { } } - /// Returns the list of unconsumed network notes. - #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] - pub async fn get_unconsumed_network_notes(&self) -> Result, StoreError> { - let mut all_notes = Vec::new(); - let mut page_token: Option = None; - - loop { - let req = proto::ntx_builder_store::UnconsumedNetworkNotesRequest { - page_token, - page_size: 128, - }; - let resp = self.inner.clone().get_unconsumed_network_notes(req).await?.into_inner(); - - let page: Vec = resp - .notes - .into_iter() - .map(NetworkNote::try_from) - .collect::, _>>()?; - - all_notes.extend(page); - - match resp.next_token { - Some(tok) => page_token = Some(tok), - None => break, - } - } - - Ok(all_notes) - } - #[instrument(target = COMPONENT, name = "store.client.get_network_account", skip_all, err)] pub async fn get_network_account( &self, - prefix: NetworkAccountPrefix, + account_id: NetworkAccountId, ) -> Result, StoreError> { - let request = - proto::ntx_builder_store::AccountIdPrefix { account_id_prefix: prefix.inner() }; + let request = proto::store::AccountIdPrefix { account_id_prefix: account_id.prefix() }; let store_response = self .inner @@ -164,6 +149,217 @@ impl StoreClient { Ok(account) } + /// Get the inputs for an account at a given block number from the store. + /// + /// Retrieves account details from the store. 
The retrieved details are limited to the account + /// code, account header, and storage header. The vault and storage slots are not required for + /// the purposes of the NTX Builder. + #[instrument(target = COMPONENT, name = "store.client.get_account_inputs", skip_all, err)] + pub async fn get_account_inputs( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result { + // Construct proto request. + let proto_request = proto::rpc::AccountRequest { + account_id: Some(proto::account::AccountId { id: account_id.to_bytes() }), + block_num: Some(block_num.into()), + // Request account code, account header, and storage header in order to build minimal + // partial account. + details: Some(proto::rpc::account_request::AccountDetailRequest { + code_commitment: Some(Word::default().into()), + asset_vault_commitment: None, + storage_maps: vec![], + }), + }; + + // Make the gRPC call. + let proto_response = self.inner.clone().get_account(proto_request).await?.into_inner(); + + // Convert proto response to domain type. + let account_response = + AccountResponse::try_from(proto_response).map_err(StoreError::DeserializationError)?; + + // Build partial account. + let account_details = account_response + .details + .ok_or(StoreError::MissingDetails("account details".into()))?; + let partial_account = build_minimal_foreign_account(&account_details)?; + + Ok(AccountInputs::new(partial_account, account_response.witness)) + } + + /// Returns the list of unconsumed network notes for a specific network account up to a + /// specified block. + #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] + pub async fn get_unconsumed_network_notes( + &self, + network_account_id: NetworkAccountId, + block_num: u32, + ) -> Result, StoreError> { + // Upper bound of each note is ~10KB. Limit page size to ~10MB. 
+ const PAGE_SIZE: u64 = 1024; + + let mut all_notes = Vec::new(); + let mut page_token: Option = None; + + let mut store_client = self.inner.clone(); + loop { + let req = proto::store::UnconsumedNetworkNotesRequest { + page_token, + page_size: PAGE_SIZE, + account_id: Some(network_account_id.inner().into()), + block_num, + }; + let resp = store_client.get_unconsumed_network_notes(req).await?.into_inner(); + + all_notes.reserve(resp.notes.len()); + for note in resp.notes { + all_notes.push(NetworkNote::try_from(note)?); + } + + match resp.next_token { + Some(token) => page_token = Some(token), + None => break, + } + } + + Ok(all_notes) + } + + /// Streams network account IDs to the provided sender. + /// + /// This method is designed to be run in a background task, sending accounts to the main event + /// loop as they are loaded. This allows the ntx-builder to start processing mempool events + /// without waiting for all accounts to be preloaded. + pub async fn stream_network_account_ids( + &self, + sender: tokio::sync::mpsc::Sender, + ) -> Result<(), StoreError> { + let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + + while let Some(next_start) = self.load_accounts_page(block_range, &sender).await? { + block_range = next_start..=BlockNumber::from(u32::MAX); + } + + Ok(()) + } + + /// Loads a single page of network accounts and submits them to the sender. + /// + /// Returns the next block number to fetch from, or `None` if the chain tip has been reached. 
+ #[instrument(target = COMPONENT, name = "store.client.load_accounts_page", skip_all, err)] + async fn load_accounts_page( + &self, + block_range: RangeInclusive, + sender: &tokio::sync::mpsc::Sender, + ) -> Result, StoreError> { + let (accounts, pagination_info) = self.fetch_network_account_ids_page(block_range).await?; + + let chain_tip = pagination_info.chain_tip; + let current_height = pagination_info.block_num; + + self.send_accounts_to_channel(accounts, sender).await?; + + if current_height >= chain_tip { + Ok(None) + } else { + Ok(Some(BlockNumber::from(current_height))) + } + } + + #[instrument(target = COMPONENT, name = "store.client.fetch_network_account_ids_page", skip_all, err)] + async fn fetch_network_account_ids_page( + &self, + block_range: std::ops::RangeInclusive, + ) -> Result<(Vec, proto::rpc::PaginationInfo), StoreError> { + self.fetch_network_account_ids_page_inner(block_range) + .await + .inspect_err(|err| tracing::Span::current().set_error(err)) + } + + async fn fetch_network_account_ids_page_inner( + &self, + block_range: std::ops::RangeInclusive, + ) -> Result<(Vec, proto::rpc::PaginationInfo), StoreError> { + let mut retry_counter = 0u32; + + let response = loop { + match self + .inner + .clone() + .get_network_account_ids(Into::::into(block_range.clone())) + .await + { + Ok(response) => break response.into_inner(), + Err(err) => { + // Exponential backoff with base 500ms and max 30s. 
+ let backoff = Duration::from_millis(500) + .saturating_mul(1 << retry_counter.min(6)) + .min(Duration::from_secs(30)); + + tracing::warn!( + ?backoff, + %retry_counter, + %err, + "store connection failed while fetching committed accounts page, retrying" + ); + + retry_counter += 1; + tokio::time::sleep(backoff).await; + }, + } + }; + + let accounts = response + .account_ids + .into_iter() + .map(|account_id| { + let account_id = AccountId::read_from_bytes(&account_id.id).map_err(|err| { + StoreError::DeserializationError(ConversionError::deserialization_error( + "account_id", + err, + )) + })?; + NetworkAccountId::try_from(account_id).map_err(|_| { + StoreError::MalformedResponse( + "account id is not a valid network account".into(), + ) + }) + }) + .collect::, StoreError>>()?; + + let pagination_info = response.pagination_info.ok_or( + ConversionError::MissingFieldInProtobufRepresentation { + entity: "NetworkAccountIdList", + field_name: "pagination_info", + }, + )?; + + Ok((accounts, pagination_info)) + } + + #[instrument( + target = COMPONENT, + name = "store.client.send_accounts_to_channel", + skip_all + )] + async fn send_accounts_to_channel( + &self, + accounts: Vec, + sender: &tokio::sync::mpsc::Sender, + ) -> Result<(), StoreError> { + for account in accounts { + // If the receiver is dropped, stop loading. + if sender.send(account).await.is_err() { + tracing::warn!("Account receiver dropped"); + return Ok(()); + } + } + + Ok(()) + } + #[instrument(target = COMPONENT, name = "store.client.get_note_script_by_root", skip_all, err)] pub async fn get_note_script_by_root( &self, @@ -188,9 +384,88 @@ impl StoreClient { Ok(None) } } + + #[instrument(target = COMPONENT, name = "store.client.get_vault_asset_witnesses", skip_all, err)] + pub async fn get_vault_asset_witnesses( + &self, + account_id: AccountId, + vault_keys: BTreeSet, + block_num: Option, + ) -> Result, StoreError> { + // Construct proto request. 
+ let request = proto::store::VaultAssetWitnessesRequest { + account_id: Some(proto::account::AccountId { id: account_id.to_bytes() }), + vault_keys: vault_keys + .into_iter() + .map(|key| { + let word: Word = key.into(); + word.into() + }) + .collect(), + block_num: block_num.map(|num| num.as_u32()), + }; + + // Make the gRPC request. + let witness_proto = + self.inner.clone().get_vault_asset_witnesses(request).await?.into_inner(); + + // Convert the response to domain type. + let mut asset_witnesses = Vec::new(); + for asset_witness in witness_proto.asset_witnesses { + let smt_opening = asset_witness.proof.ok_or_else(|| { + StoreError::MalformedResponse("missing proof in vault asset witness".to_string()) + })?; + let proof: SmtProof = + smt_opening.try_into().map_err(StoreError::DeserializationError)?; + let witness = AssetWitness::new(proof) + .map_err(|err| StoreError::DeserializationError(ConversionError::from(err)))?; + + asset_witnesses.push(witness); + } + + Ok(asset_witnesses) + } + + #[instrument(target = COMPONENT, name = "store.client.get_storage_map_witness", skip_all, err)] + pub async fn get_storage_map_witness( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + map_key: Word, + block_num: Option, + ) -> Result { + // Construct proto request. + let request = proto::store::StorageMapWitnessRequest { + account_id: Some(proto::account::AccountId { id: account_id.to_bytes() }), + map_key: Some(map_key.into()), + slot_name: slot_name.to_string(), + block_num: block_num.map(|num| num.as_u32()), + }; + + // Make the request to the store. + let witness_proto = self.inner.clone().get_storage_map_witness(request).await?.into_inner(); + + // Convert the response to domain type. 
+ let witness_proto = witness_proto.witness.ok_or_else(|| { + StoreError::MalformedResponse("missing storage map witness in response".to_string()) + })?; + + let smt_opening = witness_proto.proof.ok_or_else(|| { + StoreError::MalformedResponse("missing proof in storage map witness".to_string()) + })?; + + let proof: SmtProof = smt_opening.try_into().map_err(StoreError::DeserializationError)?; + + // Create the storage map witness using the proof and raw map key. + let witness = StorageMapWitness::new(proof, [map_key]).map_err(|_err| { + StoreError::MalformedResponse("failed to create storage map witness".to_string()) + })?; + + Ok(witness) + } } -// Store errors +// STORE ERROR // ================================================================================================= #[derive(Debug, Error)] @@ -201,4 +476,41 @@ pub enum StoreError { MalformedResponse(String), #[error("failed to parse response")] DeserializationError(#[from] ConversionError), + #[error("missing details: {0}")] + MissingDetails(String), +} + +// HELPERS +// ================================================================================================= + +/// Builds a minimal partial account from the provided account details. +/// +/// The partial account is built without storage maps or an asset vault. This is intended to be used +/// to retrieve foreign account data during transaction execution. +pub fn build_minimal_foreign_account( + account_details: &AccountDetails, +) -> Result { + // Derive account code. + let account_code_bytes = account_details + .account_code + .as_ref() + .ok_or(ConversionError::AccountCodeMissing)?; + let account_code = AccountCode::from_bytes(account_code_bytes)?; + + // Derive partial storage. Storage maps are not required for foreign accounts. + let partial_storage = PartialStorage::new(account_details.storage_details.header.clone(), [])?; + + // Derive partial vault from vault root only. 
+ let partial_vault = PartialVault::new(account_details.account_header.vault_root()); + + // Construct partial account. + let partial_account = PartialAccount::new( + account_details.account_header.id(), + account_details.account_header.nonce(), + account_code, + partial_storage, + partial_vault, + None, + )?; + Ok(partial_account) } diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/transaction.rs deleted file mode 100644 index 7c7cfce3e4..0000000000 --- a/crates/ntx-builder/src/transaction.rs +++ /dev/null @@ -1,444 +0,0 @@ -use std::collections::BTreeSet; -use std::sync::Arc; - -use lru::LruCache; -use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::account::{Account, AccountId, PartialAccount, StorageMapWitness, StorageSlot}; -use miden_objects::asset::{AssetVaultKey, AssetWitness}; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::{Note, NoteScript}; -use miden_objects::transaction::{ - AccountInputs, - ExecutedTransaction, - InputNote, - InputNotes, - PartialBlockchain, - ProvenTransaction, - TransactionArgs, - TransactionInputs, -}; -use miden_objects::vm::FutureMaybeSend; -use miden_objects::{TransactionInputError, Word}; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; -use miden_tx::auth::UnreachableAuth; -use miden_tx::{ - DataStore, - DataStoreError, - FailedNote, - LocalTransactionProver, - MastForestStore, - NoteCheckerError, - NoteConsumptionChecker, - NoteConsumptionInfo, - TransactionExecutor, - TransactionExecutorError, - TransactionMastStore, - TransactionProverError, -}; -use tokio::sync::Mutex; -use tokio::task::JoinError; -use tracing::{Instrument, instrument}; - -use crate::COMPONENT; -use crate::block_producer::BlockProducerClient; -use crate::state::TransactionCandidate; -use crate::store::StoreClient; - -#[derive(Debug, thiserror::Error)] -pub enum NtxError { - #[error("note inputs were invalid")] - InputNotes(#[source] 
TransactionInputError), - #[error("failed to filter notes")] - NoteFilter(#[source] NoteCheckerError), - #[error("all notes failed to be executed")] - AllNotesFailed(Vec), - #[error("failed to execute transaction")] - Execution(#[source] TransactionExecutorError), - #[error("failed to prove transaction")] - Proving(#[source] TransactionProverError), - #[error("failed to submit transaction")] - Submission(#[source] tonic::Status), - #[error("the ntx task panicked")] - Panic(#[source] JoinError), -} - -type NtxResult = Result; - -// Context and execution of network transactions -// ================================================================================================ - -/// Provides the context for execution [network transaction candidates](TransactionCandidate). -#[derive(Clone)] -pub struct NtxContext { - pub block_producer: BlockProducerClient, - - /// The prover to delegate proofs to. - /// - /// Defaults to local proving if unset. This should be avoided in production as this is - /// computationally intensive. - pub prover: Option, - - /// The store client for retrieving note scripts. - pub store: StoreClient, -} - -impl NtxContext { - /// Executes a transaction end-to-end: filtering, executing, proving, and submitted to the block - /// producer. - /// - /// The provided [`TransactionCandidate`] is processed in the following stages: - /// 1. Note filtering – all input notes are checked for consumability. Any notes that cannot be - /// executed are returned as [`FailedNote`]s. - /// 2. Execution – the remaining notes are executed against the account state. - /// 3. Proving – a proof is generated for the executed transaction. - /// 4. Submission – the proven transaction is submitted to the block producer. - /// - /// # Returns - /// - /// On success, returns the list of [`FailedNote`]s representing notes that were - /// filtered out before execution. 
- /// - /// # Errors - /// - /// Returns an [`NtxError`] if any step of the pipeline fails, including: - /// - Note filtering (e.g., all notes fail consumability checks). - /// - Transaction execution. - /// - Proof generation. - /// - Submission to the network. - #[instrument(target = COMPONENT, name = "ntx.execute_transaction", skip_all)] - pub fn execute_transaction( - self, - tx: TransactionCandidate, - ) -> impl FutureMaybeSend, (Vec, NtxError)>> { - let TransactionCandidate { - account, - notes, - chain_tip_header, - chain_mmr, - } = tx; - - let notes = notes.into_iter().map(Note::from).collect::>(); - let notes_copy = notes.clone(); - - async move { - async move { - let span = tracing::Span::current(); - span.set_attribute("account.id", account.id()); - span.set_attribute( - "account.id.network_prefix", - account.id().prefix().to_string().as_str(), - ); - span.set_attribute("notes.count", notes.len()); - span.set_attribute("reference_block.number", chain_tip_header.block_num()); - - let data_store = - NtxDataStore::new(account, chain_tip_header, chain_mmr, self.store.clone()); - - let (successful, failed) = self.filter_notes(&data_store, notes).await?; - let executed = Box::pin(self.execute(&data_store, successful)).await?; - let proven = Box::pin(self.prove(executed.into())).await?; - self.submit(proven).await?; - Ok(failed) - } - .in_current_span() - .await - .inspect_err(|err| tracing::Span::current().set_error(err)) - .map_err(|err| (notes_copy, err)) - } - } - - /// Filters a collection of notes, returning only those that can be successfully executed - /// against the given network account. - /// - /// This function performs a consumability check on each provided note and partitions them into - /// two sets: - /// - Successful notes: notes that can be executed and are returned wrapped in [`InputNotes`]. - /// - Failed notes: notes that cannot be executed. 
- /// - /// # Guarantees - /// - /// - On success, the returned [`InputNotes`] set is guaranteed to be non-empty. - /// - The original ordering of notes is not preserved if any notes have failed. - /// - /// # Errors - /// - /// Returns an [`NtxError`] if: - /// - The consumability check fails unexpectedly. - /// - All notes fail the check (i.e., no note is consumable). - #[instrument(target = COMPONENT, name = "ntx.execute_transaction.filter_notes", skip_all, err)] - async fn filter_notes( - &self, - data_store: &NtxDataStore, - notes: Vec, - ) -> NtxResult<(InputNotes, Vec)> { - let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = - TransactionExecutor::new(data_store); - let checker = NoteConsumptionChecker::new(&executor); - - match Box::pin(checker.check_notes_consumability( - data_store.account.id(), - data_store.reference_header.block_num(), - notes, - TransactionArgs::default(), - )) - .await - { - Ok(NoteConsumptionInfo { successful, failed, .. }) => { - // Map successful notes to input notes. - let successful = InputNotes::from_unauthenticated_notes(successful) - .map_err(NtxError::InputNotes)?; - - // If none are successful, abort. - if successful.is_empty() { - return Err(NtxError::AllNotesFailed(failed)); - } - - Ok((successful, failed)) - }, - Err(err) => return Err(NtxError::NoteFilter(err)), - } - } - - /// Creates an executes a transaction with the network account and the given set of notes. 
- #[instrument(target = COMPONENT, name = "ntx.execute_transaction.execute", skip_all, err)] - async fn execute( - &self, - data_store: &NtxDataStore, - notes: InputNotes, - ) -> NtxResult { - let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = - TransactionExecutor::new(data_store); - - Box::pin(executor.execute_transaction( - data_store.account.id(), - data_store.reference_header.block_num(), - notes, - TransactionArgs::default(), - )) - .await - .map_err(NtxError::Execution) - } - - /// Delegates the transaction proof to the remote prover if configured, otherwise performs the - /// proof locally. - #[instrument(target = COMPONENT, name = "ntx.execute_transaction.prove", skip_all, err)] - async fn prove(&self, tx_inputs: TransactionInputs) -> NtxResult { - if let Some(remote) = &self.prover { - remote.prove(tx_inputs).await - } else { - tokio::task::spawn_blocking(move || LocalTransactionProver::default().prove(tx_inputs)) - .await - .map_err(NtxError::Panic)? - } - .map_err(NtxError::Proving) - } - - /// Submits the transaction to the block producer. - #[instrument(target = COMPONENT, name = "ntx.execute_transaction.submit", skip_all, err)] - async fn submit(&self, tx: ProvenTransaction) -> NtxResult<()> { - self.block_producer - .submit_proven_transaction(tx) - .await - .map_err(NtxError::Submission) - } -} - -// Data store implementation for the transaction execution -// ================================================================================================ - -/// A [`DataStore`] implementation which provides transaction inputs for a single account and -/// reference block with LRU caching for note scripts. -/// -/// This implementation includes an LRU (Least Recently Used) cache for note scripts to improve -/// performance by avoiding repeated RPC calls for the same script roots. The cache automatically -/// manages memory usage by evicting least recently used entries when the cache reaches capacity. 
-/// -/// This is sufficient for executing a network transaction. -struct NtxDataStore { - account: Account, - reference_header: BlockHeader, - chain_mmr: PartialBlockchain, - mast_store: TransactionMastStore, - /// Store client for retrieving note scripts. - store: StoreClient, - /// LRU cache for storing retrieved note scripts to avoid repeated store calls. - script_cache: Arc>>, -} - -impl NtxDataStore { - /// Default cache size for note scripts. - /// - /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage - /// depends on the complexity of the scripts being cached. - const DEFAULT_SCRIPT_CACHE_SIZE: usize = 1000; - - /// Creates a new `NtxDataStore` with default cache size. - fn new( - account: Account, - reference_header: BlockHeader, - chain_mmr: PartialBlockchain, - store: StoreClient, - ) -> Self { - let mast_store = TransactionMastStore::new(); - mast_store.load_account_code(account.code()); - - Self { - account, - reference_header, - chain_mmr, - mast_store, - store, - script_cache: Arc::new(Mutex::new(LruCache::new( - std::num::NonZeroUsize::new(Self::DEFAULT_SCRIPT_CACHE_SIZE) - .expect("default script cache size is non-zero"), - ))), - } - } -} - -impl DataStore for NtxDataStore { - fn get_transaction_inputs( - &self, - account_id: AccountId, - ref_blocks: BTreeSet, - ) -> impl FutureMaybeSend> - { - async move { - if self.account.id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - match ref_blocks.last().copied() { - Some(reference) if reference == self.reference_header.block_num() => {}, - - Some(other) => return Err(DataStoreError::BlockNotFound(other)), - None => return Err(DataStoreError::other("no reference block requested")), - } - - let partial_account = PartialAccount::from(&self.account); - - Ok((partial_account, self.reference_header.clone(), self.chain_mmr.clone())) - } - } - - fn get_foreign_account_inputs( - &self, - foreign_account_id: AccountId, - 
_ref_block: BlockNumber, - ) -> impl FutureMaybeSend> { - async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) } - } - - fn get_vault_asset_witness( - &self, - account_id: AccountId, - vault_root: Word, - vault_key: AssetVaultKey, - ) -> impl FutureMaybeSend> { - async move { - if self.account.id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - if self.account.vault().root() != vault_root { - return Err(DataStoreError::Other { - error_msg: "vault root mismatch".into(), - source: None, - }); - } - - AssetWitness::new(self.account.vault().open(vault_key).into()).map_err(|err| { - DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(Box::new(err)), - } - }) - } - } - - fn get_storage_map_witness( - &self, - account_id: AccountId, - map_root: Word, - map_key: Word, - ) -> impl FutureMaybeSend> { - async move { - if self.account.id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - let mut map_witness = None; - for slot in self.account.storage().slots() { - if let StorageSlot::Map(map) = slot { - if map.root() == map_root { - map_witness = Some(map.open(&map_key)); - } - } - } - - if let Some(map_witness) = map_witness { - Ok(map_witness) - } else { - Err(DataStoreError::Other { - error_msg: "account storage does not contain the expected root".into(), - source: None, - }) - } - } - } - - /// Retrieves a note script by its root hash. - /// - /// This implementation uses the configured RPC client to call the `GetNoteScriptByRoot` - /// endpoint on the RPC server. - fn get_note_script( - &self, - script_root: Word, - ) -> impl FutureMaybeSend> { - let store = self.store.clone(); - let cache = self.script_cache.clone(); - - async move { - // Attempt to retrieve the script from the cache. 
- if let Some(cached_script) = { - let mut cache_guard = cache.lock().await; - cache_guard.get(&script_root).cloned() - } { - return Ok(cached_script); - } - - // Retrieve the script from the store. - let maybe_script = store.get_note_script_by_root(script_root).await.map_err(|err| { - DataStoreError::Other { - error_msg: "failed to retrieve note script from store".to_string().into(), - source: Some(err.into()), - } - })?; - // Handle response. - match maybe_script { - Some(script) => { - // Cache the retrieved script. - { - let mut cache_guard = cache.lock().await; - cache_guard.put(script_root, script.clone()); - } - // Return script. - Ok(script) - }, - None => { - // Response did not contain the note script. - Err(DataStoreError::NoteScriptNotFound(script_root)) - }, - } - } - } -} - -impl MastForestStore for NtxDataStore { - fn get( - &self, - procedure_hash: &miden_objects::Word, - ) -> Option> { - self.mast_store.get(procedure_hash) - } -} diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 0b09430305..6d3589ca3d 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -20,15 +20,17 @@ hex = { version = "0.4" } http = { workspace = true } miden-node-grpc-error-macro = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { workspace = true } +miden-protocol = { workspace = true } +miden-standards = { workspace = true } prost = { workspace = true } thiserror = { workspace = true } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } tonic-prost = { workspace = true } url = { workspace = true } [dev-dependencies] -proptest = { version = "1.7" } +assert_matches = { workspace = true } +proptest = { version = "1.7" } [build-dependencies] fs-err = { workspace = true } diff --git a/crates/proto/build.rs b/crates/proto/build.rs index 6d71e84004..b0ac773a72 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -9,7 +9,6 @@ use miden_node_proto_build::{ 
store_block_producer_api_descriptor, store_ntx_builder_api_descriptor, store_rpc_api_descriptor, - store_shared_api_descriptor, validator_api_descriptor, }; use miette::{Context, IntoDiagnostic}; @@ -44,7 +43,6 @@ fn main() -> miette::Result<()> { generate_bindings(store_rpc_api_descriptor(), &dst_dir)?; generate_bindings(store_ntx_builder_api_descriptor(), &dst_dir)?; generate_bindings(store_block_producer_api_descriptor(), &dst_dir)?; - generate_bindings(store_shared_api_descriptor(), &dst_dir)?; generate_bindings(block_producer_api_descriptor(), &dst_dir)?; generate_bindings(remote_prover_api_descriptor(), &dst_dir)?; generate_bindings(validator_api_descriptor(), &dst_dir)?; diff --git a/crates/proto/src/clients/mod.rs b/crates/proto/src/clients/mod.rs index 4fb6b622c0..3599b472c4 100644 --- a/crates/proto/src/clients/mod.rs +++ b/crates/proto/src/clients/mod.rs @@ -5,31 +5,34 @@ //! //! # Examples //! -//! ```rust,no_run -//! use miden_node_proto::clients::{Builder, WantsTls, StoreNtxBuilderClient, StoreNtxBuilder}; +//! ```rust +//! # use miden_node_proto::clients::{Builder, WantsTls, StoreNtxBuilderClient}; +//! # use url::Url; //! //! # async fn example() -> anyhow::Result<()> { //! // Create a store client with OTEL and TLS -//! let client: StoreNtxBuilderClient = Builder::new("https://store.example.com")? -//! .with_tls()? // or `.without_tls()` -//! .without_timeout() // or `.with_timeout(Duration::from_secs(10))` -//! .without_metadata_version() // or `.with_metadata_version("1.0".into())` -//! .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` -//! .connect::() +//! let url = Url::parse("https://example.com:8080")?; +//! let client: StoreNtxBuilderClient = Builder::new(url) +//! .with_tls()? // or `.without_tls()` +//! .without_timeout() // or `.with_timeout(Duration::from_secs(10))` +//! .without_metadata_version() // or `.with_metadata_version("1.0".into())` +//! .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` +//! 
.with_otel_context_injection() // or `.without_otel_context_injection()` +//! .connect::() //! .await?; //! # Ok(()) //! # } //! ``` -use std::collections::HashMap; -use std::fmt::Write; use std::marker::PhantomData; +use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use std::time::Duration; use anyhow::{Context, Result}; +use http::header::ACCEPT; use miden_node_utils::tracing::grpc::OtelInterceptor; use tonic::metadata::AsciiMetadataValue; -use tonic::service::Interceptor; use tonic::service::interceptor::InterceptedService; use tonic::transport::{Channel, ClientTlsConfig, Endpoint}; use tonic::{Request, Status}; @@ -37,206 +40,278 @@ use url::Url; use crate::generated; -// METADATA INTERCEPTOR -// ================================================================================================ +#[derive(Clone)] +pub struct Interceptor { + otel: Option, + accept: AsciiMetadataValue, +} -/// Interceptor designed to inject required metadata into all RPC requests. -#[derive(Default, Clone)] -pub struct MetadataInterceptor { - metadata: HashMap<&'static str, AsciiMetadataValue>, +impl Default for Interceptor { + fn default() -> Self { + Self { + otel: None, + accept: AsciiMetadataValue::from_static(Self::MEDIA_TYPE), + } + } } -impl MetadataInterceptor { - /// Adds or overwrites HTTP ACCEPT metadata to the interceptor. - /// - /// Provided version string must be ASCII. 
- pub fn with_accept_metadata( - mut self, - version: &str, - genesis: Option<&str>, - ) -> Result { - let mut accept_value = format!("application/vnd.miden; version={version}"); - if let Some(genesis) = genesis { - write!(accept_value, "; genesis={genesis}")?; +impl Interceptor { + const MEDIA_TYPE: &str = "application/vnd.miden"; + const VERSION: &str = "version"; + const GENESIS: &str = "genesis"; + + fn new(enable_otel: bool, version: Option<&str>, genesis: Option<&str>) -> Self { + if let Some(version) = version + && !version.is_ascii() + { + panic!("version contains non-ascii values: {version}"); + } + + if let Some(genesis) = genesis + && !genesis.is_ascii() + { + panic!("genesis contains non-ascii values: {genesis}"); + } + + let accept = match (version, genesis) { + (None, None) => Self::MEDIA_TYPE.to_string(), + (None, Some(genesis)) => format!("{}; {}={genesis}", Self::MEDIA_TYPE, Self::GENESIS), + (Some(version), None) => format!("{}; {}={version}", Self::MEDIA_TYPE, Self::VERSION), + (Some(version), Some(genesis)) => format!( + "{}; {}={version}, {}={genesis}", + Self::MEDIA_TYPE, + Self::VERSION, + Self::GENESIS + ), + }; + Self { + otel: enable_otel.then_some(OtelInterceptor), + // SAFETY: we checked that all values are ascii at the top of the function. 
+ accept: AsciiMetadataValue::from_str(&accept).unwrap(), } - self.metadata.insert("accept", AsciiMetadataValue::try_from(accept_value)?); - Ok(self) } } -// COMBINED INTERCEPTOR (OTEL + METADATA) + +impl tonic::service::Interceptor for Interceptor { + fn call(&mut self, mut request: tonic::Request<()>) -> Result, Status> { + if let Some(mut otel) = self.otel { + request = otel.call(request)?; + } + + request.metadata_mut().insert(ACCEPT.as_str(), self.accept.clone()); + + Ok(request) + } +} + +// TYPE ALIASES TO AID LEGIBILITY // ================================================================================================ -#[derive(Clone)] -pub struct OtelAndMetadataInterceptor { - otel: OtelInterceptor, - metadata: MetadataInterceptor, +type InterceptedChannel = InterceptedService; +type GeneratedRpcClient = generated::rpc::api_client::ApiClient; +type GeneratedBlockProducerClient = + generated::block_producer::api_client::ApiClient; +type GeneratedStoreClientForNtxBuilder = + generated::store::ntx_builder_client::NtxBuilderClient; +type GeneratedStoreClientForBlockProducer = + generated::store::block_producer_client::BlockProducerClient; +type GeneratedStoreClientForRpc = generated::store::rpc_client::RpcClient; +type GeneratedProxyStatusClient = + generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient; +type GeneratedProverClient = generated::remote_prover::api_client::ApiClient; +type GeneratedValidatorClient = generated::validator::api_client::ApiClient; + +// gRPC CLIENTS +// ================================================================================================ + +#[derive(Debug, Clone)] +pub struct RpcClient(GeneratedRpcClient); +#[derive(Debug, Clone)] +pub struct BlockProducerClient(GeneratedBlockProducerClient); +#[derive(Debug, Clone)] +pub struct StoreNtxBuilderClient(GeneratedStoreClientForNtxBuilder); +#[derive(Debug, Clone)] +pub struct StoreBlockProducerClient(GeneratedStoreClientForBlockProducer); +#[derive(Debug, 
Clone)] +pub struct StoreRpcClient(GeneratedStoreClientForRpc); +#[derive(Debug, Clone)] +pub struct RemoteProverProxyStatusClient(GeneratedProxyStatusClient); +#[derive(Debug, Clone)] +pub struct RemoteProverClient(GeneratedProverClient); +#[derive(Debug, Clone)] +pub struct ValidatorClient(GeneratedValidatorClient); + +impl DerefMut for RpcClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } -impl OtelAndMetadataInterceptor { - pub fn new(otel: OtelInterceptor, metadata: MetadataInterceptor) -> Self { - Self { otel, metadata } +impl Deref for RpcClient { + type Target = GeneratedRpcClient; + + fn deref(&self) -> &Self::Target { + &self.0 } } -impl Interceptor for OtelAndMetadataInterceptor { - fn call(&mut self, request: Request<()>) -> Result, Status> { - // Apply OTEL first so tracing context propagates, then attach metadata headers - let req = self.otel.call(request)?; - self.metadata.call(req) +impl DerefMut for BlockProducerClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl Interceptor for MetadataInterceptor { - fn call(&mut self, request: Request<()>) -> Result, Status> { - let mut request = request; - for (key, value) in &self.metadata { - request.metadata_mut().insert(*key, value.clone()); - } - Ok(request) +impl Deref for BlockProducerClient { + type Target = GeneratedBlockProducerClient; + + fn deref(&self) -> &Self::Target { + &self.0 } } -// TYPE ALIASES FOR INSTRUMENTED CLIENTS -// ================================================================================================ +impl DerefMut for StoreNtxBuilderClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -pub type RpcClient = - generated::rpc::api_client::ApiClient>; -pub type BlockProducerClient = - generated::block_producer::api_client::ApiClient>; -pub type StoreNtxBuilderClient = generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient< - InterceptedService, ->; -pub type StoreBlockProducerClient 
= - generated::block_producer_store::block_producer_client::BlockProducerClient< - InterceptedService, - >; -pub type StoreRpcClient = - generated::rpc_store::rpc_client::RpcClient>; - -pub type RemoteProverProxyStatusClient = - generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient< - InterceptedService, - >; - -pub type RemoteProverClient = - generated::remote_prover::api_client::ApiClient>; +impl Deref for StoreNtxBuilderClient { + type Target = GeneratedStoreClientForNtxBuilder; -// GRPC CLIENT BUILDER TRAIT -// ================================================================================================ + fn deref(&self) -> &Self::Target { + &self.0 + } +} -/// Configuration for gRPC clients. -/// -/// This struct contains the configuration for gRPC clients, including the metadata version and -/// genesis commitment. -pub struct ClientConfig { - pub metadata_version: Option, - pub metadata_genesis: Option, +impl DerefMut for StoreBlockProducerClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } -/// Trait for building gRPC clients from a common [`Builder`] configuration. -/// -/// This trait provides a standardized way to create different gRPC clients with consistent -/// configuration options like TLS, OTEL interceptors, and connection types. 
-pub trait GrpcClientBuilder { - type Service; +impl Deref for StoreBlockProducerClient { + type Target = GeneratedStoreClientForBlockProducer; - fn with_interceptor(channel: Channel, config: &ClientConfig) -> Self::Service; + fn deref(&self) -> &Self::Target { + &self.0 + } } -// CLIENT BUILDER MARKERS -// ================================================================================================ +impl DerefMut for StoreRpcClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct Rpc; +impl Deref for StoreRpcClient { + type Target = GeneratedStoreClientForRpc; -#[derive(Copy, Clone, Debug)] -pub struct BlockProducer; + fn deref(&self) -> &Self::Target { + &self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct StoreNtxBuilder; +impl DerefMut for RemoteProverProxyStatusClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct StoreBlockProducer; +impl Deref for RemoteProverProxyStatusClient { + type Target = GeneratedProxyStatusClient; -#[derive(Copy, Clone, Debug)] -pub struct StoreRpc; + fn deref(&self) -> &Self::Target { + &self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct RemoteProverProxy; +impl DerefMut for RemoteProverClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -// CLIENT BUILDER IMPLEMENTATIONS -// ================================================================================================ +impl Deref for RemoteProverClient { + type Target = GeneratedProverClient; -impl GrpcClientBuilder for Rpc { - type Service = RpcClient; + fn deref(&self) -> &Self::Target { + &self.0 + } +} - fn with_interceptor(channel: Channel, config: &ClientConfig) -> Self::Service { - // Include Accept header only if version was explicitly provided; still combine with OTEL. 
- let mut metadata = MetadataInterceptor::default(); - if let Some(version) = config.metadata_version.as_deref() { - metadata = metadata - .with_accept_metadata(version, config.metadata_genesis.as_deref()) - .expect("Failed to create metadata interceptor"); - } - let combined = OtelAndMetadataInterceptor::new(OtelInterceptor, metadata); - generated::rpc::api_client::ApiClient::with_interceptor(channel, combined) +impl DerefMut for ValidatorClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl GrpcClientBuilder for BlockProducer { - type Service = BlockProducerClient; +impl Deref for ValidatorClient { + type Target = GeneratedValidatorClient; - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::block_producer::api_client::ApiClient::with_interceptor(channel, OtelInterceptor) + fn deref(&self) -> &Self::Target { + &self.0 } } -impl GrpcClientBuilder for StoreNtxBuilder { - type Service = StoreNtxBuilderClient; +// GRPC CLIENT BUILDER TRAIT +// ================================================================================================ - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient::with_interceptor( - channel, - OtelInterceptor, - ) +/// Trait for building gRPC clients from a common [`Builder`] configuration. 
+pub trait GrpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self; +} + +impl GrpcClient for RpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedRpcClient::new(InterceptedService::new(channel, interceptor))) } } -impl GrpcClientBuilder for StoreBlockProducer { - type Service = StoreBlockProducerClient; +impl GrpcClient for BlockProducerClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedBlockProducerClient::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::block_producer_store::block_producer_client::BlockProducerClient::with_interceptor( +impl GrpcClient for StoreNtxBuilderClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForNtxBuilder::new(InterceptedService::new( channel, - OtelInterceptor, - ) + interceptor, + ))) } } -impl GrpcClientBuilder for StoreRpc { - type Service = StoreRpcClient; - - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::rpc_store::rpc_client::RpcClient::with_interceptor(channel, OtelInterceptor) +impl GrpcClient for StoreBlockProducerClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForBlockProducer::new(InterceptedService::new( + channel, + interceptor, + ))) } } -impl GrpcClientBuilder for RemoteProverProxy { - type Service = RemoteProverProxyStatusClient; +impl GrpcClient for StoreRpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForRpc::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient::with_interceptor( - 
channel, - OtelInterceptor, - ) +impl GrpcClient for RemoteProverProxyStatusClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedProxyStatusClient::new(InterceptedService::new(channel, interceptor))) } } -impl GrpcClientBuilder for RemoteProverClient { - type Service = RemoteProverClient; +impl GrpcClient for RemoteProverClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedProverClient::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::remote_prover::api_client::ApiClient::with_interceptor(channel, OtelInterceptor) +impl GrpcClient for ValidatorClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedValidatorClient::new(InterceptedService::new(channel, interceptor))) } } @@ -251,17 +326,20 @@ impl GrpcClientBuilder for RemoteProverClient { /// /// Usage example: /// -/// ```rust,no_run -/// use miden_node_proto::clients::{Builder, WantsTls, Rpc, RpcClient}; -/// use std::time::Duration; +/// ```rust +/// # use miden_node_proto::clients::{Builder, WantsTls, RpcClient}; +/// # use url::Url; +/// # use std::time::Duration; /// /// # async fn example() -> anyhow::Result<()> { -/// let client: RpcClient = Builder::new("https://rpc.example.com:8080")? -/// .with_tls()? // or `.without_tls()` +/// let url = Url::parse("https://rpc.example.com:8080")?; +/// let client: RpcClient = Builder::new(url) +/// .with_tls()? 
// or `.without_tls()` /// .with_timeout(Duration::from_secs(5)) // or `.without_timeout()` -/// .with_metadata_version("1.0".into()) // or `.without_metadata_version()` +/// .with_metadata_version("1.0".into()) // or `.without_metadata_version()` /// .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` -/// .connect::() +/// .with_otel_context_injection() // or `.without_otel_context_injection()` +/// .connect::() /// .await?; /// # Ok(()) /// # } @@ -271,6 +349,7 @@ pub struct Builder { endpoint: Endpoint, metadata_version: Option, metadata_genesis: Option, + enable_otel: bool, _state: PhantomData, } @@ -283,6 +362,8 @@ pub struct WantsVersion; #[derive(Copy, Clone, Debug)] pub struct WantsGenesis; #[derive(Copy, Clone, Debug)] +pub struct WantsOTel; +#[derive(Copy, Clone, Debug)] pub struct WantsConnection; impl Builder { @@ -292,6 +373,7 @@ impl Builder { endpoint: self.endpoint, metadata_version: self.metadata_version, metadata_genesis: self.metadata_genesis, + enable_otel: self.enable_otel, _state: PhantomData::, } } @@ -308,6 +390,7 @@ impl Builder { endpoint, metadata_version: None, metadata_genesis: None, + enable_otel: false, _state: PhantomData, } } @@ -357,42 +440,64 @@ impl Builder { impl Builder { /// Do not include genesis commitment in request metadata. - pub fn without_metadata_genesis(mut self) -> Builder { + pub fn without_metadata_genesis(mut self) -> Builder { self.metadata_genesis = None; self.next_state() } /// Include a specific genesis commitment string in request metadata. - pub fn with_metadata_genesis(mut self, genesis: String) -> Builder { + pub fn with_metadata_genesis(mut self, genesis: String) -> Builder { self.metadata_genesis = Some(genesis); self.next_state() } } +impl Builder { + /// Enables OpenTelemetry context propagation via gRPC. + /// + /// This is used by OpenTelemetry to connect traces across network boundaries.
The server on + /// the other end must be configured to receive and use the injected trace context. + pub fn with_otel_context_injection(mut self) -> Builder { + self.enable_otel = true; + self.next_state() + } + + /// Disables OpenTelemetry context propagation. This should be disabled when interfacing with + /// external third party gRPC servers. + pub fn without_otel_context_injection(mut self) -> Builder { + self.enable_otel = false; + self.next_state() + } +} + impl Builder { /// Establish an eager connection and return a fully configured client. - pub async fn connect(self) -> Result + pub async fn connect(self) -> Result where - T: GrpcClientBuilder, + T: GrpcClient, { let channel = self.endpoint.connect().await?; - let cfg = ClientConfig { - metadata_version: self.metadata_version, - metadata_genesis: self.metadata_genesis, - }; - Ok(T::with_interceptor(channel, &cfg)) + Ok(self.connect_with_channel::(channel)) } /// Establish a lazy connection and return a client that will connect on first use. 
- pub fn connect_lazy(self) -> T::Service + pub fn connect_lazy(self) -> T where - T: GrpcClientBuilder, + T: GrpcClient, { let channel = self.endpoint.connect_lazy(); - let cfg = ClientConfig { - metadata_version: self.metadata_version, - metadata_genesis: self.metadata_genesis, - }; - T::with_interceptor(channel, &cfg) + self.connect_with_channel::(channel) + } + + fn connect_with_channel(self, channel: Channel) -> T + where + T: GrpcClient, + { + let interceptor = Interceptor::new( + self.enable_otel, + self.metadata_version.as_deref(), + self.metadata_genesis.as_deref(), + ); + T::with_interceptor(channel, interceptor) } } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 8d690803c0..cf24e253f8 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -1,26 +1,35 @@ use std::fmt::{Debug, Display, Formatter}; use miden_node_utils::formatting::format_opt; -use miden_objects::Word; -use miden_objects::account::{ +use miden_node_utils::limiter::{QueryParamLimiter, QueryParamStorageMapKeyTotalLimit}; +use miden_protocol::Word; +use miden_protocol::account::{ Account, AccountHeader, AccountId, AccountStorageHeader, StorageMap, + StorageSlotHeader, + StorageSlotName, StorageSlotType, }; -use miden_objects::asset::{Asset, AssetVault}; -use miden_objects::block::{AccountWitness, BlockNumber}; -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::note::{NoteExecutionMode, NoteTag}; -use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::asset::{Asset, AssetVault}; +use miden_protocol::block::BlockNumber; +use miden_protocol::block::account_tree::AccountWitness; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::crypto::merkle::smt::SmtProof; +use miden_protocol::note::NoteAttachment; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use 
miden_standards::note::{NetworkAccountTarget, NetworkAccountTargetError}; use thiserror::Error; use super::try_convert; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated::{self as proto}; +#[cfg(test)] +mod tests; + // ACCOUNT ID // ================================================================================================ @@ -96,36 +105,60 @@ impl From<&AccountInfo> for proto::account::AccountDetails { fn from(AccountInfo { summary, details }: &AccountInfo) -> Self { Self { summary: Some(summary.into()), - details: details.as_ref().map(miden_objects::utils::Serializable::to_bytes), + details: details.as_ref().map(Serializable::to_bytes), } } } -// ACCOUNT PROOF REQUEST +// ACCOUNT STORAGE HEADER +//================================================================================================ + +impl TryFrom for AccountStorageHeader { + type Error = ConversionError; + + fn try_from(value: proto::account::AccountStorageHeader) -> Result { + let proto::account::AccountStorageHeader { slots } = value; + + let slot_headers = slots + .into_iter() + .map(|slot| { + let slot_name = StorageSlotName::new(slot.slot_name)?; + let slot_type = storage_slot_type_from_raw(slot.slot_type)?; + let commitment = + slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; + Ok(StorageSlotHeader::new(slot_name, slot_type, commitment)) + }) + .collect::, ConversionError>>()?; + + Ok(AccountStorageHeader::new(slot_headers)?) + } +} + +// ACCOUNT REQUEST // ================================================================================================ /// Represents a request for an account proof. 
-pub struct AccountProofRequest { +pub struct AccountRequest { pub account_id: AccountId, // If not present, the latest account proof references the latest available pub block_num: Option, pub details: Option, } -impl TryFrom for AccountProofRequest { +impl TryFrom for AccountRequest { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountProofRequest) -> Result { - let proto::rpc_store::AccountProofRequest { account_id, block_num, details } = value; + fn try_from(value: proto::rpc::AccountRequest) -> Result { + let proto::rpc::AccountRequest { account_id, block_num, details } = value; let account_id = account_id - .ok_or(proto::rpc_store::AccountProofRequest::missing_field(stringify!(account_id)))? + .ok_or(proto::rpc::AccountRequest::missing_field(stringify!(account_id)))? .try_into()?; let block_num = block_num.map(Into::into); let details = details.map(TryFrom::try_from).transpose()?; - Ok(AccountProofRequest { account_id, block_num, details }) + Ok(AccountRequest { account_id, block_num, details }) } } @@ -136,15 +169,13 @@ pub struct AccountDetailRequest { pub storage_requests: Vec, } -impl TryFrom - for AccountDetailRequest -{ +impl TryFrom for AccountDetailRequest { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_request::AccountDetailRequest, + value: proto::rpc::account_request::AccountDetailRequest, ) -> Result { - let proto::rpc_store::account_proof_request::AccountDetailRequest { + let proto::rpc::account_request::AccountDetailRequest { code_commitment, asset_vault_commitment, storage_maps, @@ -162,120 +193,56 @@ impl TryFrom } } -impl TryFrom for AccountStorageHeader { - type Error = ConversionError; - - fn try_from(value: proto::account::AccountStorageHeader) -> Result { - let proto::account::AccountStorageHeader { slots } = value; - - let items = slots - .into_iter() - .map(|slot| { - let slot_type = storage_slot_type_from_raw(slot.slot_type)?; - let commitment = - 
slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; - Ok((slot_type, commitment)) - }) - .collect::, ConversionError>>()?; - - Ok(AccountStorageHeader::new(items)) - } -} - -impl TryFrom - for AccountStorageMapDetails -{ - type Error = ConversionError; - - fn try_from( - value: proto::rpc_store::account_storage_details::AccountStorageMapDetails, - ) -> Result { - let proto::rpc_store::account_storage_details::AccountStorageMapDetails { - slot_index, - too_many_entries, - entries, - } = value; - - let slot_index = slot_index.try_into().map_err(ConversionError::TryFromIntError)?; - - // Extract map_entries from the MapEntries message - let map_entries = if let Some(entries) = entries { - entries - .entries - .into_iter() - .map(|entry| { - let key = entry - .key - .ok_or(proto::rpc_store::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field( - stringify!(key), - ))? - .try_into()?; - let value = entry - .value - .ok_or(proto::rpc_store::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field( - stringify!(value), - ))? - .try_into()?; - Ok((key, value)) - }) - .collect::, ConversionError>>()? 
- } else { - Vec::new() - }; - - Ok(Self { - slot_index, - too_many_entries, - map_entries, - }) - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapRequest { - pub slot_index: u8, + pub slot_name: StorageSlotName, pub slot_data: SlotData, } -impl - TryFrom< - proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest, - > for StorageMapRequest +impl TryFrom + for StorageMapRequest { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest, + value: proto::rpc::account_request::account_detail_request::StorageMapDetailRequest, ) -> Result { - let proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest { - slot_index, + let proto::rpc::account_request::account_detail_request::StorageMapDetailRequest { + slot_name, slot_data, } = value; - let slot_index = slot_index.try_into()?; - let slot_data = slot_data.ok_or(proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; + let slot_name = StorageSlotName::new(slot_name)?; + let slot_data = slot_data.ok_or(proto::rpc::account_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; - Ok(StorageMapRequest { slot_index, slot_data }) + Ok(StorageMapRequest { slot_name, slot_data }) } } +/// Request of slot data values. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum SlotData { All, MapKeys(Vec), } -impl TryFrom - for SlotData +impl + TryFrom< + proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData, + > for SlotData { type Error = ConversionError; - fn try_from(value: proto::rpc_store::account_proof_request::account_detail_request::storage_map_detail_request::SlotData) -> Result { - use proto::rpc_store::account_proof_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; + fn try_from( + value: proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData, + ) -> Result { + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; Ok(match value { ProtoSlotData::AllEntries(true) => SlotData::All, - ProtoSlotData::AllEntries(false) => return Err(ConversionError::EnumDiscriminantOutOfRange), + ProtoSlotData::AllEntries(false) => { + return Err(ConversionError::EnumDiscriminantOutOfRange); + }, ProtoSlotData::MapKeys(keys) => { let keys = try_convert(keys.map_keys).collect::, _>>()?; SlotData::MapKeys(keys) @@ -339,9 +306,10 @@ impl From for proto::account::AccountStorageHeader { fn from(value: AccountStorageHeader) -> Self { let slots = value .slots() - .map(|(slot_type, slot_value)| proto::account::account_storage_header::StorageSlot { - slot_type: storage_slot_type_to_raw(*slot_type), - commitment: Some(proto::primitives::Digest::from(*slot_value)), + .map(|slot_header| proto::account::account_storage_header::StorageSlot { + slot_name: slot_header.name().to_string(), + slot_type: storage_slot_type_to_raw(slot_header.slot_type()), + commitment: Some(proto::primitives::Digest::from(slot_header.value())), }) .collect(); @@ -349,133 +317,359 @@ impl From for proto::account::AccountStorageHeader { } } +// ACCOUNT VAULT DETAILS +//================================================================================================ + +/// 
Account vault details +/// +/// When an account contains a large number of assets (> +/// [`AccountVaultDetails::MAX_RETURN_ENTRIES`]), including all assets in a single RPC response +/// creates performance issues. In such cases, the `LimitExceeded` variant indicates to the client +/// to use the `SyncAccountVault` endpoint instead. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct AccountVaultDetails { - pub too_many_assets: bool, - pub assets: Vec, +pub enum AccountVaultDetails { + /// The vault has too many assets to return inline. + /// Clients must use `SyncAccountVault` endpoint instead. + LimitExceeded, + + /// The assets in the vault (up to `MAX_RETURN_ENTRIES`). + Assets(Vec), } + impl AccountVaultDetails { - const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of vault entries that can be returned in a single response. + /// Accounts with more assets will have `LimitExceeded` variant. + pub const MAX_RETURN_ENTRIES: usize = 1000; pub fn new(vault: &AssetVault) -> Self { if vault.assets().nth(Self::MAX_RETURN_ENTRIES).is_some() { - Self::too_many() + Self::LimitExceeded } else { - Self { - too_many_assets: false, - assets: Vec::from_iter(vault.assets()), - } + Self::Assets(Vec::from_iter(vault.assets())) } } pub fn empty() -> Self { - Self { - too_many_assets: false, - assets: Vec::new(), - } + Self::Assets(Vec::new()) } - fn too_many() -> Self { - Self { - too_many_assets: true, - assets: Vec::new(), + /// Creates `AccountVaultDetails` from a list of assets. 
+ pub fn from_assets(assets: Vec) -> Self { + if assets.len() > Self::MAX_RETURN_ENTRIES { + Self::LimitExceeded + } else { + Self::Assets(assets) } } } -impl TryFrom for AccountVaultDetails { +impl TryFrom for AccountVaultDetails { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountVaultDetails) -> Result { - let proto::rpc_store::AccountVaultDetails { too_many_assets, assets } = value; + fn try_from(value: proto::rpc::AccountVaultDetails) -> Result { + let proto::rpc::AccountVaultDetails { too_many_assets, assets } = value; - let assets = - Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { - let asset = asset - .asset - .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; - let asset = Word::try_from(asset)?; - Asset::try_from(asset).map_err(ConversionError::AssetError) - }))?; - Ok(Self { too_many_assets, assets }) + if too_many_assets { + Ok(Self::LimitExceeded) + } else { + let parsed_assets = + Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { + let asset = asset + .asset + .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; + let asset = Word::try_from(asset)?; + Asset::try_from(asset).map_err(ConversionError::AssetError) + }))?; + Ok(Self::Assets(parsed_assets)) + } } } -impl From for proto::rpc_store::AccountVaultDetails { +impl From for proto::rpc::AccountVaultDetails { fn from(value: AccountVaultDetails) -> Self { - let AccountVaultDetails { too_many_assets, assets } = value; - - Self { - too_many_assets, - assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { - asset: Some(proto::primitives::Digest::from(Word::from(asset))), - })), + match value { + AccountVaultDetails::LimitExceeded => Self { + too_many_assets: true, + assets: Vec::new(), + }, + AccountVaultDetails::Assets(assets) => Self { + too_many_assets: false, + assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { + asset: 
Some(proto::primitives::Digest::from(Word::from(asset))), + })), + }, } } } +// ACCOUNT STORAGE MAP DETAILS +//================================================================================================ + +/// Details about an account storage map slot. #[derive(Debug, Clone, PartialEq, Eq)] pub struct AccountStorageMapDetails { - pub slot_index: u8, - pub too_many_entries: bool, - pub map_entries: Vec<(Word, Word)>, + pub slot_name: StorageSlotName, + pub entries: StorageMapEntries, } -impl AccountStorageMapDetails { - const MAX_RETURN_ENTRIES: usize = 1000; +/// Storage map entries for an account storage slot. +/// +/// When a storage map contains many entries (> [`AccountStorageMapDetails::MAX_RETURN_ENTRIES`]), +/// returning all entries in a single RPC response creates performance issues. In such cases, +/// the `LimitExceeded` variant indicates to the client to use the `SyncStorageMaps` endpoint +/// instead. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StorageMapEntries { + /// The map has too many entries to return inline. + /// Clients must use `SyncStorageMaps` endpoint instead. + LimitExceeded, + + /// All storage map entries (key-value pairs) without proofs. + /// Used when all entries are requested for small maps. + AllEntries(Vec<(Word, Word)>), + + /// Specific entries with their SMT proofs for client-side verification. + /// Used when specific keys are requested from the storage map. + EntriesWithProofs(Vec), +} - pub fn new(slot_index: u8, slot_data: SlotData, storage_map: &StorageMap) -> Self { - match slot_data { - SlotData::All => Self::from_all_entries(slot_index, storage_map), - SlotData::MapKeys(keys) => Self::from_specific_keys(slot_index, &keys[..], storage_map), +impl AccountStorageMapDetails { + /// Maximum number of storage map entries that can be returned in a single response. + pub const MAX_RETURN_ENTRIES: usize = 1000; + + /// Maximum number of SMT proofs that can be returned in a single response. 
+ /// + /// This limit is more restrictive than [`Self::MAX_RETURN_ENTRIES`] because SMT proofs + /// are larger (up to 64 inner nodes each) and more CPU-intensive to generate. + /// + /// This is defined by [`QueryParamStorageMapKeyTotalLimit::LIMIT`] and used both in RPC + /// validation and store-level enforcement to ensure consistent limits. + pub const MAX_SMT_PROOF_ENTRIES: usize = QueryParamStorageMapKeyTotalLimit::LIMIT; + + /// Creates storage map details with all entries from the storage map. + /// + /// If the storage map has too many entries (> `MAX_RETURN_ENTRIES`), + /// returns `LimitExceeded` variant. + pub fn from_all_entries(slot_name: StorageSlotName, storage_map: &StorageMap) -> Self { + if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES { + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } + } else { + let entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v))); + Self { + slot_name, + entries: StorageMapEntries::AllEntries(entries), + } } } - fn from_all_entries(slot_index: u8, storage_map: &StorageMap) -> Self { - if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_index) + /// Creates storage map details from forest-queried entries. + /// + /// Returns `LimitExceeded` if too many entries. + pub fn from_forest_entries(slot_name: StorageSlotName, entries: Vec<(Word, Word)>) -> Self { + if entries.len() > Self::MAX_RETURN_ENTRIES { + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } } else { - let map_entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v))); Self { - slot_index, - too_many_entries: false, - map_entries, + slot_name, + entries: StorageMapEntries::AllEntries(entries), } } } - fn from_specific_keys(slot_index: u8, keys: &[Word], storage_map: &StorageMap) -> Self { - if keys.len() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_index) + /// Creates storage map details from pre-computed SMT proofs. 
+ /// + /// Use this when the caller has already obtained the proofs from an `SmtForest`. + /// Returns `LimitExceeded` if too many proofs are provided. + pub fn from_proofs(slot_name: StorageSlotName, proofs: Vec) -> Self { + if proofs.len() > Self::MAX_SMT_PROOF_ENTRIES { + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } } else { - // TODO For now, we return all entries instead of specific keys with proofs - Self::from_all_entries(slot_index, storage_map) + Self { + slot_name, + entries: StorageMapEntries::EntriesWithProofs(proofs), + } } } - pub fn too_many_entries(slot_index: u8) -> Self { + /// Creates storage map details indicating the limit was exceeded. + pub fn limit_exceeded(slot_name: StorageSlotName) -> Self { Self { - slot_index, - too_many_entries: true, - map_entries: Vec::new(), + slot_name, + entries: StorageMapEntries::LimitExceeded, } } } -#[derive(Debug, Clone, PartialEq, Eq)] +impl TryFrom + for AccountStorageMapDetails +{ + type Error = ConversionError; + + fn try_from( + value: proto::rpc::account_storage_details::AccountStorageMapDetails, + ) -> Result { + use proto::rpc::account_storage_details::account_storage_map_details::{ + all_map_entries::StorageMapEntry, + map_entries_with_proofs::StorageMapEntryWithProof, + AllMapEntries, + Entries as ProtoEntries, + MapEntriesWithProofs, + }; + + let proto::rpc::account_storage_details::AccountStorageMapDetails { + slot_name, + too_many_entries, + entries, + } = value; + + let slot_name = StorageSlotName::new(slot_name)?; + + let entries = if too_many_entries { + StorageMapEntries::LimitExceeded + } else { + match entries { + None => { + return Err( + proto::rpc::account_storage_details::AccountStorageMapDetails::missing_field( + stringify!(entries), + ), + ); + }, + Some(ProtoEntries::AllEntries(AllMapEntries { entries })) => { + let entries = entries + .into_iter() + .map(|entry| { + let key = entry + .key + .ok_or(StorageMapEntry::missing_field(stringify!(key)))? 
+ .try_into()?; + let value = entry + .value + .ok_or(StorageMapEntry::missing_field(stringify!(value)))? + .try_into()?; + Ok((key, value)) + }) + .collect::, ConversionError>>()?; + StorageMapEntries::AllEntries(entries) + }, + Some(ProtoEntries::EntriesWithProofs(MapEntriesWithProofs { entries })) => { + let proofs = entries + .into_iter() + .map(|entry| { + let smt_opening = entry.proof.ok_or( + StorageMapEntryWithProof::missing_field(stringify!(proof)), + )?; + SmtProof::try_from(smt_opening) + }) + .collect::, ConversionError>>()?; + StorageMapEntries::EntriesWithProofs(proofs) + }, + } + }; + + Ok(Self { slot_name, entries }) + } +} + +impl From + for proto::rpc::account_storage_details::AccountStorageMapDetails +{ + fn from(value: AccountStorageMapDetails) -> Self { + use proto::rpc::account_storage_details::account_storage_map_details::{ + AllMapEntries, + Entries as ProtoEntries, + MapEntriesWithProofs, + }; + + let AccountStorageMapDetails { slot_name, entries } = value; + + let (too_many_entries, proto_entries) = match entries { + StorageMapEntries::LimitExceeded => (true, None), + StorageMapEntries::AllEntries(entries) => { + let all = AllMapEntries { + entries: Vec::from_iter(entries.into_iter().map(|(key, value)| { + proto::rpc::account_storage_details::account_storage_map_details::all_map_entries::StorageMapEntry { + key: Some(key.into()), + value: Some(value.into()), + } + })), + }; + (false, Some(ProtoEntries::AllEntries(all))) + }, + StorageMapEntries::EntriesWithProofs(proofs) => { + use miden_protocol::crypto::merkle::smt::SmtLeaf; + + let with_proofs = MapEntriesWithProofs { + entries: Vec::from_iter(proofs.into_iter().map(|proof| { + // Get key/value from the leaf before consuming the proof + let (key, value) = match proof.leaf() { + SmtLeaf::Empty(_) => { + (miden_protocol::EMPTY_WORD, miden_protocol::EMPTY_WORD) + }, + SmtLeaf::Single((k, v)) => (*k, *v), + SmtLeaf::Multiple(entries) => entries.iter().next().map_or( + 
(miden_protocol::EMPTY_WORD, miden_protocol::EMPTY_WORD), + |(k, v)| (*k, *v), + ), + }; + let smt_opening = proto::primitives::SmtOpening::from(proof); + proto::rpc::account_storage_details::account_storage_map_details::map_entries_with_proofs::StorageMapEntryWithProof { + key: Some(key.into()), + value: Some(value.into()), + proof: Some(smt_opening), + } + })), + }; + (false, Some(ProtoEntries::EntriesWithProofs(with_proofs))) + }, + }; + + Self { + slot_name: slot_name.to_string(), + too_many_entries, + entries: proto_entries, + } + } +} + +#[derive(Debug, Clone, PartialEq)] pub struct AccountStorageDetails { pub header: AccountStorageHeader, pub map_details: Vec, } -impl TryFrom for AccountStorageDetails { +impl AccountStorageDetails { + /// Creates storage details where all map slots indicate limit exceeded. + pub fn all_limits_exceeded( + header: AccountStorageHeader, + slot_names: impl IntoIterator, + ) -> Self { + Self { + header, + map_details: Vec::from_iter( + slot_names.into_iter().map(AccountStorageMapDetails::limit_exceeded), + ), + } + } +} + +impl TryFrom for AccountStorageDetails { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountStorageDetails) -> Result { - let proto::rpc_store::AccountStorageDetails { header, map_details } = value; + fn try_from(value: proto::rpc::AccountStorageDetails) -> Result { + let proto::rpc::AccountStorageDetails { header, map_details } = value; let header = header - .ok_or(proto::rpc_store::AccountStorageDetails::missing_field(stringify!(header)))? + .ok_or(proto::rpc::AccountStorageDetails::missing_field(stringify!(header)))? 
.try_into()?; let map_details = try_convert(map_details).collect::, _>>()?; @@ -484,7 +678,7 @@ impl TryFrom for AccountStorageDetails } } -impl From for proto::rpc_store::AccountStorageDetails { +impl From for proto::rpc::AccountStorageDetails { fn from(value: AccountStorageDetails) -> Self { let AccountStorageDetails { header, map_details } = value; @@ -497,57 +691,52 @@ impl From for proto::rpc_store::AccountStorageDetails { const fn storage_slot_type_from_raw(slot_type: u32) -> Result { Ok(match slot_type { - 0 => StorageSlotType::Map, - 1 => StorageSlotType::Value, + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, _ => return Err(ConversionError::EnumDiscriminantOutOfRange), }) } const fn storage_slot_type_to_raw(slot_type: StorageSlotType) -> u32 { match slot_type { - StorageSlotType::Map => 0, - StorageSlotType::Value => 1, + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, } } -/// Represents account details returned in response to an account proof request. -pub struct AccountDetails { - pub account_header: AccountHeader, - pub account_code: Option>, - pub vault_details: AccountVaultDetails, - pub storage_details: AccountStorageDetails, -} +// ACCOUNT PROOF RESPONSE +//================================================================================================ /// Represents the response to an account proof request. 
-pub struct AccountProofResponse { +pub struct AccountResponse { pub block_num: BlockNumber, pub witness: AccountWitness, pub details: Option, } -impl TryFrom for AccountProofResponse { +impl TryFrom for AccountResponse { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountProofResponse) -> Result { - let proto::rpc_store::AccountProofResponse { block_num, witness, details } = value; + fn try_from(value: proto::rpc::AccountResponse) -> Result { + let proto::rpc::AccountResponse { block_num, witness, details } = value; let block_num = block_num - .ok_or(proto::rpc_store::AccountProofResponse::missing_field(stringify!(block_num)))? + .ok_or(proto::rpc::AccountResponse::missing_field(stringify!(block_num)))? .into(); let witness = witness - .ok_or(proto::rpc_store::AccountProofResponse::missing_field(stringify!(witness)))? + .ok_or(proto::rpc::AccountResponse::missing_field(stringify!(witness)))? .try_into()?; let details = details.map(TryFrom::try_from).transpose()?; - Ok(AccountProofResponse { block_num, witness, details }) + Ok(AccountResponse { block_num, witness, details }) } } -impl From for proto::rpc_store::AccountProofResponse { - fn from(value: AccountProofResponse) -> Self { - let AccountProofResponse { block_num, witness, details } = value; +impl From for proto::rpc::AccountResponse { + fn from(value: AccountResponse) -> Self { + let AccountResponse { block_num, witness, details } = value; Self { witness: Some(witness.into()), @@ -557,13 +746,40 @@ impl From for proto::rpc_store::AccountProofResponse { } } -impl TryFrom for AccountDetails { +// ACCOUNT DETAILS +//================================================================================================ + +/// Represents account details returned in response to an account proof request. 
+pub struct AccountDetails { + pub account_header: AccountHeader, + pub account_code: Option>, + pub vault_details: AccountVaultDetails, + pub storage_details: AccountStorageDetails, +} + +impl AccountDetails { + /// Creates account details where all storage map slots indicate limit exceeded. + pub fn with_storage_limits_exceeded( + account_header: AccountHeader, + account_code: Option>, + vault_details: AccountVaultDetails, + storage_header: AccountStorageHeader, + slot_names: impl IntoIterator, + ) -> Self { + Self { + account_header, + account_code, + vault_details, + storage_details: AccountStorageDetails::all_limits_exceeded(storage_header, slot_names), + } + } +} + +impl TryFrom for AccountDetails { type Error = ConversionError; - fn try_from( - value: proto::rpc_store::account_proof_response::AccountDetails, - ) -> Result { - let proto::rpc_store::account_proof_response::AccountDetails { + fn try_from(value: proto::rpc::account_response::AccountDetails) -> Result { + let proto::rpc::account_response::AccountDetails { header, code, vault_details, @@ -571,21 +787,19 @@ impl TryFrom for Accou } = value; let account_header = header - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(header), - ))? + .ok_or(proto::rpc::account_response::AccountDetails::missing_field(stringify!(header)))? .try_into()?; let storage_details = storage_details - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(storage_details), - ))? + .ok_or(proto::rpc::account_response::AccountDetails::missing_field(stringify!( + storage_details + )))? .try_into()?; let vault_details = vault_details - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(vault_details), - ))? + .ok_or(proto::rpc::account_response::AccountDetails::missing_field(stringify!( + vault_details + )))? 
.try_into()?; let account_code = code; @@ -598,7 +812,7 @@ impl TryFrom for Accou } } -impl From for proto::rpc_store::account_proof_response::AccountDetails { +impl From for proto::rpc::account_response::AccountDetails { fn from(value: AccountDetails) -> Self { let AccountDetails { account_header, @@ -621,35 +835,6 @@ impl From for proto::rpc_store::account_proof_response::AccountD } } -impl From - for proto::rpc_store::account_storage_details::AccountStorageMapDetails -{ - fn from(value: AccountStorageMapDetails) -> Self { - use proto::rpc_store::account_storage_details::account_storage_map_details; - - let AccountStorageMapDetails { - slot_index, - too_many_entries, - map_entries, - } = value; - - let entries = Some(account_storage_map_details::MapEntries { - entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| { - account_storage_map_details::map_entries::StorageMapEntry { - key: Some(key.into()), - value: Some(value.into()), - } - })), - }); - - Self { - slot_index: u32::from(slot_index), - too_many_entries, - entries, - } - } -} - // ACCOUNT WITNESS // ================================================================================================ @@ -770,24 +955,22 @@ impl Display for AccountState { } } -impl TryFrom - for AccountState -{ +impl TryFrom for AccountState { type Error = ConversionError; fn try_from( - from: proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord, + from: proto::store::transaction_inputs::AccountTransactionInputRecord, ) -> Result { let account_id = from .account_id - .ok_or(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord::missing_field( + .ok_or(proto::store::transaction_inputs::AccountTransactionInputRecord::missing_field( stringify!(account_id), ))? 
.try_into()?; let account_commitment = from .account_commitment - .ok_or(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord::missing_field( + .ok_or(proto::store::transaction_inputs::AccountTransactionInputRecord::missing_field( stringify!(account_commitment), ))? .try_into()?; @@ -804,9 +987,7 @@ impl TryFrom - for proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord -{ +impl From for proto::store::transaction_inputs::AccountTransactionInputRecord { fn from(from: AccountState) -> Self { Self { account_id: Some(from.account_id.into()), @@ -842,76 +1023,95 @@ impl From for proto::primitives::Asset { pub type AccountPrefix = u32; -/// Newtype wrapper for network account prefix. +/// Newtype wrapper for network account IDs. +/// /// Provides type safety for accounts that are meant for network execution. +/// This wraps the full `AccountId` of a network account, typically extracted +/// from a `NetworkAccountTarget` attachment. #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] -pub struct NetworkAccountPrefix(u32); +pub struct NetworkAccountId(AccountId); -impl std::fmt::Display for NetworkAccountPrefix { +impl std::fmt::Display for NetworkAccountId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(&self.0, f) } } -impl NetworkAccountPrefix { - pub fn inner(&self) -> u32 { +impl NetworkAccountId { + /// Returns the inner `AccountId`. + pub fn inner(&self) -> AccountId { self.0 } -} - -impl TryFrom for NetworkAccountPrefix { - type Error = NetworkAccountError; - fn try_from(value: u32) -> Result { - if value >> 30 != 0 { - return Err(NetworkAccountError::InvalidPrefix(value)); - } - Ok(NetworkAccountPrefix(value)) + /// Gets the 30-bit prefix of the account ID used for tag matching. 
+ pub fn prefix(&self) -> AccountPrefix { + get_account_id_tag_prefix(self.0) } } -impl TryFrom for NetworkAccountPrefix { +impl TryFrom for NetworkAccountId { type Error = NetworkAccountError; fn try_from(id: AccountId) -> Result { if !id.is_network() { return Err(NetworkAccountError::NotNetworkAccount(id)); } - let prefix = get_account_id_tag_prefix(id); - Ok(NetworkAccountPrefix(prefix)) + Ok(NetworkAccountId(id)) } } -impl TryFrom for NetworkAccountPrefix { +impl TryFrom<&NoteAttachment> for NetworkAccountId { type Error = NetworkAccountError; - fn try_from(tag: NoteTag) -> Result { - if tag.execution_mode() != NoteExecutionMode::Network || !tag.is_single_target() { - return Err(NetworkAccountError::InvalidExecutionMode(tag)); - } + fn try_from(attachment: &NoteAttachment) -> Result { + let target = NetworkAccountTarget::try_from(attachment) + .map_err(NetworkAccountError::InvalidAttachment)?; + Ok(NetworkAccountId(target.target_id())) + } +} + +impl TryFrom for NetworkAccountId { + type Error = NetworkAccountError; - let tag_inner: u32 = tag.into(); - assert!(tag_inner >> 30 == 0, "first 2 bits have to be 0"); - Ok(NetworkAccountPrefix(tag_inner)) + fn try_from(attachment: NoteAttachment) -> Result { + NetworkAccountId::try_from(&attachment) } } -impl From for u32 { - fn from(value: NetworkAccountPrefix) -> Self { +impl From for AccountId { + fn from(value: NetworkAccountId) -> Self { value.inner() } } +impl From for u32 { + /// Returns the 30-bit prefix of the network account ID. + /// This is used for note tag matching. 
+ fn from(value: NetworkAccountId) -> Self { + value.prefix() + } +} + #[derive(Debug, Error)] pub enum NetworkAccountError { #[error("account ID {0} is not a valid network account ID")] NotNetworkAccount(AccountId), - #[error("note tag {0} is not valid for network account execution")] - InvalidExecutionMode(NoteTag), - #[error("note prefix should be 30-bit long ({0} has non-zero in the 2 most significant bits)")] + #[error("invalid network account attachment: {0}")] + InvalidAttachment(#[source] NetworkAccountTargetError), + #[error("invalid network account prefix: {0}")] InvalidPrefix(u32), } +/// Validates that a u32 represents a valid network account prefix. +/// +/// Network accounts have a 30-bit prefix (top 2 bits must be 0). +pub fn validate_network_account_prefix(prefix: u32) -> Result { + if prefix >> 30 != 0 { + return Err(NetworkAccountError::InvalidPrefix(prefix)); + } + Ok(prefix) +} + /// Gets the 30-bit prefix of the account ID. fn get_account_id_tag_prefix(id: AccountId) -> AccountPrefix { (id.prefix().as_u64() >> 34) as AccountPrefix diff --git a/crates/proto/src/domain/account/tests.rs b/crates/proto/src/domain/account/tests.rs new file mode 100644 index 0000000000..695813d990 --- /dev/null +++ b/crates/proto/src/domain/account/tests.rs @@ -0,0 +1,41 @@ +use super::*; + +fn word_from_u32(arr: [u32; 4]) -> Word { + Word::from(arr) +} + +fn test_slot_name() -> StorageSlotName { + StorageSlotName::new("miden::test::storage::slot").unwrap() +} + +#[test] +fn account_storage_map_details_from_forest_entries() { + let slot_name = test_slot_name(); + let entries = vec![ + (word_from_u32([1, 2, 3, 4]), word_from_u32([5, 6, 7, 8])), + (word_from_u32([9, 10, 11, 12]), word_from_u32([13, 14, 15, 16])), + ]; + + let details = AccountStorageMapDetails::from_forest_entries(slot_name.clone(), entries.clone()); + + assert_eq!(details.slot_name, slot_name); + assert_eq!(details.entries, StorageMapEntries::AllEntries(entries)); +} + +#[test] +fn 
account_storage_map_details_from_forest_entries_limit_exceeded() { + let slot_name = test_slot_name(); + // Create more entries than MAX_RETURN_ENTRIES + let entries: Vec<_> = (0..=AccountStorageMapDetails::MAX_RETURN_ENTRIES) + .map(|i| { + let key = word_from_u32([i as u32, 0, 0, 0]); + let value = word_from_u32([0, 0, 0, i as u32]); + (key, value) + }) + .collect(); + + let details = AccountStorageMapDetails::from_forest_entries(slot_name.clone(), entries); + + assert_eq!(details.slot_name, slot_name); + assert_eq!(details.entries, StorageMapEntries::LimitExceeded); +} diff --git a/crates/proto/src/domain/batch.rs b/crates/proto/src/domain/batch.rs index 718e74463a..1cccf6ab8b 100644 --- a/crates/proto/src/domain/batch.rs +++ b/crates/proto/src/domain/batch.rs @@ -1,9 +1,9 @@ use std::collections::BTreeMap; -use miden_objects::block::BlockHeader; -use miden_objects::note::{NoteId, NoteInclusionProof}; -use miden_objects::transaction::PartialBlockchain; -use miden_objects::utils::{Deserializable, Serializable}; +use miden_protocol::block::BlockHeader; +use miden_protocol::note::{NoteId, NoteInclusionProof}; +use miden_protocol::transaction::PartialBlockchain; +use miden_protocol::utils::{Deserializable, Serializable}; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; @@ -16,7 +16,7 @@ pub struct BatchInputs { pub partial_block_chain: PartialBlockchain, } -impl From for proto::block_producer_store::BatchInputs { +impl From for proto::store::BatchInputs { fn from(inputs: BatchInputs) -> Self { Self { batch_reference_block_header: Some(inputs.batch_reference_block_header.into()), @@ -26,16 +26,14 @@ impl From for proto::block_producer_store::BatchInputs { } } -impl TryFrom for BatchInputs { +impl TryFrom for BatchInputs { type Error = ConversionError; - fn try_from( - response: proto::block_producer_store::BatchInputs, - ) -> Result { + fn try_from(response: proto::store::BatchInputs) -> Result { let result = Self { 
batch_reference_block_header: response .batch_reference_block_header - .ok_or(proto::block_producer_store::BatchInputs::missing_field("block_header"))? + .ok_or(proto::store::BatchInputs::missing_field("block_header"))? .try_into()?, note_proofs: response .note_proofs diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs index a64427d1ae..aa94f306dd 100644 --- a/crates/proto/src/domain/block.rs +++ b/crates/proto/src/domain/block.rs @@ -1,17 +1,13 @@ use std::collections::BTreeMap; use std::ops::RangeInclusive; -use miden_objects::account::AccountId; -use miden_objects::block::{ - BlockHeader, - BlockInputs, - BlockNumber, - FeeParameters, - NullifierWitness, -}; -use miden_objects::note::{NoteId, NoteInclusionProof}; -use miden_objects::transaction::PartialBlockchain; -use miden_objects::utils::{Deserializable, Serializable}; +use miden_protocol::account::AccountId; +use miden_protocol::block::nullifier_tree::NullifierWitness; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, FeeParameters}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; +use miden_protocol::note::{NoteId, NoteInclusionProof}; +use miden_protocol::transaction::PartialBlockchain; +use miden_protocol::utils::{Deserializable, Serializable}; use thiserror::Error; use crate::errors::{ConversionError, MissingFieldHelper}; @@ -47,7 +43,7 @@ impl From<&BlockHeader> for proto::blockchain::BlockHeader { note_root: Some(header.note_root().into()), tx_commitment: Some(header.tx_commitment().into()), tx_kernel_commitment: Some(header.tx_kernel_commitment().into()), - proof_commitment: Some(header.proof_commitment().into()), + validator_key: Some(header.validator_key().into()), timestamp: header.timestamp(), fee_parameters: Some(header.fee_parameters().into()), } @@ -108,8 +104,8 @@ impl TryFrom for BlockHeader { )))? 
.try_into()?, value - .proof_commitment - .ok_or(proto::blockchain::BlockHeader::missing_field(stringify!(proof_commitment)))? + .validator_key + .ok_or(proto::blockchain::BlockHeader::missing_field(stringify!(validator_key)))? .try_into()?, FeeParameters::try_from(value.fee_parameters.ok_or( proto::blockchain::FeeParameters::missing_field(stringify!(fee_parameters)), @@ -122,7 +118,7 @@ impl TryFrom for BlockHeader { // BLOCK INPUTS // ================================================================================================ -impl From for proto::block_producer_store::BlockInputs { +impl From for proto::store::BlockInputs { fn from(inputs: BlockInputs) -> Self { let ( prev_block_header, @@ -132,7 +128,7 @@ impl From for proto::block_producer_store::BlockInputs { unauthenticated_note_proofs, ) = inputs.into_parts(); - proto::block_producer_store::BlockInputs { + proto::store::BlockInputs { latest_block_header: Some(prev_block_header.into()), account_witnesses: account_witnesses .into_iter() @@ -154,10 +150,10 @@ impl From for proto::block_producer_store::BlockInputs { } } -impl TryFrom for BlockInputs { +impl TryFrom for BlockInputs { type Error = ConversionError; - fn try_from(response: proto::block_producer_store::BlockInputs) -> Result { + fn try_from(response: proto::store::BlockInputs) -> Result { let latest_block_header: BlockHeader = response .latest_block_header .ok_or(proto::blockchain::BlockHeader::missing_field("block_header"))? 
@@ -202,6 +198,52 @@ impl TryFrom for BlockInputs { } } +// PUBLIC KEY +// ================================================================================================ + +impl TryFrom for PublicKey { + type Error = ConversionError; + fn try_from(public_key: proto::blockchain::ValidatorPublicKey) -> Result { + PublicKey::read_from_bytes(&public_key.validator_key) + .map_err(|source| ConversionError::deserialization_error("PublicKey", source)) + } +} + +impl From for proto::blockchain::ValidatorPublicKey { + fn from(value: PublicKey) -> Self { + Self::from(&value) + } +} + +impl From<&PublicKey> for proto::blockchain::ValidatorPublicKey { + fn from(value: &PublicKey) -> Self { + Self { validator_key: value.to_bytes() } + } +} + +// SIGNATURE +// ================================================================================================ + +impl TryFrom for Signature { + type Error = ConversionError; + fn try_from(signature: proto::blockchain::BlockSignature) -> Result { + Signature::read_from_bytes(&signature.signature) + .map_err(|source| ConversionError::deserialization_error("Signature", source)) + } +} + +impl From for proto::blockchain::BlockSignature { + fn from(value: Signature) -> Self { + Self::from(&value) + } +} + +impl From<&Signature> for proto::blockchain::BlockSignature { + fn from(value: &Signature) -> Self { + Self { signature: value.to_bytes() } + } +} + // FEE PARAMETERS // ================================================================================================ @@ -242,7 +284,7 @@ pub enum InvalidBlockRange { EmptyRange { start: BlockNumber, end: BlockNumber }, } -impl proto::rpc_store::BlockRange { +impl proto::rpc::BlockRange { /// Converts the block range into an inclusive range, using the fallback block number if the /// block to is not specified. 
pub fn into_inclusive_range>( @@ -274,7 +316,7 @@ impl proto::rpc_store::BlockRange { } } -impl From> for proto::rpc_store::BlockRange { +impl From> for proto::rpc::BlockRange { fn from(range: RangeInclusive) -> Self { Self { block_from: range.start().as_u32(), diff --git a/crates/proto/src/domain/digest.rs b/crates/proto/src/domain/digest.rs index 68cfbd9b58..7be94e5304 100644 --- a/crates/proto/src/domain/digest.rs +++ b/crates/proto/src/domain/digest.rs @@ -1,8 +1,8 @@ use std::fmt::{Debug, Display, Formatter}; use hex::{FromHex, ToHex}; -use miden_objects::note::NoteId; -use miden_objects::{Felt, StarkField, Word}; +use miden_protocol::note::NoteId; +use miden_protocol::{Felt, StarkField, Word}; use crate::errors::ConversionError; use crate::generated as proto; diff --git a/crates/proto/src/domain/mempool.rs b/crates/proto/src/domain/mempool.rs index 6e9e56322c..332cd67725 100644 --- a/crates/proto/src/domain/mempool.rs +++ b/crates/proto/src/domain/mempool.rs @@ -1,10 +1,10 @@ use std::collections::HashSet; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::block::BlockHeader; -use miden_objects::note::Nullifier; -use miden_objects::transaction::TransactionId; -use miden_objects::utils::{Deserializable, Serializable}; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::BlockHeader; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::TransactionId; +use miden_protocol::utils::{Deserializable, Serializable}; use super::note::NetworkNote; use crate::errors::{ConversionError, MissingFieldHelper}; @@ -19,7 +19,8 @@ pub enum MempoolEvent { account_delta: Option, }, BlockCommitted { - header: BlockHeader, + // Box'd as this struct is quite large and triggers clippy. 
+ header: Box, txs: Vec, }, TransactionsReverted(HashSet), @@ -58,7 +59,7 @@ impl From for proto::block_producer::MempoolEvent { MempoolEvent::BlockCommitted { header, txs } => { proto::block_producer::mempool_event::Event::BlockCommitted( proto::block_producer::mempool_event::BlockCommitted { - block_header: Some(header.into()), + block_header: Some(header.as_ref().into()), transactions: txs.into_iter().map(Into::into).collect(), }, ) @@ -120,6 +121,7 @@ impl TryFrom for MempoolEvent { "block_header", ))? .try_into()?; + let header = Box::new(header); let txs = block_committed .transactions .into_iter() diff --git a/crates/proto/src/domain/merkle.rs b/crates/proto/src/domain/merkle.rs index 6d3845625f..ed14d523ba 100644 --- a/crates/proto/src/domain/merkle.rs +++ b/crates/proto/src/domain/merkle.rs @@ -1,13 +1,7 @@ -use miden_objects::Word; -use miden_objects::crypto::merkle::{ - Forest, - LeafIndex, - MerklePath, - MmrDelta, - SmtLeaf, - SmtProof, - SparseMerklePath, -}; +use miden_protocol::Word; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta}; +use miden_protocol::crypto::merkle::smt::{LeafIndex, SmtLeaf, SmtProof}; +use miden_protocol::crypto::merkle::{MerklePath, SparseMerklePath}; use crate::domain::{convert, try_convert}; use crate::errors::{ConversionError, MissingFieldHelper}; diff --git a/crates/proto/src/domain/mod.rs b/crates/proto/src/domain/mod.rs index f70c8f738a..b078655532 100644 --- a/crates/proto/src/domain/mod.rs +++ b/crates/proto/src/domain/mod.rs @@ -6,6 +6,7 @@ pub mod mempool; pub mod merkle; pub mod note; pub mod nullifier; +pub mod proof_request; pub mod transaction; // UTILITIES diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index a61884f69b..b7e07f2cc2 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -1,8 +1,10 @@ -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::note::{ +use miden_protocol::Word; +use 
miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::note::{ Note, + NoteAttachment, NoteDetails, - NoteExecutionHint, NoteId, NoteInclusionProof, NoteMetadata, @@ -11,11 +13,11 @@ use miden_objects::note::{ NoteType, Nullifier, }; -use miden_objects::utils::{Deserializable, Serializable}; -use miden_objects::{Felt, Word}; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_standards::note::{NetworkAccountTarget, NetworkAccountTargetError}; use thiserror::Error; -use super::account::NetworkAccountPrefix; +use super::account::NetworkAccountId; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; @@ -28,20 +30,24 @@ impl TryFrom for NoteMetadata { .ok_or_else(|| proto::note::NoteMetadata::missing_field(stringify!(sender)))? .try_into()?; let note_type = NoteType::try_from(u64::from(value.note_type))?; - let tag = NoteTag::from(value.tag); + let tag = NoteTag::new(value.tag); - let execution_hint = NoteExecutionHint::try_from(value.execution_hint)?; - - let aux = Felt::try_from(value.aux).map_err(|_| ConversionError::NotAValidFelt)?; + // Deserialize attachment if present + let attachment = if value.attachment.is_empty() { + NoteAttachment::default() + } else { + NoteAttachment::read_from_bytes(&value.attachment) + .map_err(|err| ConversionError::deserialization_error("NoteAttachment", err))? + }; - Ok(NoteMetadata::new(sender, note_type, tag, execution_hint, aux)?) 
+ Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) } } impl From for proto::note::NetworkNote { fn from(note: Note) -> Self { Self { - metadata: Some(proto::note::NoteMetadata::from(*note.metadata())), + metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: NoteDetails::from(note).to_bytes(), } } @@ -50,7 +56,7 @@ impl From for proto::note::NetworkNote { impl From for proto::note::Note { fn from(note: Note) -> Self { Self { - metadata: Some(proto::note::NoteMetadata::from(*note.metadata())), + metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: Some(NoteDetails::from(note).to_bytes()), } } @@ -60,7 +66,7 @@ impl From for proto::note::NetworkNote { fn from(note: NetworkNote) -> Self { let note = Note::from(note); Self { - metadata: Some(proto::note::NoteMetadata::from(*note.metadata())), + metadata: Some(proto::note::NoteMetadata::from(note.metadata().clone())), details: NoteDetails::from(note).to_bytes(), } } @@ -70,17 +76,10 @@ impl From for proto::note::NoteMetadata { fn from(val: NoteMetadata) -> Self { let sender = Some(val.sender().into()); let note_type = val.note_type() as u32; - let tag = val.tag().into(); - let execution_hint: u64 = val.execution_hint().into(); - let aux = val.aux().into(); - - proto::note::NoteMetadata { - sender, - note_type, - tag, - execution_hint, - aux, - } + let tag = val.tag().as_u32(); + let attachment = val.attachment().to_bytes(); + + proto::note::NoteMetadata { sender, note_type, tag, attachment } } } @@ -135,19 +134,18 @@ impl TryFrom<&proto::note::NoteInclusionInBlockProof> for (NoteId, NoteInclusion .clone(), )?; + let note_id = Word::try_from( + proof + .note_id + .as_ref() + .ok_or(proto::note::NoteInclusionInBlockProof::missing_field(stringify!(note_id)))? 
+ .id + .as_ref() + .ok_or(proto::note::NoteId::missing_field(stringify!(id)))?, + )?; + Ok(( - Word::try_from( - proof - .note_id - .as_ref() - .ok_or(proto::note::NoteInclusionInBlockProof::missing_field(stringify!( - note_id - )))? - .id - .as_ref() - .ok_or(proto::note::NoteId::missing_field(stringify!(id)))?, - )? - .into(), + NoteId::from_raw(note_id), NoteInclusionProof::new( proof.block_num.into(), proof.note_index_in_block.try_into()?, @@ -185,14 +183,12 @@ impl TryFrom for Note { #[derive(Clone, Debug, PartialEq, Eq)] pub enum NetworkNote { SingleTarget(SingleTargetNetworkNote), - MultiTarget(MultiTargetNetworkNote), } impl NetworkNote { pub fn inner(&self) -> &Note { match self { - NetworkNote::SingleTarget(note) => ¬e.0, - NetworkNote::MultiTarget(note) => ¬e.0, + NetworkNote::SingleTarget(note) => note.inner(), } } @@ -212,8 +208,7 @@ impl NetworkNote { impl From for Note { fn from(value: NetworkNote) -> Self { match value { - NetworkNote::SingleTarget(note) => note.0, - NetworkNote::MultiTarget(note) => note.0, + NetworkNote::SingleTarget(note) => note.into(), } } } @@ -222,15 +217,7 @@ impl TryFrom for NetworkNote { type Error = NetworkNoteError; fn try_from(note: Note) -> Result { - if note.is_network_note() { - if note.metadata().tag().is_single_target() { - Ok(NetworkNote::SingleTarget(SingleTargetNetworkNote(note))) - } else { - Ok(NetworkNote::MultiTarget(MultiTargetNetworkNote(note))) - } - } else { - Err(NetworkNoteError::InvalidExecutionMode(note.metadata().tag())) - } + SingleTargetNetworkNote::try_from(note).map(NetworkNote::SingleTarget) } } @@ -242,43 +229,22 @@ impl TryFrom for NetworkNote { } } -// MULTI TARGET NETWORK NOTE -// ================================================================================================ - -/// A newtype that wraps around notes having multiple targets to be used in a network mode. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct MultiTargetNetworkNote(Note); - -impl TryFrom for MultiTargetNetworkNote { - type Error = NetworkNoteError; - - fn try_from(note: Note) -> Result { - if note.is_network_note() && !note.metadata().tag().is_single_target() { - Ok(Self(note)) - } else { - Err(NetworkNoteError::InvalidExecutionMode(note.metadata().tag())) - } - } -} - -impl TryFrom for MultiTargetNetworkNote { - type Error = ConversionError; - - fn try_from(proto_note: proto::note::NetworkNote) -> Result { - from_proto(proto_note) - } -} - // SINGLE TARGET NETWORK NOTE // ================================================================================================ -/// A newtype that wraps around notes targeting a single account to be used in a network mode. +/// A newtype that wraps around notes targeting a single network account. +/// +/// A note is considered a single-target network note if its attachment +/// is a valid `NetworkAccountTarget`. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct SingleTargetNetworkNote(Note); +pub struct SingleTargetNetworkNote { + note: Note, + account_target: NetworkAccountTarget, +} impl SingleTargetNetworkNote { pub fn inner(&self) -> &Note { - &self.0 + &self.note } pub fn metadata(&self) -> &NoteMetadata { @@ -293,18 +259,19 @@ impl SingleTargetNetworkNote { self.inner().id() } - /// The account prefix that this note targets. - pub fn account_prefix(&self) -> NetworkAccountPrefix { - self.metadata() - .tag() - .try_into() - .expect("Single target network note's tag should contain an account prefix") + /// The network account ID that this note targets. 
+ pub fn account_id(&self) -> NetworkAccountId { + self.account_target.target_id().try_into().expect("always a network account ID") + } + + pub fn can_be_consumed(&self, block_num: BlockNumber) -> Option { + self.account_target.execution_hint().can_be_consumed(block_num) } } impl From for Note { fn from(value: SingleTargetNetworkNote) -> Self { - value.0 + value.note } } @@ -312,11 +279,11 @@ impl TryFrom for SingleTargetNetworkNote { type Error = NetworkNoteError; fn try_from(note: Note) -> Result { - if note.is_network_note() && note.metadata().tag().is_single_target() { - Ok(Self(note)) - } else { - Err(NetworkNoteError::InvalidExecutionMode(note.metadata().tag())) - } + // Single-target network notes are identified by having a NetworkAccountTarget attachment + let attachment = note.metadata().attachment(); + let account_target = NetworkAccountTarget::try_from(attachment) + .map_err(NetworkNoteError::InvalidAttachment)?; + Ok(Self { note, account_target }) } } @@ -347,8 +314,8 @@ where #[derive(Debug, Error)] pub enum NetworkNoteError { - #[error("note tag {0} is not a valid network note tag")] - InvalidExecutionMode(NoteTag), + #[error("note does not have a valid NetworkAccountTarget attachment: {0}")] + InvalidAttachment(#[source] NetworkAccountTargetError), } // NOTE SCRIPT diff --git a/crates/proto/src/domain/nullifier.rs b/crates/proto/src/domain/nullifier.rs index f511731f91..3ccdf88bae 100644 --- a/crates/proto/src/domain/nullifier.rs +++ b/crates/proto/src/domain/nullifier.rs @@ -1,6 +1,6 @@ -use miden_objects::Word; -use miden_objects::crypto::merkle::SmtProof; -use miden_objects::note::Nullifier; +use miden_protocol::Word; +use miden_protocol::crypto::merkle::smt::SmtProof; +use miden_protocol::note::Nullifier; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; @@ -28,7 +28,7 @@ impl TryFrom for Nullifier { fn try_from(value: proto::primitives::Digest) -> Result { let digest: Word = value.try_into()?; - 
Ok(digest.into()) + Ok(Nullifier::from_raw(digest)) } } @@ -41,32 +41,30 @@ pub struct NullifierWitnessRecord { pub proof: SmtProof, } -impl TryFrom - for NullifierWitnessRecord -{ +impl TryFrom for NullifierWitnessRecord { type Error = ConversionError; fn try_from( - nullifier_witness_record: proto::block_producer_store::block_inputs::NullifierWitness, + nullifier_witness_record: proto::store::block_inputs::NullifierWitness, ) -> Result { Ok(Self { nullifier: nullifier_witness_record .nullifier - .ok_or(proto::block_producer_store::block_inputs::NullifierWitness::missing_field( - stringify!(nullifier), - ))? + .ok_or(proto::store::block_inputs::NullifierWitness::missing_field(stringify!( + nullifier + )))? .try_into()?, proof: nullifier_witness_record .opening - .ok_or(proto::block_producer_store::block_inputs::NullifierWitness::missing_field( - stringify!(opening), - ))? + .ok_or(proto::store::block_inputs::NullifierWitness::missing_field(stringify!( + opening + )))? .try_into()?, }) } } -impl From for proto::block_producer_store::block_inputs::NullifierWitness { +impl From for proto::store::block_inputs::NullifierWitness { fn from(value: NullifierWitnessRecord) -> Self { Self { nullifier: Some(value.nullifier.into()), diff --git a/crates/proto/src/domain/proof_request.rs b/crates/proto/src/domain/proof_request.rs new file mode 100644 index 0000000000..f6a40d7537 --- /dev/null +++ b/crates/proto/src/domain/proof_request.rs @@ -0,0 +1,39 @@ +// PROOF REQUEST +// ================================================================================================ + +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs}; +use miden_protocol::utils::{ + ByteReader, + ByteWriter, + Deserializable, + DeserializationError, + Serializable, +}; + +pub struct BlockProofRequest { + pub tx_batches: OrderedBatches, + pub block_header: BlockHeader, + pub block_inputs: BlockInputs, +} + +impl Serializable for BlockProofRequest { + fn 
write_into(&self, target: &mut W) { + let Self { tx_batches, block_header, block_inputs } = self; + tx_batches.write_into(target); + block_header.write_into(target); + block_inputs.write_into(target); + } +} + +impl Deserializable for BlockProofRequest { + fn read_from(source: &mut R) -> Result { + let block = Self { + tx_batches: OrderedBatches::read_from(source)?, + block_header: BlockHeader::read_from(source)?, + block_inputs: BlockInputs::read_from(source)?, + }; + + Ok(block) + } +} diff --git a/crates/proto/src/domain/transaction.rs b/crates/proto/src/domain/transaction.rs index 53ccf6b0c8..4b2e29362f 100644 --- a/crates/proto/src/domain/transaction.rs +++ b/crates/proto/src/domain/transaction.rs @@ -1,5 +1,5 @@ -use miden_objects::Word; -use miden_objects::transaction::TransactionId; +use miden_protocol::Word; +use miden_protocol::transaction::TransactionId; use crate::errors::ConversionError; use crate::generated as proto; @@ -39,7 +39,7 @@ impl TryFrom for TransactionId { fn try_from(value: proto::primitives::Digest) -> Result { let digest: Word = value.try_into()?; - Ok(digest.into()) + Ok(TransactionId::from_raw(digest)) } } diff --git a/crates/proto/src/errors/mod.rs b/crates/proto/src/errors/mod.rs index 5e461315cd..d2fc936167 100644 --- a/crates/proto/src/errors/mod.rs +++ b/crates/proto/src/errors/mod.rs @@ -3,9 +3,9 @@ use std::num::TryFromIntError; // Re-export the GrpcError derive macro for convenience pub use miden_node_grpc_error_macro::GrpcError; -use miden_objects::crypto::merkle::{SmtLeafError, SmtProofError}; -use miden_objects::utils::DeserializationError; -use miden_objects::{AssetError, FeeError}; +use miden_protocol::crypto::merkle::smt::{SmtLeafError, SmtProofError}; +use miden_protocol::errors::{AccountError, AssetError, FeeError, NoteError, StorageSlotNameError}; +use miden_protocol::utils::DeserializationError; use thiserror::Error; use crate::domain::note::NetworkNoteError; @@ -17,18 +17,24 @@ mod test_macro; pub enum 
ConversionError { #[error("asset error")] AssetError(#[from] AssetError), + #[error("account code missing")] + AccountCodeMissing, + #[error("account error")] + AccountError(#[from] AccountError), #[error("fee parameters error")] FeeError(#[from] FeeError), #[error("hex error")] HexError(#[from] hex::FromHexError), #[error("note error")] - NoteError(#[from] miden_objects::NoteError), + NoteError(#[from] NoteError), #[error("network note error")] NetworkNoteError(#[from] NetworkNoteError), #[error("SMT leaf error")] SmtLeafError(#[from] SmtLeafError), #[error("SMT proof error")] SmtProofError(#[from] SmtProofError), + #[error("storage slot name error")] + StorageSlotNameError(#[from] StorageSlotNameError), #[error("integer conversion error: {0}")] TryFromIntError(#[from] TryFromIntError), #[error("too much data, expected {expected}, got {got}")] @@ -38,14 +44,12 @@ pub enum ConversionError { #[error("value is not in the range 0..MODULUS")] NotAValidFelt, #[error("merkle error")] - MerkleError(#[from] miden_objects::crypto::merkle::MerkleError), + MerkleError(#[from] miden_protocol::crypto::merkle::MerkleError), #[error("field `{entity}::{field_name}` is missing")] MissingFieldInProtobufRepresentation { entity: &'static str, field_name: &'static str, }, - #[error("MMR error")] - MmrError(#[from] miden_objects::crypto::merkle::MmrError), #[error("failed to deserialize {entity}")] DeserializationError { entity: &'static str, diff --git a/crates/proto/src/generated/account.rs b/crates/proto/src/generated/account.rs index d30e8c888c..6ff6135626 100644 --- a/crates/proto/src/generated/account.rs +++ b/crates/proto/src/generated/account.rs @@ -7,7 +7,7 @@ #[prost(skip_debug)] pub struct AccountId { /// 15 bytes (120 bits) encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::account::account_id::AccountId\]. + /// \[miden_protocol::account::account_id::AccountId\]. 
#[prost(bytes = "vec", tag = "1")] pub id: ::prost::alloc::vec::Vec, } @@ -27,20 +27,25 @@ pub struct AccountSummary { /// Represents the storage header of an account. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountStorageHeader { - /// Storage slots with their types and commitments. + /// Storage slots with their types and data. #[prost(message, repeated, tag = "1")] pub slots: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `AccountStorageHeader`. pub mod account_storage_header { /// A single storage slot in the account storage header. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct StorageSlot { + /// The name of the storage slot. + #[prost(string, tag = "1")] + pub slot_name: ::prost::alloc::string::String, /// The type of the storage slot. - #[prost(uint32, tag = "1")] + #[prost(uint32, tag = "2")] pub slot_type: u32, - /// The commitment (Word) for this storage slot. - #[prost(message, optional, tag = "2")] + /// The data (Word) for this storage slot. + /// For value slots (slot_type=0), this is the actual value stored in the slot. + /// For map slots (slot_type=1), this is the root of the storage map. + #[prost(message, optional, tag = "3")] pub commitment: ::core::option::Option, } } @@ -51,7 +56,7 @@ pub struct AccountDetails { #[prost(message, optional, tag = "1")] pub summary: ::core::option::Option, /// Account details encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::account::Account\]. + /// \[miden_protocol::account::Account\]. 
#[prost(bytes = "vec", optional, tag = "2")] pub details: ::core::option::Option<::prost::alloc::vec::Vec>, } diff --git a/crates/proto/src/generated/block_producer.rs b/crates/proto/src/generated/block_producer.rs index 5771d5510a..9c95e6a75c 100644 --- a/crates/proto/src/generated/block_producer.rs +++ b/crates/proto/src/generated/block_producer.rs @@ -1,27 +1,4 @@ // This file is @generated by prost-build. -/// Represents the status of the block producer. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockProducerStatus { - /// The block producer's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The block producer's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, -} -/// Represents the result of submitting proven transaction. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SubmitProvenTransactionResponse { - /// The node's current block height. - #[prost(fixed32, tag = "1")] - pub block_height: u32, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SubmitProvenBatchResponse { - /// The node's current block height. - #[prost(fixed32, tag = "1")] - pub block_height: u32, -} /// Request to subscribe to mempool events. #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct MempoolSubscriptionRequest { @@ -68,7 +45,7 @@ pub mod mempool_event { /// Changes to a network account, if any. This includes creation of new network accounts. /// /// The account delta is encoded using \[winter_utils::Serializable\] implementation - /// for \[miden_objects::account::delta::AccountDelta\]. + /// for \[miden_protocol::account::delta::AccountDelta\]. 
#[prost(bytes = "vec", optional, tag = "4")] pub network_account_delta: ::core::option::Option<::prost::alloc::vec::Vec>, } @@ -186,7 +163,7 @@ pub mod api_client { &mut self, request: impl tonic::IntoRequest<()>, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -205,14 +182,14 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("block_producer.Api", "Status")); self.inner.unary(req, path, codec).await } - /// Submits proven transaction to the Miden network + /// Submits proven transaction to the Miden network. Returns the node's current block height. pub async fn submit_proven_transaction( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransaction, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -244,13 +221,15 @@ pub mod api_client { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. pub async fn submit_proven_batch( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransactionBatch, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -324,15 +303,15 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; - /// Submits proven transaction to the Miden network + /// Submits proven transaction to the Miden network. Returns the node's current block height. async fn submit_proven_transaction( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Submits a proven batch to the Miden network. 
@@ -345,11 +324,13 @@ pub mod api_server { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. async fn submit_proven_batch( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Server streaming response type for the MempoolSubscription method. @@ -456,7 +437,7 @@ pub mod api_server { #[allow(non_camel_case_types)] struct StatusSvc(pub Arc); impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::BlockProducerStatus; + type Response = super::super::rpc::BlockProducerStatus; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -499,7 +480,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransaction, > for SubmitProvenTransactionSvc { - type Response = super::SubmitProvenTransactionResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -547,7 +528,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransactionBatch, > for SubmitProvenBatchSvc { - type Response = super::SubmitProvenBatchResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, diff --git a/crates/proto/src/generated/block_producer_store.rs b/crates/proto/src/generated/block_producer_store.rs deleted file mode 100644 index 3603ca50cb..0000000000 --- a/crates/proto/src/generated/block_producer_store.rs +++ /dev/null @@ -1,789 +0,0 @@ -// This file is @generated by prost-build. -/// Returns data required to prove the next block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputsRequest { - /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses. 
- #[prost(message, repeated, tag = "1")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Nullifiers of all notes consumed by the block for which to retrieve witnesses. - /// - /// Due to note erasure it will generally not be possible to know the exact set of nullifiers - /// a block will create, unless we pre-execute note erasure. So in practice, this set of - /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a - /// superset of the nullifiers the block may create. - /// - /// However, if it is known that a certain note will be erased, it would not be necessary to - /// provide a nullifier witness for it. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, - /// Array of block numbers referenced by all batches in the block. - #[prost(fixed32, repeated, tag = "4")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting block inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputs { - /// The latest block header. - #[prost(message, optional, tag = "1")] - pub latest_block_header: ::core::option::Option, - /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - /// the store**. - #[prost(message, repeated, tag = "2")] - pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec< - super::note::NoteInclusionInBlockProof, - >, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the requested blocks - /// referenced by the batches in the block. 
- #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, - /// The state commitments of the requested accounts and their authentication paths. - #[prost(message, repeated, tag = "4")] - pub account_witnesses: ::prost::alloc::vec::Vec, - /// The requested nullifiers and their authentication paths. - #[prost(message, repeated, tag = "5")] - pub nullifier_witnesses: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `BlockInputs`. -pub mod block_inputs { - /// A nullifier returned as a response to the `GetBlockInputs`. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct NullifierWitness { - /// The nullifier. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The SMT proof to verify the nullifier's inclusion in the nullifier tree. - #[prost(message, optional, tag = "2")] - pub opening: ::core::option::Option, - } -} -/// Returns the inputs for a transaction batch. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputsRequest { - /// List of unauthenticated note commitments to be queried from the database. - #[prost(message, repeated, tag = "1")] - pub note_commitments: ::prost::alloc::vec::Vec, - /// Set of block numbers referenced by transactions. - #[prost(fixed32, repeated, tag = "2")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting batch inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputs { - /// The block header that the transaction batch should reference. - #[prost(message, optional, tag = "1")] - pub batch_reference_block_header: ::core::option::Option< - super::blockchain::BlockHeader, - >, - /// Proof of each *found* unauthenticated note's inclusion in a block. 
- #[prost(message, repeated, tag = "2")] - pub note_proofs: ::prost::alloc::vec::Vec, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced - /// by the transactions in the batch. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, -} -/// Returns data required to validate a new transaction. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputsRequest { - /// ID of the account against which a transaction is executed. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Set of nullifiers consumed by this transaction. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Set of unauthenticated note commitments to check for existence on-chain. - /// - /// These are notes which were not on-chain at the state the transaction was proven, - /// but could by now be present. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting transaction inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputs { - /// Account state proof. - #[prost(message, optional, tag = "1")] - pub account_state: ::core::option::Option< - transaction_inputs::AccountTransactionInputRecord, - >, - /// List of nullifiers that have been consumed. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec< - transaction_inputs::NullifierTransactionInputRecord, - >, - /// List of unauthenticated notes that were not found in the database. - #[prost(message, repeated, tag = "3")] - pub found_unauthenticated_notes: ::prost::alloc::vec::Vec, - /// The node's current block height. - #[prost(fixed32, tag = "4")] - pub block_height: u32, - /// Whether the account ID prefix is unique. Only relevant for account creation requests. 
- /// - /// TODO: Replace this with an error. When a general error message exists. - #[prost(bool, optional, tag = "5")] - pub new_account_id_prefix_is_unique: ::core::option::Option, -} -/// Nested message and enum types in `TransactionInputs`. -pub mod transaction_inputs { - /// An account returned as a response to the `GetTransactionInputs`. - #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] - pub struct AccountTransactionInputRecord { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The latest account commitment, zero commitment if the account doesn't exist. - #[prost(message, optional, tag = "2")] - pub account_commitment: ::core::option::Option, - } - /// A nullifier returned as a response to the `GetTransactionInputs`. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierTransactionInputRecord { - /// The nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The block at which the nullifier has been consumed, zero if not consumed. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// Generated client implementations. -pub mod block_producer_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the BlockProducer component - #[derive(Debug, Clone)] - pub struct BlockProducerClient { - inner: tonic::client::Grpc, - } - impl BlockProducerClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl BlockProducerClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> BlockProducerClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - BlockProducerClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Applies changes of a new block to the DB and in-memory data structures. - pub async fn apply_block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/ApplyBlock", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("block_producer_store.BlockProducer", "ApplyBlock"), - ); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBlockHeaderByNumber", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to prove the next block. 
- pub async fn get_block_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBlockInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBlockInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the inputs for a transaction batch. - pub async fn get_batch_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBatchInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBatchInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to validate a new transaction. 
- pub async fn get_transaction_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetTransactionInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetTransactionInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod block_producer_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. - #[async_trait] - pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { - /// Applies changes of a new block to the DB and in-memory data structures. - async fn apply_block( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns data required to prove the next block. - async fn get_block_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns the inputs for a transaction batch. - async fn get_batch_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns data required to validate a new transaction. 
- async fn get_transaction_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the BlockProducer component - #[derive(Debug)] - pub struct BlockProducerServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl BlockProducerServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for BlockProducerServer - where - T: BlockProducer, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/block_producer_store.BlockProducer/ApplyBlock" => { - #[allow(non_camel_case_types)] - struct ApplyBlockSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for ApplyBlockSvc { - type Response = (); - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::apply_block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ApplyBlockSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct 
GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBlockInputs" => { - #[allow(non_camel_case_types)] - struct GetBlockInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBlockInputsSvc { - type Response = super::BlockInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = 
self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBatchInputs" => { - #[allow(non_camel_case_types)] - struct GetBatchInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBatchInputsSvc { - type Response = super::BatchInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_batch_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBatchInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetTransactionInputs" => { - 
#[allow(non_camel_case_types)] - struct GetTransactionInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetTransactionInputsSvc { - type Response = super::TransactionInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_transaction_inputs( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetTransactionInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for BlockProducerServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: 
self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "block_producer_store.BlockProducer"; - impl tonic::server::NamedService for BlockProducerServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 1f11528964..69bbe2e28e 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -3,15 +3,23 @@ #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Block { /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::block::Block\]. + /// \[miden_protocol::block::Block\]. #[prost(bytes = "vec", tag = "1")] pub block: ::prost::alloc::vec::Vec, } +/// Represents a proposed block. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ProposedBlock { + /// Block data encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_protocol::block::ProposedBlock\]. + #[prost(bytes = "vec", tag = "1")] + pub proposed_block: ::prost::alloc::vec::Vec, +} /// Represents a block or nothing. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct MaybeBlock { /// The requested block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::block::Block\]. + /// \[miden_protocol::block::Block\]. #[prost(bytes = "vec", optional, tag = "1")] pub block: ::core::option::Option<::prost::alloc::vec::Vec>, } @@ -56,9 +64,9 @@ pub struct BlockHeader { /// A commitment to a set of IDs of transactions which affected accounts in this block. #[prost(message, optional, tag = "8")] pub tx_commitment: ::core::option::Option, - /// A commitment to a STARK proof attesting to the correct state transition. + /// The validator's ECDSA public key. 
#[prost(message, optional, tag = "9")] - pub proof_commitment: ::core::option::Option, + pub validator_key: ::core::option::Option, /// A commitment to all transaction kernels supported by this block. #[prost(message, optional, tag = "10")] pub tx_kernel_commitment: ::core::option::Option, @@ -69,6 +77,22 @@ pub struct BlockHeader { #[prost(fixed32, tag = "12")] pub timestamp: u32, } +/// Validator ECDSA public key. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ValidatorPublicKey { + /// Signature encoded using \[winter_utils::Serializable\] implementation for + /// \[crypto::dsa::ecdsa_k256_keccak::PublicKey\]. + #[prost(bytes = "vec", tag = "1")] + pub validator_key: ::prost::alloc::vec::Vec, +} +/// Block ECDSA Signature. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockSignature { + /// Signature encoded using \[winter_utils::Serializable\] implementation for + /// \[crypto::dsa::ecdsa_k256_keccak::Signature\]. + #[prost(bytes = "vec", tag = "1")] + pub signature: ::prost::alloc::vec::Vec, +} /// Definition of the fee parameters. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct FeeParameters { @@ -79,3 +103,11 @@ pub struct FeeParameters { #[prost(fixed32, tag = "2")] pub verification_base_fee: u32, } +/// Represents a block body. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockBody { + /// Block body data encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_protocol::block::BlockBody\]. 
+ #[prost(bytes = "vec", tag = "1")] + pub block_body: ::prost::alloc::vec::Vec, +} diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs index ab0567476f..61e3a53790 100644 --- a/crates/proto/src/generated/mod.rs +++ b/crates/proto/src/generated/mod.rs @@ -3,14 +3,11 @@ pub mod account; pub mod block_producer; -pub mod block_producer_store; pub mod blockchain; pub mod note; -pub mod ntx_builder_store; pub mod primitives; pub mod remote_prover; pub mod rpc; -pub mod rpc_store; -pub mod shared; +pub mod store; pub mod transaction; pub mod validator; diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs index 097c5f94d6..83d56aeb6b 100644 --- a/crates/proto/src/generated/note.rs +++ b/crates/proto/src/generated/note.rs @@ -24,17 +24,14 @@ pub struct NoteMetadata { pub note_type: u32, /// A value which can be used by the recipient(s) to identify notes intended for them. /// - /// See `miden_objects::note::note_tag` for more info. + /// See `miden_protocol::note::note_tag` for more info. #[prost(fixed32, tag = "3")] pub tag: u32, - /// Specifies when a note is ready to be consumed. + /// Serialized note attachment /// - /// See `miden_objects::note::execution_hint` for more info. - #[prost(fixed64, tag = "4")] - pub execution_hint: u64, - /// An arbitrary user-defined value. - #[prost(fixed64, tag = "5")] - pub aux: u64, + /// See `miden_protocol::note::NoteAttachment` for more info. + #[prost(bytes = "vec", tag = "4")] + pub attachment: ::prost::alloc::vec::Vec, } /// Represents a note. /// diff --git a/crates/proto/src/generated/ntx_builder_store.rs b/crates/proto/src/generated/ntx_builder_store.rs deleted file mode 100644 index 3beb83076d..0000000000 --- a/crates/proto/src/generated/ntx_builder_store.rs +++ /dev/null @@ -1,843 +0,0 @@ -// This file is @generated by prost-build. -/// Account ID prefix. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountIdPrefix { - /// Account ID prefix. - #[prost(fixed32, tag = "1")] - pub account_id_prefix: u32, -} -/// Represents the result of getting network account details by prefix. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeAccountDetails { - /// Account details. - #[prost(message, optional, tag = "1")] - pub details: ::core::option::Option, -} -/// Returns a list of unconsumed network notes using pagination. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesRequest { - /// An opaque token used to paginate through the notes. - /// - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, -} -/// Returns a paginated list of unconsumed network notes for an account. -/// -/// Notes created or consumed after the specified block are excluded from the result. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesForAccountRequest { - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - /// - /// Note that this token is only valid if used with the same parameters. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, - /// The network account ID prefix to filter notes by. - #[prost(uint32, tag = "3")] - pub network_account_id_prefix: u32, - /// The block number to filter the returned notes by. - /// - /// Notes that are created or consumed after this block are excluded from the result. 
- #[prost(fixed32, tag = "4")] - pub block_num: u32, -} -/// Represents the result of getting the unconsumed network notes. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UnconsumedNetworkNotes { - /// An opaque pagination token. - /// - /// Use this in your next request to get the next - /// set of data. - /// - /// Will be null once there is no more data remaining. - #[prost(uint64, optional, tag = "1")] - pub next_token: ::core::option::Option, - /// The list of unconsumed network notes. - #[prost(message, repeated, tag = "2")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Current blockchain data based on the requested block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CurrentBlockchainData { - /// Commitments that represent the current state according to the MMR. - #[prost(message, repeated, tag = "1")] - pub current_peaks: ::prost::alloc::vec::Vec, - /// Current block header. - #[prost(message, optional, tag = "2")] - pub current_block_header: ::core::option::Option, -} -/// Generated client implementations. -pub mod ntx_builder_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the network transaction builder component - #[derive(Debug, Clone)] - pub struct NtxBuilderClient { - inner: tonic::client::Grpc, - } - impl NtxBuilderClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl NtxBuilderClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> NtxBuilderClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetBlockHeaderByNumber", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of unconsumed network notes. - pub async fn get_unconsumed_network_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetUnconsumedNetworkNotes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. 
- pub async fn get_unconsumed_network_notes_for_account( - &mut self, - request: impl tonic::IntoRequest< - super::UnconsumedNetworkNotesForAccountRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotesForAccount", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetUnconsumedNetworkNotesForAccount", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - pub async fn get_current_blockchain_data( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetCurrentBlockchainData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetCurrentBlockchainData", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of a network account with the specified account prefix. 
- pub async fn get_network_account_details_by_prefix( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetNetworkAccountDetailsByPrefix", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetNetworkAccountDetailsByPrefix", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetNoteScriptByRoot", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod ntx_builder_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. - #[async_trait] - pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. 
- async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of unconsumed network notes. - async fn get_unconsumed_network_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. - async fn get_unconsumed_network_notes_for_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - async fn get_current_blockchain_data( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of a network account with the specified account prefix. - async fn get_network_account_details_by_prefix( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. 
- async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the network transaction builder component - #[derive(Debug)] - pub struct NtxBuilderServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl NtxBuilderServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for NtxBuilderServer - where - T: NtxBuilder, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/ntx_builder_store.NtxBuilder/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - 
max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotes" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetUnconsumedNetworkNotesSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotesForAccount" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesForAccountSvc( - pub Arc, - ); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::UnconsumedNetworkNotesForAccountRequest, - > for GetUnconsumedNetworkNotesForAccountSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::UnconsumedNetworkNotesForAccountRequest, - 
>, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes_for_account( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesForAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetCurrentBlockchainData" => { - #[allow(non_camel_case_types)] - struct GetCurrentBlockchainDataSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::blockchain::MaybeBlockNumber, - > for GetCurrentBlockchainDataSvc { - type Response = super::CurrentBlockchainData; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::MaybeBlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_current_blockchain_data( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = 
GetCurrentBlockchainDataSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { - #[allow(non_camel_case_types)] - struct GetNetworkAccountDetailsByPrefixSvc( - pub Arc, - ); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountDetailsByPrefixSvc { - type Response = super::MaybeAccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_network_account_details_by_prefix( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNetworkAccountDetailsByPrefixSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for 
GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for NtxBuilderServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "ntx_builder_store.NtxBuilder"; - impl 
tonic::server::NamedService for NtxBuilderServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/primitives.rs b/crates/proto/src/generated/primitives.rs index 907ef856a9..ea7f5a1a17 100644 --- a/crates/proto/src/generated/primitives.rs +++ b/crates/proto/src/generated/primitives.rs @@ -15,10 +15,10 @@ pub struct SmtLeafEntry { #[prost(message, optional, tag = "2")] pub value: ::core::option::Option, } -/// Represents multiple leaf entries in an SMT. +/// Multiple leaf entries when hash collisions occur at the same leaf position. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SmtLeafEntryList { - /// The entries list. + /// The list of entries at this leaf. #[prost(message, repeated, tag = "1")] pub entries: ::prost::alloc::vec::Vec, } diff --git a/crates/proto/src/generated/remote_prover.rs b/crates/proto/src/generated/remote_prover.rs index 210b691537..b504804c3e 100644 --- a/crates/proto/src/generated/remote_prover.rs +++ b/crates/proto/src/generated/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. 
#[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 5c6a4ce4f1..3e3ef1d0d6 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -10,13 +10,598 @@ pub struct RpcStatus { pub genesis_commitment: ::core::option::Option, /// The store status. #[prost(message, optional, tag = "3")] - pub store: ::core::option::Option, + pub store: ::core::option::Option, /// The block producer status. #[prost(message, optional, tag = "4")] - pub block_producer: ::core::option::Option< - super::block_producer::BlockProducerStatus, + pub block_producer: ::core::option::Option, +} +/// Represents the status of the block producer. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockProducerStatus { + /// The block producer's running version. + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + /// The block producer's status. + #[prost(string, tag = "2")] + pub status: ::prost::alloc::string::String, + /// The block producer's current view of the chain tip height. + /// + /// This is the height of the latest block that the block producer considers + /// to be part of the canonical chain. + #[prost(fixed32, tag = "4")] + pub chain_tip: u32, + /// Statistics about the mempool. + #[prost(message, optional, tag = "3")] + pub mempool_stats: ::core::option::Option, +} +/// Statistics about the mempool. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MempoolStats { + /// Number of transactions currently in the mempool waiting to be batched. + #[prost(uint64, tag = "1")] + pub unbatched_transactions: u64, + /// Number of batches currently being proven. + #[prost(uint64, tag = "2")] + pub proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + #[prost(uint64, tag = "3")] + pub proven_batches: u64, +} +/// Represents the status of the store. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct StoreStatus { + /// The store's running version. + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + /// The store's status. + #[prost(string, tag = "2")] + pub status: ::prost::alloc::string::String, + /// Number of the latest block in the chain. + #[prost(fixed32, tag = "3")] + pub chain_tip: u32, +} +/// Returns the block header corresponding to the requested block number, as well as the merkle +/// path and current forest which validate the block's inclusion in the chain. +/// +/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockHeaderByNumberRequest { + /// The target block height, defaults to latest if not provided. + #[prost(uint32, optional, tag = "1")] + pub block_num: ::core::option::Option, + /// Whether or not to return authentication data for the block header. + #[prost(bool, optional, tag = "2")] + pub include_mmr_proof: ::core::option::Option, +} +/// Represents the result of getting a block header by block number. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockHeaderByNumberResponse { + /// The requested block header. + #[prost(message, optional, tag = "1")] + pub block_header: ::core::option::Option, + /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. + #[prost(message, optional, tag = "2")] + pub mmr_path: ::core::option::Option, + /// Current chain length. + #[prost(fixed32, optional, tag = "3")] + pub chain_length: ::core::option::Option, +} +/// Represents a note script or nothing. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MaybeNoteScript { + /// The script for a note by its root. + #[prost(message, optional, tag = "1")] + pub script: ::core::option::Option, +} +/// Defines the request for account details. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountRequest { + /// ID of the account for which we want to get data + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// Optional block height at which to return the proof. + /// + /// Defaults to current chain tip if unspecified. + #[prost(message, optional, tag = "2")] + pub block_num: ::core::option::Option, + /// Request for additional account details; valid only for public accounts. + #[prost(message, optional, tag = "3")] + pub details: ::core::option::Option, +} +/// Nested message and enum types in `AccountRequest`. +pub mod account_request { + /// Request the details for a public account. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountDetailRequest { + /// Last known code commitment to the requester. The response will include account code + /// only if its commitment is different from this value. + /// + /// If the field is ommiteed, the response will not include the account code. + #[prost(message, optional, tag = "1")] + pub code_commitment: ::core::option::Option, + /// Last known asset vault commitment to the requester. The response will include asset vault data + /// only if its commitment is different from this value. If the value is not present in the + /// request, the response will not contain one either. + /// If the number of to-be-returned asset entries exceed a threshold, they have to be requested + /// separately, which is signaled in the response message with dedicated flag. + #[prost(message, optional, tag = "2")] + pub asset_vault_commitment: ::core::option::Option< + super::super::primitives::Digest, + >, + /// Additional request per storage map. + #[prost(message, repeated, tag = "3")] + pub storage_maps: ::prost::alloc::vec::Vec< + account_detail_request::StorageMapDetailRequest, + >, + } + /// Nested message and enum types in `AccountDetailRequest`. 
+ pub mod account_detail_request { + /// Represents a storage slot index and the associated map keys. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct StorageMapDetailRequest { + /// Storage slot name. + #[prost(string, tag = "1")] + pub slot_name: ::prost::alloc::string::String, + #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] + pub slot_data: ::core::option::Option, + } + /// Nested message and enum types in `StorageMapDetailRequest`. + pub mod storage_map_detail_request { + /// Indirection required for use in `oneof {..}` block. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MapKeys { + /// A list of map keys associated with this storage slot. + #[prost(message, repeated, tag = "1")] + pub map_keys: ::prost::alloc::vec::Vec< + super::super::super::super::primitives::Digest, + >, + } + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum SlotData { + /// Request to return all storage map data. If the number exceeds a threshold of 1000 entries, + /// the response will not contain them but must be requested separately. + #[prost(bool, tag = "2")] + AllEntries(bool), + /// A list of map keys associated with the given storage slot identified by `slot_name`. + #[prost(message, tag = "3")] + MapKeys(MapKeys), + } + } + } +} +/// Represents the result of getting account proof. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountResponse { + /// The block number at which the account witness was created and the account details were observed. + #[prost(message, optional, tag = "1")] + pub block_num: ::core::option::Option, + /// Account ID, current state commitment, and SMT path. + #[prost(message, optional, tag = "2")] + pub witness: ::core::option::Option, + /// Additional details for public accounts. + #[prost(message, optional, tag = "3")] + pub details: ::core::option::Option, +} +/// Nested message and enum types in `AccountResponse`. 
+pub mod account_response { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountDetails { + /// Account header. + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + /// Account storage data + #[prost(message, optional, tag = "2")] + pub storage_details: ::core::option::Option, + /// Account code; empty if code commitments matched or none was requested. + #[prost(bytes = "vec", optional, tag = "3")] + pub code: ::core::option::Option<::prost::alloc::vec::Vec>, + /// Account asset vault data; empty if vault commitments matched or the requester + /// omitted it in the request. + #[prost(message, optional, tag = "4")] + pub vault_details: ::core::option::Option, + } +} +/// Account vault details for AccountResponse +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountVaultDetails { + /// A flag that is set to true if the account contains too many assets. This indicates + /// to the user that `SyncAccountVault` endpoint should be used to retrieve the + /// account's assets + #[prost(bool, tag = "1")] + pub too_many_assets: bool, + /// When too_many_assets == false, this will contain the list of assets in the + /// account's vault + #[prost(message, repeated, tag = "2")] + pub assets: ::prost::alloc::vec::Vec, +} +/// Account storage details for AccountResponse +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountStorageDetails { + /// Account storage header (storage slot info for up to 256 slots) + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + /// Additional data for the requested storage maps + #[prost(message, repeated, tag = "2")] + pub map_details: ::prost::alloc::vec::Vec< + account_storage_details::AccountStorageMapDetails, >, } +/// Nested message and enum types in `AccountStorageDetails`. +pub mod account_storage_details { + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AccountStorageMapDetails { + /// Storage slot name. 
+ #[prost(string, tag = "1")] + pub slot_name: ::prost::alloc::string::String, + /// True when the number of entries exceeds the response limit. + /// When set, clients should use the `SyncStorageMaps` endpoint. + #[prost(bool, tag = "2")] + pub too_many_entries: bool, + /// The map entries (with or without proofs). Empty when too_many_entries is true. + #[prost(oneof = "account_storage_map_details::Entries", tags = "3, 4")] + pub entries: ::core::option::Option, + } + /// Nested message and enum types in `AccountStorageMapDetails`. + pub mod account_storage_map_details { + /// Wrapper for repeated storage map entries including their proofs. + /// Used when specific keys are requested to enable client-side verification. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MapEntriesWithProofs { + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec< + map_entries_with_proofs::StorageMapEntryWithProof, + >, + } + /// Nested message and enum types in `MapEntriesWithProofs`. + pub mod map_entries_with_proofs { + /// Definition of individual storage entries including a proof. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct StorageMapEntryWithProof { + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + #[prost(message, optional, tag = "3")] + pub proof: ::core::option::Option< + super::super::super::super::primitives::SmtOpening, + >, + } + } + /// Wrapper for repeated storage map entries (without proofs). + /// Used when all entries are requested for small maps. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct AllMapEntries { + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec, + } + /// Nested message and enum types in `AllMapEntries`. 
+ pub mod all_map_entries { + /// Definition of individual storage entries. + #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + pub struct StorageMapEntry { + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option< + super::super::super::super::primitives::Digest, + >, + } + } + /// The map entries (with or without proofs). Empty when too_many_entries is true. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Entries { + /// All storage entries without proofs (for small maps or full requests). + #[prost(message, tag = "3")] + AllEntries(AllMapEntries), + /// Specific entries with their SMT proofs (for partial requests). + #[prost(message, tag = "4")] + EntriesWithProofs(MapEntriesWithProofs), + } + } +} +/// List of nullifiers to return proofs for. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NullifierList { + /// List of nullifiers to return proofs for. + #[prost(message, repeated, tag = "1")] + pub nullifiers: ::prost::alloc::vec::Vec, +} +/// Represents the result of checking nullifiers. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckNullifiersResponse { + /// Each requested nullifier has its corresponding nullifier proof at the same position. + #[prost(message, repeated, tag = "1")] + pub proofs: ::prost::alloc::vec::Vec, +} +/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncNullifiersRequest { + /// Block number from which the nullifiers are requested (inclusive). + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Number of bits used for nullifier prefix. Currently the only supported value is 16. + #[prost(uint32, tag = "2")] + pub prefix_len: u32, + /// List of nullifiers to check. 
Each nullifier is specified by its prefix with length equal + /// to `prefix_len`. + #[prost(uint32, repeated, tag = "3")] + pub nullifiers: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing nullifiers. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncNullifiersResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// List of nullifiers matching the prefixes specified in the request. + #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `SyncNullifiersResponse`. +pub mod sync_nullifiers_response { + /// Represents a single nullifier update. + #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + pub struct NullifierUpdate { + /// Nullifier ID. + #[prost(message, optional, tag = "1")] + pub nullifier: ::core::option::Option, + /// Block number. + #[prost(fixed32, tag = "2")] + pub block_num: u32, + } +} +/// Account vault synchronization request. +/// +/// Allows requesters to sync asset values for specific public accounts within a block range. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncAccountVaultRequest { + /// Block range from which to start synchronizing. + /// + /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + /// otherwise an error will be returned. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Account for which we want to sync asset vault. + #[prost(message, optional, tag = "2")] + pub account_id: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncAccountVaultResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// List of asset updates for the account. 
+ /// + /// Multiple updates can be returned for a single asset, and the one with a higher `block_num` + /// is expected to be retained by the caller. + #[prost(message, repeated, tag = "2")] + pub updates: ::prost::alloc::vec::Vec, +} +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct AccountVaultUpdate { + /// Vault key associated with the asset. + #[prost(message, optional, tag = "1")] + pub vault_key: ::core::option::Option, + /// Asset value related to the vault key. + /// If not present, the asset was removed from the vault. + #[prost(message, optional, tag = "2")] + pub asset: ::core::option::Option, + /// Block number at which the above asset was updated in the account vault. + #[prost(fixed32, tag = "3")] + pub block_num: u32, +} +/// Note synchronization request. +/// +/// Specifies note tags that requester is interested in. The server will return the first block which +/// contains a note matching `note_tags` or the chain tip. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncNotesRequest { + /// Block range from which to start synchronizing. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Specifies the tags which the requester is interested in. + #[prost(fixed32, repeated, tag = "2")] + pub note_tags: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing notes request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncNotesResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// Block header of the block with the first note matching the specified criteria. + #[prost(message, optional, tag = "2")] + pub block_header: ::core::option::Option, + /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. 
+ /// + /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of + /// an MMR of forest `chain_tip` with this path. + #[prost(message, optional, tag = "3")] + pub mmr_path: ::core::option::Option, + /// List of all notes together with the Merkle paths from `response.block_header.note_root`. + #[prost(message, repeated, tag = "4")] + pub notes: ::prost::alloc::vec::Vec, +} +/// State synchronization request. +/// +/// Specifies state updates the requester is interested in. The server will return the first block which +/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to +/// `account_ids` for that block range. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStateRequest { + /// Last block known by the requester. The response will contain data starting from the next block, + /// until the first block which contains a note of matching the requested tag, or the chain tip + /// if there are no notes. + #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// Accounts' commitment to include in the response. + /// + /// An account commitment will be included if-and-only-if it is the latest update. Meaning it is + /// possible there was an update to the account for the given range, but if it is not the latest, + /// it won't be included in the response. + #[prost(message, repeated, tag = "2")] + pub account_ids: ::prost::alloc::vec::Vec, + /// Specifies the tags which the requester is interested in. + #[prost(fixed32, repeated, tag = "3")] + pub note_tags: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing state request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStateResponse { + /// Number of the latest block in the chain. + #[prost(fixed32, tag = "1")] + pub chain_tip: u32, + /// Block header of the block with the first note matching the specified criteria. 
+ #[prost(message, optional, tag = "2")] + pub block_header: ::core::option::Option, + /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. + #[prost(message, optional, tag = "3")] + pub mmr_delta: ::core::option::Option, + /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. + #[prost(message, repeated, tag = "5")] + pub accounts: ::prost::alloc::vec::Vec, + /// List of transactions executed against requested accounts between `request.block_num + 1` and + /// `response.block_header.block_num`. + #[prost(message, repeated, tag = "6")] + pub transactions: ::prost::alloc::vec::Vec, + /// List of all notes together with the Merkle paths from `response.block_header.note_root`. + #[prost(message, repeated, tag = "7")] + pub notes: ::prost::alloc::vec::Vec, +} +/// Storage map synchronization request. +/// +/// Allows requesters to sync storage map values for specific public accounts within a block range, +/// with support for cursor-based pagination to handle large storage maps. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncStorageMapsRequest { + /// Block range from which to start synchronizing. + /// + /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + /// otherwise an error will be returned. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Account for which we want to sync storage maps. + #[prost(message, optional, tag = "3")] + pub account_id: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStorageMapsResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// The list of storage map updates. 
+ /// + /// Multiple updates can be returned for a single slot index and key combination, and the one + /// with a higher `block_num` is expected to be retained by the caller. + #[prost(message, repeated, tag = "2")] + pub updates: ::prost::alloc::vec::Vec, +} +/// Represents a single storage map update. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct StorageMapUpdate { + /// Block number in which the slot was updated. + #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// Storage slot name. + #[prost(string, tag = "2")] + pub slot_name: ::prost::alloc::string::String, + /// The storage map key. + #[prost(message, optional, tag = "3")] + pub key: ::core::option::Option, + /// The storage map value. + #[prost(message, optional, tag = "4")] + pub value: ::core::option::Option, +} +/// Represents a block range. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockRange { + /// Block number from which to start (inclusive). + #[prost(fixed32, tag = "1")] + pub block_from: u32, + /// Block number up to which to check (inclusive). If not specified, checks up to the latest block. + #[prost(fixed32, optional, tag = "2")] + pub block_to: ::core::option::Option, +} +/// Represents pagination information for chunked responses. +/// +/// Pagination is done using block numbers as the axis, allowing requesters to request +/// data in chunks by specifying block ranges and continuing from where the previous +/// response left off. +/// +/// To request the next chunk, the requester should use `block_num + 1` from the previous response +/// as the `block_from` for the next request. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct PaginationInfo { + /// Current chain tip + #[prost(fixed32, tag = "1")] + pub chain_tip: u32, + /// The block number of the last check included in this response. + /// + /// For chunked responses, this may be less than `request.block_range.block_to`. 
+ /// If it is less than request.block_range.block_to, the user is expected to make a subsequent request + /// starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). + #[prost(fixed32, tag = "2")] + pub block_num: u32, +} +/// Transactions synchronization request. +/// +/// Allows requesters to sync transactions for specific accounts within a block range. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncTransactionsRequest { + /// Block range from which to start synchronizing. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Accounts to sync transactions for. + #[prost(message, repeated, tag = "2")] + pub account_ids: ::prost::alloc::vec::Vec, +} +/// Represents the result of syncing transactions request. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncTransactionsResponse { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// List of transaction records. + #[prost(message, repeated, tag = "2")] + pub transactions: ::prost::alloc::vec::Vec, +} +/// Represents a transaction record. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionRecord { + /// Block number in which the transaction was included. + #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// A transaction header. + #[prost(message, optional, tag = "2")] + pub header: ::core::option::Option, +} +/// Represents the query parameter limits for RPC endpoints. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RpcLimits { + /// Maps RPC endpoint names to their parameter limits. 
+ /// Key: endpoint name (e.g., "CheckNullifiers", "SyncState") + /// Value: map of parameter names to their limit values + #[prost(map = "string, message", tag = "1")] + pub endpoints: ::std::collections::HashMap< + ::prost::alloc::string::String, + EndpointLimits, + >, +} +/// Represents the parameter limits for a single endpoint. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EndpointLimits { + /// Maps parameter names to their limit values. + /// Key: parameter name (e.g., "nullifier", "account_id") + /// Value: limit value + #[prost(map = "string, uint32", tag = "1")] + pub parameters: ::std::collections::HashMap<::prost::alloc::string::String, u32>, +} /// Generated client implementations. pub mod api_client { #![allow( @@ -128,12 +713,24 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "Status")); self.inner.unary(req, path, codec).await } - /// Returns a nullifier proof for each of the requested nullifiers. + /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. + /// + /// Verify proofs against the nullifier tree root in the latest block header. pub async fn check_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -150,38 +747,12 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "CheckNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns the latest state of an account with the specified ID. 
- pub async fn get_account_details( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc.Api/GetAccountDetails", - ); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccountDetails")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state proof of the specified account. - pub async fn get_account_proof( + /// Returns the latest details of the specified account. + pub async fn get_account( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::AccountProofRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -193,9 +764,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetAccountProof"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetAccount"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccountProof")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetAccount")); self.inner.unary(req, path, codec).await } /// Returns raw block data for the specified block number. @@ -224,11 +795,9 @@ pub mod api_client { /// and current chain length to authenticate the block's inclusion. 
pub async fn get_block_header_by_number( &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -275,7 +844,7 @@ pub mod api_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -295,16 +864,14 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "GetNoteScriptByRoot")); self.inner.unary(req, path, codec).await } - /// Submits proven transaction to the Miden network. + /// Submits proven transaction to the Miden network. Returns the node's current block height. pub async fn submit_proven_transaction( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransaction, >, ) -> std::result::Result< - tonic::Response< - super::super::block_producer::SubmitProvenTransactionResponse, - >, + tonic::Response, tonic::Status, > { self.inner @@ -334,13 +901,15 @@ pub mod api_client { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. pub async fn submit_proven_batch( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransactionBatch, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -364,11 +933,9 @@ pub mod api_client { /// Note that only 16-bit prefixes are supported at this time. 
pub async fn sync_nullifiers( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncNullifiersRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -388,11 +955,9 @@ pub mod api_client { /// Returns account vault updates for specified account within a block range. pub async fn sync_account_vault( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncAccountVaultRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -420,9 +985,9 @@ pub mod api_client { /// tip of the chain. pub async fn sync_notes( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -456,9 +1021,9 @@ pub mod api_client { /// additional filtering of that data on its side. pub async fn sync_state( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -478,11 +1043,9 @@ pub mod api_client { /// Returns storage map updates for specified account and storage slots within a block range. pub async fn sync_storage_maps( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncStorageMapsRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -502,11 +1065,9 @@ pub mod api_client { /// Returns transactions records for specific accounts within a block range. 
pub async fn sync_transactions( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncTransactionsRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -523,6 +1084,29 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); self.inner.unary(req, path, codec).await } + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. + pub async fn get_limits( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -543,30 +1127,31 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result, tonic::Status>; - /// Returns a nullifier proof for each of the requested nullifiers. + /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. 
+ /// + /// Verify proofs against the nullifier tree root in the latest block header. async fn check_nullifiers( &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of an account with the specified ID. - async fn get_account_details( - &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; - /// Returns the latest state proof of the specified account. - async fn get_account_proof( + /// Returns the latest details of the specified account. + async fn get_account( &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; /// Returns raw block data for the specified block number. async fn get_block_by_number( &self, @@ -579,9 +1164,9 @@ pub mod api_server { /// and current chain length to authenticate the block's inclusion. async fn get_block_header_by_number( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns a list of notes matching the provided note IDs. @@ -596,18 +1181,13 @@ pub mod api_server { async fn get_note_script_by_root( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits proven transaction to the Miden network. + ) -> std::result::Result, tonic::Status>; + /// Submits proven transaction to the Miden network. Returns the node's current block height. async fn submit_proven_transaction( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response< - super::super::block_producer::SubmitProvenTransactionResponse, - >, + tonic::Response, tonic::Status, >; /// Submits a proven batch of transactions to the Miden network. 
@@ -620,11 +1200,13 @@ pub mod api_server { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. async fn submit_proven_batch( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. @@ -632,17 +1214,17 @@ pub mod api_server { /// Note that only 16-bit prefixes are supported at this time. async fn sync_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns account vault updates for specified account within a block range. async fn sync_account_vault( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. @@ -656,9 +1238,9 @@ pub mod api_server { /// tip of the chain. async fn sync_notes( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the latest state of the chain @@ -678,27 +1260,36 @@ pub mod api_server { /// additional filtering of that data on its side. async fn sync_state( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns storage map updates for specified account and storage slots within a block range. 
async fn sync_storage_maps( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns transactions records for specific accounts within a block range. async fn sync_transactions( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. + async fn get_limits( + &self, + request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status>; } /// RPC API for the RPC component #[derive(Debug)] @@ -819,20 +1410,16 @@ pub mod api_server { "/rpc.Api/CheckNullifiers" => { #[allow(non_camel_case_types)] struct CheckNullifiersSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService + impl tonic::server::UnaryService for CheckNullifiersSvc { - type Response = super::super::rpc_store::CheckNullifiersResponse; + type Response = super::CheckNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::NullifierList, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -863,73 +1450,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/GetAccountDetails" => { - #[allow(non_camel_case_types)] - struct GetAccountDetailsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for GetAccountDetailsSvc { - type Response = super::super::account::AccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - 
::get_account_details(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountDetailsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetAccountProof" => { + "/rpc.Api/GetAccount" => { #[allow(non_camel_case_types)] - struct GetAccountProofSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::AccountProofRequest, - > for GetAccountProofSvc { - type Response = super::super::rpc_store::AccountProofResponse; + struct GetAccountSvc(pub Arc); + impl tonic::server::UnaryService + for GetAccountSvc { + type Response = super::AccountResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::AccountProofRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_account_proof(&inner, request).await + ::get_account(&inner, request).await }; Box::pin(fut) } @@ -940,7 +1477,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetAccountProofSvc(inner); + let method = GetAccountSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) 
.apply_compression_config( @@ -1008,19 +1545,16 @@ pub mod api_server { struct GetBlockHeaderByNumberSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; + > tonic::server::UnaryService + for GetBlockHeaderByNumberSvc { + type Response = super::BlockHeaderByNumberResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1104,7 +1638,7 @@ pub mod api_server { T: Api, > tonic::server::UnaryService for GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; + type Response = super::MaybeNoteScript; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1150,7 +1684,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransaction, > for SubmitProvenTransactionSvc { - type Response = super::super::block_producer::SubmitProvenTransactionResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1198,7 +1732,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransactionBatch, > for SubmitProvenBatchSvc { - type Response = super::super::block_producer::SubmitProvenBatchResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1243,19 +1777,16 @@ pub mod api_server { struct SyncNullifiersSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncNullifiersRequest, - > for SyncNullifiersSvc { - type Response = super::super::rpc_store::SyncNullifiersResponse; + > tonic::server::UnaryService + for SyncNullifiersSvc { + 
type Response = super::SyncNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncNullifiersRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1291,19 +1822,16 @@ pub mod api_server { struct SyncAccountVaultSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncAccountVaultRequest, - > for SyncAccountVaultSvc { - type Response = super::super::rpc_store::SyncAccountVaultResponse; + > tonic::server::UnaryService + for SyncAccountVaultSvc { + type Response = super::SyncAccountVaultResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncAccountVaultRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1337,21 +1865,16 @@ pub mod api_server { "/rpc.Api/SyncNotes" => { #[allow(non_camel_case_types)] struct SyncNotesSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncNotesRequest, - > for SyncNotesSvc { - type Response = super::super::rpc_store::SyncNotesResponse; + impl tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::SyncNotesResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncNotesRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1385,21 +1908,16 @@ pub mod api_server { "/rpc.Api/SyncState" => { #[allow(non_camel_case_types)] struct SyncStateSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncStateRequest, - > for SyncStateSvc { - type Response = super::super::rpc_store::SyncStateResponse; + impl tonic::server::UnaryService + for 
SyncStateSvc { + type Response = super::SyncStateResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncStateRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1435,19 +1953,16 @@ pub mod api_server { struct SyncStorageMapsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncStorageMapsRequest, - > for SyncStorageMapsSvc { - type Response = super::super::rpc_store::SyncStorageMapsResponse; + > tonic::server::UnaryService + for SyncStorageMapsSvc { + type Response = super::SyncStorageMapsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncStorageMapsRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1483,19 +1998,16 @@ pub mod api_server { struct SyncTransactionsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncTransactionsRequest, - > for SyncTransactionsSvc { - type Response = super::super::rpc_store::SyncTransactionsResponse; + > tonic::server::UnaryService + for SyncTransactionsSvc { + type Response = super::SyncTransactionsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncTransactionsRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1526,6 +2038,45 @@ pub mod api_server { }; Box::pin(fut) } + "/rpc.Api/GetLimits" => { + #[allow(non_camel_case_types)] + struct GetLimitsSvc(pub Arc); + impl tonic::server::UnaryService<()> for GetLimitsSvc { + type Response = super::RpcLimits; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> 
Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_limits(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetLimitsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/crates/proto/src/generated/rpc_store.rs b/crates/proto/src/generated/rpc_store.rs deleted file mode 100644 index 187f559ef0..0000000000 --- a/crates/proto/src/generated/rpc_store.rs +++ /dev/null @@ -1,1811 +0,0 @@ -// This file is @generated by prost-build. -/// Represents the status of the store. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StoreStatus { - /// The store's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The store's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "3")] - pub chain_tip: u32, -} -/// Returns the latest state proof of the specified account. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofRequest { - /// ID of the account for which we want to get data - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Block at which we'd like to get this data. 
If present, must be close to the chain tip. - /// If not present, data from the latest block will be returned. - #[prost(message, optional, tag = "2")] - pub block_num: ::core::option::Option, - /// Request for additional account details; valid only for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountProofRequest`. -pub mod account_proof_request { - /// Request the details for a public account. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetailRequest { - /// Last known code commitment to the requester. The response will include account code - /// only if its commitment is different from this value. - /// - /// If the field is ommiteed, the response will not include the account code. - #[prost(message, optional, tag = "1")] - pub code_commitment: ::core::option::Option, - /// Last known asset vault commitment to the requester. The response will include asset vault data - /// only if its commitment is different from this value. If the value is not present in the - /// request, the response will not contain one either. - /// If the number of to-be-returned asset entries exceed a threshold, they have to be requested - /// separately, which is signaled in the response message with dedicated flag. - #[prost(message, optional, tag = "2")] - pub asset_vault_commitment: ::core::option::Option< - super::super::primitives::Digest, - >, - /// Additional request per storage map. - #[prost(message, repeated, tag = "3")] - pub storage_maps: ::prost::alloc::vec::Vec< - account_detail_request::StorageMapDetailRequest, - >, - } - /// Nested message and enum types in `AccountDetailRequest`. - pub mod account_detail_request { - /// Represents a storage slot index and the associated map keys. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageMapDetailRequest { - /// Storage slot index (`\[0..255\]`). 
- #[prost(uint32, tag = "1")] - pub slot_index: u32, - #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] - pub slot_data: ::core::option::Option, - } - /// Nested message and enum types in `StorageMapDetailRequest`. - pub mod storage_map_detail_request { - /// Indirection required for use in `oneof {..}` block. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapKeys { - /// A list of map keys associated with this storage slot. - #[prost(message, repeated, tag = "1")] - pub map_keys: ::prost::alloc::vec::Vec< - super::super::super::super::primitives::Digest, - >, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum SlotData { - /// Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - /// the response will not contain them but must be requested separately. - #[prost(bool, tag = "2")] - AllEntries(bool), - /// A list of map keys associated with the given storage slot identified by `slot_index`. - #[prost(message, tag = "3")] - MapKeys(MapKeys), - } - } - } -} -/// Represents the result of getting account proof. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofResponse { - /// The block number at which the account witness was created and the account details were observed. - #[prost(message, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Account ID, current state commitment, and SMT path. - #[prost(message, optional, tag = "2")] - pub witness: ::core::option::Option, - /// Additional details for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountProofResponse`. -pub mod account_proof_response { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetails { - /// Account header. 
- #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Account storage data - #[prost(message, optional, tag = "2")] - pub storage_details: ::core::option::Option, - /// Account code; empty if code commitments matched or none was requested. - #[prost(bytes = "vec", optional, tag = "3")] - pub code: ::core::option::Option<::prost::alloc::vec::Vec>, - /// Account asset vault data; empty if vault commitments matched or the requester - /// omitted it in the request. - #[prost(message, optional, tag = "4")] - pub vault_details: ::core::option::Option, - } -} -/// Account vault details for AccountProofResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountVaultDetails { - /// A flag that is set to true if the account contains too many assets. This indicates - /// to the user that `SyncAccountVault` endpoint should be used to retrieve the - /// account's assets - #[prost(bool, tag = "1")] - pub too_many_assets: bool, - /// When too_many_assets == false, this will contain the list of assets in the - /// account's vault - #[prost(message, repeated, tag = "2")] - pub assets: ::prost::alloc::vec::Vec, -} -/// Account storage details for AccountProofResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountStorageDetails { - /// Account storage header (storage slot info for up to 256 slots) - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Additional data for the requested storage maps - #[prost(message, repeated, tag = "2")] - pub map_details: ::prost::alloc::vec::Vec< - account_storage_details::AccountStorageMapDetails, - >, -} -/// Nested message and enum types in `AccountStorageDetails`. 
-pub mod account_storage_details { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountStorageMapDetails { - /// slot index of the storage map - #[prost(uint32, tag = "1")] - pub slot_index: u32, - /// A flag that is set to `true` if the number of to-be-returned entries in the - /// storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - /// endpoint should be used to get all storage map data. - #[prost(bool, tag = "2")] - pub too_many_entries: bool, - /// By default we provide all storage entries. - #[prost(message, optional, tag = "3")] - pub entries: ::core::option::Option, - } - /// Nested message and enum types in `AccountStorageMapDetails`. - pub mod account_storage_map_details { - /// Wrapper for repeated storage map entries - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapEntries { - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, - } - /// Nested message and enum types in `MapEntries`. - pub mod map_entries { - /// Definition of individual storage entries. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct StorageMapEntry { - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - } - } - } -} -/// List of nullifiers to return proofs for. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NullifierList { - /// List of nullifiers to return proofs for. - #[prost(message, repeated, tag = "1")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of checking nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersResponse { - /// Each requested nullifier has its corresponding nullifier proof at the same position. 
- #[prost(message, repeated, tag = "1")] - pub proofs: ::prost::alloc::vec::Vec, -} -/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNullifiersRequest { - /// Block number from which the nullifiers are requested (inclusive). - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Number of bits used for nullifier prefix. Currently the only supported value is 16. - #[prost(uint32, tag = "2")] - pub prefix_len: u32, - /// List of nullifiers to check. Each nullifier is specified by its prefix with length equal - /// to `prefix_len`. - #[prost(uint32, repeated, tag = "3")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNullifiersResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of nullifiers matching the prefixes specified in the request. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `SyncNullifiersResponse`. -pub mod sync_nullifiers_response { - /// Represents a single nullifier update. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierUpdate { - /// Nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// Block number. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// State synchronization request. -/// -/// Specifies state updates the requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -/// `account_ids` for that block range. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateRequest { - /// Last block known by the requester. The response will contain data starting from the next block, - /// until the first block which contains a note of matching the requested tag, or the chain tip - /// if there are no notes. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Accounts' commitment to include in the response. - /// - /// An account commitment will be included if-and-only-if it is the latest update. Meaning it is - /// possible there was an update to the account for the given range, but if it is not the latest, - /// it won't be included in the response. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "3")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing state request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateResponse { - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - #[prost(message, optional, tag = "3")] - pub mmr_delta: ::core::option::Option, - /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - #[prost(message, repeated, tag = "5")] - pub accounts: ::prost::alloc::vec::Vec, - /// List of transactions executed against requested accounts between `request.block_num + 1` and - /// `response.block_header.block_num`. 
- #[prost(message, repeated, tag = "6")] - pub transactions: ::prost::alloc::vec::Vec, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "7")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Account vault synchronization request. -/// -/// Allows requesters to sync asset values for specific public accounts within a block range. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncAccountVaultRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync asset vault. - #[prost(message, optional, tag = "2")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncAccountVaultResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of asset updates for the account. - /// - /// Multiple updates can be returned for a single asset, and the one with a higher `block_num` - /// is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountVaultUpdate { - /// Vault key associated with the asset. - #[prost(message, optional, tag = "1")] - pub vault_key: ::core::option::Option, - /// Asset value related to the vault key. - /// If not present, the asset was removed from the vault. - #[prost(message, optional, tag = "2")] - pub asset: ::core::option::Option, - /// Block number at which the above asset was updated in the account vault. - #[prost(fixed32, tag = "3")] - pub block_num: u32, -} -/// Note synchronization request. 
-/// -/// Specifies note tags that requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNotesRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "2")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing notes request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNotesResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - /// - /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of - /// an MMR of forest `chain_tip` with this path. - #[prost(message, optional, tag = "3")] - pub mmr_path: ::core::option::Option, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "4")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Storage map synchronization request. -/// -/// Allows requesters to sync storage map values for specific public accounts within a block range, -/// with support for cursor-based pagination to handle large storage maps. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncStorageMapsRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. 
- #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync storage maps. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStorageMapsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// The list of storage map updates. - /// - /// Multiple updates can be returned for a single slot index and key combination, and the one - /// with a higher `block_num` is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -/// Represents a single storage map update. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StorageMapUpdate { - /// Block number in which the slot was updated. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Slot index (\[0..255\]). - #[prost(uint32, tag = "2")] - pub slot_index: u32, - /// The storage map key. - #[prost(message, optional, tag = "3")] - pub key: ::core::option::Option, - /// The storage map value. - #[prost(message, optional, tag = "4")] - pub value: ::core::option::Option, -} -/// Represents a block range. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockRange { - /// Block number from which to start (inclusive). - #[prost(fixed32, tag = "1")] - pub block_from: u32, - /// Block number up to which to check (inclusive). If not specified, checks up to the latest block. - #[prost(fixed32, optional, tag = "2")] - pub block_to: ::core::option::Option, -} -/// Represents pagination information for chunked responses. -/// -/// Pagination is done using block numbers as the axis, allowing requesters to request -/// data in chunks by specifying block ranges and continuing from where the previous -/// response left off. 
-/// -/// To request the next chunk, the requester should use `block_num + 1` from the previous response -/// as the `block_from` for the next request. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct PaginationInfo { - /// Current chain tip - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// The block number of the last check included in this response. - /// - /// For chunked responses, this may be less than `request.block_range.block_to`. - /// If it is less than request.block_range.block_to, the user is expected to make a subsequent request - /// starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - #[prost(fixed32, tag = "2")] - pub block_num: u32, -} -/// Transactions synchronization request. -/// -/// Allows requesters to sync transactions for specific accounts within a block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Accounts to sync transactions for. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing transactions request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of transaction records. - #[prost(message, repeated, tag = "2")] - pub transaction_records: ::prost::alloc::vec::Vec, -} -/// Represents a transaction record. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionRecord { - /// Block number in which the transaction was included. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// A transaction header. 
- #[prost(message, optional, tag = "2")] - pub transaction_header: ::core::option::Option< - super::transaction::TransactionHeader, - >, -} -/// Generated client implementations. -pub mod rpc_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the RPC component - #[derive(Debug, Clone)] - pub struct RpcClient { - inner: tonic::client::Grpc, - } - impl RpcClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl RpcClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> RpcClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - RpcClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. 
- #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "Status")); - self.inner.unary(req, path, codec).await - } - /// Returns a nullifier proof for each of the requested nullifiers. - pub async fn check_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/CheckNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "CheckNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of an account with the specified ID. 
- pub async fn get_account_details( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetAccountDetails", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetAccountDetails")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state proof of the specified account. - pub async fn get_account_proof( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetAccountProof", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetAccountProof")); - self.inner.unary(req, path, codec).await - } - /// Returns raw block data for the specified block number. 
- pub async fn get_block_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetBlockByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetBlockByNumber")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of committed notes matching the provided note IDs. 
- pub async fn get_notes_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetNotesById", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetNotesById")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. 
- pub async fn sync_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - pub async fn sync_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/SyncNotes"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "SyncNotes")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. 
- /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - pub async fn sync_state( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/SyncState"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "SyncState")); - self.inner.unary(req, path, codec).await - } - /// Returns account vault updates for specified account within a block range. 
- pub async fn sync_account_vault( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncAccountVault", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncAccountVault")); - self.inner.unary(req, path, codec).await - } - /// Returns storage map updates for specified account and storage slots within a block range. - pub async fn sync_storage_maps( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncStorageMaps", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncStorageMaps")); - self.inner.unary(req, path, codec).await - } - /// Returns transactions records for specific accounts within a block range. 
- pub async fn sync_transactions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncTransactions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod rpc_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. - #[async_trait] - pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Returns a nullifier proof for each of the requested nullifiers. - async fn check_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of an account with the specified ID. - async fn get_account_details( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state proof of the specified account. - async fn get_account_proof( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns raw block data for the specified block number. - async fn get_block_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Retrieves block header by given block number. 
Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of committed notes matching the provided note IDs. - async fn get_notes_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - async fn sync_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. 
It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - async fn sync_state( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. 
- async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the RPC component - #[derive(Debug)] - pub struct RpcServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl RpcServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for RpcServer - where - T: Rpc, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/rpc_store.Rpc/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::StoreStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/CheckNullifiers" => { - #[allow(non_camel_case_types)] - struct CheckNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = 
super::CheckNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::check_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = CheckNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetAccountDetails" => { - #[allow(non_camel_case_types)] - struct GetAccountDetailsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetAccountDetailsSvc { - type Response = super::super::account::AccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_details(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountDetailsSvc(inner); - let codec = 
tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetAccountProof" => { - #[allow(non_camel_case_types)] - struct GetAccountProofSvc(pub Arc); - impl tonic::server::UnaryService - for GetAccountProofSvc { - type Response = super::AccountProofResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_proof(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountProofSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetBlockByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetBlockByNumberSvc { - type Response = super::super::blockchain::MaybeBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::BlockNumber, 
- >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_by_number(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = 
tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetNotesById" => { - #[allow(non_camel_case_types)] - struct GetNotesByIdSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNotesByIdSvc { - type Response = super::super::note::CommittedNoteList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_notes_by_id(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNotesByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future 
{ - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncNullifiers" => { - #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - 
max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncNotes" => { - #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_notes(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncState" => { - #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_state(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = 
self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStateSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncAccountVault" => { - #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_vault(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountVaultSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncStorageMaps" => { - #[allow(non_camel_case_types)] - struct SyncStorageMapsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for 
SyncStorageMapsSvc { - type Response = super::SyncStorageMapsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_storage_maps(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStorageMapsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncTransactions" => { - #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = 
tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for RpcServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "rpc_store.Rpc"; - impl tonic::server::NamedService for RpcServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/shared.rs b/crates/proto/src/generated/shared.rs deleted file mode 100644 index f79b9117e5..0000000000 --- a/crates/proto/src/generated/shared.rs +++ /dev/null @@ -1,34 +0,0 @@ -// This file is @generated by prost-build. -/// Returns the block header corresponding to the requested block number, as well as the merkle -/// path and current forest which validate the block's inclusion in the chain. -/// -/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockHeaderByNumberRequest { - /// The target block height, defaults to latest if not provided. - #[prost(uint32, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Whether or not to return authentication data for the block header. - #[prost(bool, optional, tag = "2")] - pub include_mmr_proof: ::core::option::Option, -} -/// Represents the result of getting a block header by block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockHeaderByNumberResponse { - /// The requested block header. - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - #[prost(message, optional, tag = "2")] - pub mmr_path: ::core::option::Option, - /// Current chain length. - #[prost(fixed32, optional, tag = "3")] - pub chain_length: ::core::option::Option, -} -/// Represents a note script or nothing. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct MaybeNoteScript { - /// The script for a note by its root. - #[prost(message, optional, tag = "1")] - pub script: ::core::option::Option, -} diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs new file mode 100644 index 0000000000..4892b7b9c9 --- /dev/null +++ b/crates/proto/src/generated/store.rs @@ -0,0 +1,3207 @@ +// This file is @generated by prost-build. +/// Returns data required to prove the next block. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockInputsRequest { + /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses. + #[prost(message, repeated, tag = "1")] + pub account_ids: ::prost::alloc::vec::Vec, + /// Nullifiers of all notes consumed by the block for which to retrieve witnesses. 
+ /// + /// Due to note erasure it will generally not be possible to know the exact set of nullifiers + /// a block will create, unless we pre-execute note erasure. So in practice, this set of + /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a + /// superset of the nullifiers the block may create. + /// + /// However, if it is known that a certain note will be erased, it would not be necessary to + /// provide a nullifier witness for it. + #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec, + /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. + #[prost(message, repeated, tag = "3")] + pub unauthenticated_notes: ::prost::alloc::vec::Vec, + /// Array of block numbers referenced by all batches in the block. + #[prost(fixed32, repeated, tag = "4")] + pub reference_blocks: ::prost::alloc::vec::Vec, +} +/// Represents the result of getting block inputs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockInputs { + /// The latest block header. + #[prost(message, optional, tag = "1")] + pub latest_block_header: ::core::option::Option, + /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in + /// the store**. + #[prost(message, repeated, tag = "2")] + pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec< + super::note::NoteInclusionInBlockProof, + >, + /// The serialized chain MMR which includes proofs for all blocks referenced by the + /// above note inclusion proofs as well as proofs for inclusion of the requested blocks + /// referenced by the batches in the block. + #[prost(bytes = "vec", tag = "3")] + pub partial_block_chain: ::prost::alloc::vec::Vec, + /// The state commitments of the requested accounts and their authentication paths. + #[prost(message, repeated, tag = "4")] + pub account_witnesses: ::prost::alloc::vec::Vec, + /// The requested nullifiers and their authentication paths. 
+ #[prost(message, repeated, tag = "5")] + pub nullifier_witnesses: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `BlockInputs`. +pub mod block_inputs { + /// A nullifier returned as a response to the `GetBlockInputs`. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct NullifierWitness { + /// The nullifier. + #[prost(message, optional, tag = "1")] + pub nullifier: ::core::option::Option, + /// The SMT proof to verify the nullifier's inclusion in the nullifier tree. + #[prost(message, optional, tag = "2")] + pub opening: ::core::option::Option, + } +} +/// Returns the inputs for a transaction batch. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchInputsRequest { + /// List of unauthenticated note commitments to be queried from the database. + #[prost(message, repeated, tag = "1")] + pub note_commitments: ::prost::alloc::vec::Vec, + /// Set of block numbers referenced by transactions. + #[prost(fixed32, repeated, tag = "2")] + pub reference_blocks: ::prost::alloc::vec::Vec, +} +/// Represents the result of getting batch inputs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchInputs { + /// The block header that the transaction batch should reference. + #[prost(message, optional, tag = "1")] + pub batch_reference_block_header: ::core::option::Option< + super::blockchain::BlockHeader, + >, + /// Proof of each *found* unauthenticated note's inclusion in a block. + #[prost(message, repeated, tag = "2")] + pub note_proofs: ::prost::alloc::vec::Vec, + /// The serialized chain MMR which includes proofs for all blocks referenced by the + /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced + /// by the transactions in the batch. + #[prost(bytes = "vec", tag = "3")] + pub partial_block_chain: ::prost::alloc::vec::Vec, +} +/// Returns data required to validate a new transaction. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionInputsRequest { + /// ID of the account against which a transaction is executed. + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// Set of nullifiers consumed by this transaction. + #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec, + /// Set of unauthenticated note commitments to check for existence on-chain. + /// + /// These are notes which were not on-chain at the state the transaction was proven, + /// but could by now be present. + #[prost(message, repeated, tag = "3")] + pub unauthenticated_notes: ::prost::alloc::vec::Vec, +} +/// Represents the result of getting transaction inputs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionInputs { + /// Account state proof. + #[prost(message, optional, tag = "1")] + pub account_state: ::core::option::Option< + transaction_inputs::AccountTransactionInputRecord, + >, + /// List of nullifiers that have been consumed. + #[prost(message, repeated, tag = "2")] + pub nullifiers: ::prost::alloc::vec::Vec< + transaction_inputs::NullifierTransactionInputRecord, + >, + /// List of unauthenticated notes that were not found in the database. + #[prost(message, repeated, tag = "3")] + pub found_unauthenticated_notes: ::prost::alloc::vec::Vec, + /// The node's current block height. + #[prost(fixed32, tag = "4")] + pub block_height: u32, + /// Whether the account ID prefix is unique. Only relevant for account creation requests. + /// + /// TODO: Replace this with an error. When a general error message exists. + #[prost(bool, optional, tag = "5")] + pub new_account_id_prefix_is_unique: ::core::option::Option, +} +/// Nested message and enum types in `TransactionInputs`. +pub mod transaction_inputs { + /// An account returned as a response to the `GetTransactionInputs`. 
+ #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] + pub struct AccountTransactionInputRecord { + /// The account ID. + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// The latest account commitment, zero commitment if the account doesn't exist. + #[prost(message, optional, tag = "2")] + pub account_commitment: ::core::option::Option, + } + /// A nullifier returned as a response to the `GetTransactionInputs`. + #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + pub struct NullifierTransactionInputRecord { + /// The nullifier ID. + #[prost(message, optional, tag = "1")] + pub nullifier: ::core::option::Option, + /// The block at which the nullifier has been consumed, zero if not consumed. + #[prost(fixed32, tag = "2")] + pub block_num: u32, + } +} +/// Account ID prefix. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct AccountIdPrefix { + /// Account ID prefix. + #[prost(fixed32, tag = "1")] + pub account_id_prefix: u32, +} +/// Represents the result of getting network account details by prefix. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct MaybeAccountDetails { + /// Account details. + #[prost(message, optional, tag = "1")] + pub details: ::core::option::Option, +} +/// Returns a paginated list of unconsumed network notes for an account. +/// +/// Notes created or consumed after the specified block are excluded from the result. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct UnconsumedNetworkNotesRequest { + /// This should be null on the first call, and set to the response token until the response token + /// is null, at which point all data has been fetched. + /// + /// Note that this token is only valid if used with the same parameters. + #[prost(uint64, optional, tag = "1")] + pub page_token: ::core::option::Option, + /// Number of notes to retrieve per page. 
+ #[prost(uint64, tag = "2")] + pub page_size: u64, + /// The full account ID to filter notes by. + #[prost(message, optional, tag = "3")] + pub account_id: ::core::option::Option, + /// The block number to filter the returned notes by. + /// + /// Notes that are created or consumed after this block are excluded from the result. + #[prost(fixed32, tag = "4")] + pub block_num: u32, +} +/// Represents the result of getting the unconsumed network notes. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UnconsumedNetworkNotes { + /// An opaque pagination token. + /// + /// Use this in your next request to get the next + /// set of data. + /// + /// Will be null once there is no more data remaining. + #[prost(uint64, optional, tag = "1")] + pub next_token: ::core::option::Option, + /// The list of unconsumed network notes. + #[prost(message, repeated, tag = "2")] + pub notes: ::prost::alloc::vec::Vec, +} +/// Represents the result of getting the network account ids. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NetworkAccountIdList { + /// Pagination information. + #[prost(message, optional, tag = "1")] + pub pagination_info: ::core::option::Option, + /// The list of network account ids. + #[prost(message, repeated, tag = "2")] + pub account_ids: ::prost::alloc::vec::Vec, +} +/// Current blockchain data based on the requested block number. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CurrentBlockchainData { + /// Commitments that represent the current state according to the MMR. + #[prost(message, repeated, tag = "1")] + pub current_peaks: ::prost::alloc::vec::Vec, + /// Current block header. + #[prost(message, optional, tag = "2")] + pub current_block_header: ::core::option::Option, +} +/// Request for vault asset witnesses for a specific account. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VaultAssetWitnessesRequest { + /// The account ID for which to retrieve vault asset witnesses. 
+ #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// Set of asset vault keys to retrieve witnesses for. + #[prost(message, repeated, tag = "2")] + pub vault_keys: ::prost::alloc::vec::Vec, + /// The witnesses returned correspond to the account state at the specified block number. + /// + /// Optional block number. If not provided, uses the latest state. + /// + /// The specified block number should be relatively near the chain tip else an error will be + /// returned. + #[prost(fixed32, optional, tag = "3")] + pub block_num: ::core::option::Option, +} +/// Response containing vault asset witnesses. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VaultAssetWitnessesResponse { + /// Block number at which the witnesses were generated. + /// + /// The witnesses returned corresponds to the account state at the specified block number. + #[prost(fixed32, tag = "1")] + pub block_num: u32, + /// List of asset witnesses. + #[prost(message, repeated, tag = "2")] + pub asset_witnesses: ::prost::alloc::vec::Vec< + vault_asset_witnesses_response::VaultAssetWitness, + >, +} +/// Nested message and enum types in `VaultAssetWitnessesResponse`. +pub mod vault_asset_witnesses_response { + /// A vault asset witness containing the asset and its proof. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct VaultAssetWitness { + /// The SMT opening proof for the asset's inclusion in the vault. + #[prost(message, optional, tag = "1")] + pub proof: ::core::option::Option, + } +} +/// Request for a storage map witness for a specific account and storage slot. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct StorageMapWitnessRequest { + /// The account ID for which to retrieve the storage map witness. + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + /// The raw, user-provided storage map key for which to retrieve the witness. 
+ #[prost(message, optional, tag = "2")] + pub map_key: ::core::option::Option, + /// Optional block number. If not provided, uses the latest state. + /// + /// The witness returned corresponds to the account state at the specified block number. + /// + /// The specified block number should be relatively near the chain tip else an error will be + /// returned. + #[prost(fixed32, optional, tag = "3")] + pub block_num: ::core::option::Option, + /// The storage slot name for the map. + #[prost(string, tag = "4")] + pub slot_name: ::prost::alloc::string::String, +} +/// Response containing a storage map witness. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StorageMapWitnessResponse { + /// The storage map witness. + #[prost(message, optional, tag = "1")] + pub witness: ::core::option::Option, + /// Block number at which the witness was generated. + #[prost(fixed32, tag = "2")] + pub block_num: u32, +} +/// Nested message and enum types in `StorageMapWitnessResponse`. +pub mod storage_map_witness_response { + /// Storage map witness data. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct StorageWitness { + /// The raw, user-provided storage map key. + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option, + /// The SMT opening proof for the key-value pair. + #[prost(message, optional, tag = "3")] + pub proof: ::core::option::Option, + } +} +/// Generated client implementations. +pub mod rpc_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the RPC component + #[derive(Debug, Clone)] + pub struct RpcClient { + inner: tonic::client::Grpc, + } + impl RpcClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl RpcClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> RpcClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + RpcClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Returns the status info. 
+ pub async fn status( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/Status"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "Status")); + self.inner.unary(req, path, codec).await + } + /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof + /// * `single` or `multiple`: Inclusion proof if the nullifier key is present + /// + /// Verify proofs against the nullifier tree root in the latest block header. + pub async fn check_nullifiers( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/CheckNullifiers", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "CheckNullifiers")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest details the specified account. 
+ pub async fn get_account( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetAccount"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetAccount")); + self.inner.unary(req, path, codec).await + } + /// Returns raw block data for the specified block number. + pub async fn get_block_by_number( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetBlockByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetBlockByNumber")); + self.inner.unary(req, path, codec).await + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. 
+ pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetBlockHeaderByNumber")); + self.inner.unary(req, path, codec).await + } + /// Returns a list of committed notes matching the provided note IDs. + pub async fn get_notes_by_id( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetNotesById"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetNotesById")); + self.inner.unary(req, path, codec).await + } + /// Returns the script for a note by its root. 
+ pub async fn get_note_script_by_root( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetNoteScriptByRoot", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetNoteScriptByRoot")); + self.inner.unary(req, path, codec).await + } + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. + /// + /// Note that only 16-bit prefixes are supported at this time. + pub async fn sync_nullifiers( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNullifiers"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNullifiers")); + self.inner.unary(req, path, codec).await + } + /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. + /// + /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for + /// matching notes for. The request will then return the next block containing any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the + /// tip of the chain. 
+ pub async fn sync_notes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNotes"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNotes")); + self.inner.unary(req, path, codec).await + } + /// Returns info which can be used by the requester to sync up to the latest state of the chain + /// for the objects (accounts, notes, nullifiers) the requester is interested in. + /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. requester is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the requester is fully synchronized with the chain. + /// + /// Each request also returns info about new notes, nullifiers etc. created. It also returns + /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain + /// MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags and nullifiers filters contain only high + /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make + /// additional filtering of that data on its side. 
+ pub async fn sync_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncState"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncState")); + self.inner.unary(req, path, codec).await + } + /// Returns account vault updates for specified account within a block range. + pub async fn sync_account_vault( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncAccountVault", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "SyncAccountVault")); + self.inner.unary(req, path, codec).await + } + /// Returns storage map updates for specified account and storage slots within a block range. 
+ pub async fn sync_storage_maps( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncStorageMaps", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncStorageMaps")); + self.inner.unary(req, path, codec).await + } + /// Returns transactions records for specific accounts within a block range. + pub async fn sync_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "SyncTransactions")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod rpc_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. + #[async_trait] + pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { + /// Returns the status info. 
+ async fn status( + &self, + request: tonic::Request<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof + /// * `single` or `multiple`: Inclusion proof if the nullifier key is present + /// + /// Verify proofs against the nullifier tree root in the latest block header. + async fn check_nullifiers( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest details the specified account. + async fn get_account( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns raw block data for the specified block number. + async fn get_block_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of committed notes matching the provided note IDs. + async fn get_notes_by_id( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the script for a note by its root. + async fn get_note_script_by_root( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. 
+ /// + /// Note that only 16-bit prefixes are supported at this time. + async fn sync_nullifiers( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. + /// + /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for + /// matching notes for. The request will then return the next block containing any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the + /// tip of the chain. + async fn sync_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the requester to sync up to the latest state of the chain + /// for the objects (accounts, notes, nullifiers) the requester is interested in. + /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. requester is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the requester is fully synchronized with the chain. + /// + /// Each request also returns info about new notes, nullifiers etc. created. It also returns + /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain + /// MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags and nullifiers filters contain only high + /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make + /// additional filtering of that data on its side. 
+ async fn sync_state( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns account vault updates for specified account within a block range. + async fn sync_account_vault( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns storage map updates for specified account and storage slots within a block range. + async fn sync_storage_maps( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns transactions records for specific accounts within a block range. + async fn sync_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the RPC component + #[derive(Debug)] + pub struct RpcServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl RpcServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for RpcServer + where + T: Rpc, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.Rpc/Status" => { + #[allow(non_camel_case_types)] + struct StatusSvc(pub Arc); + impl tonic::server::UnaryService<()> for StatusSvc { + type Response = super::super::rpc::StoreStatus; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::status(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = StatusSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + 
accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/CheckNullifiers" => { + #[allow(non_camel_case_types)] + struct CheckNullifiersSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for CheckNullifiersSvc { + type Response = super::super::rpc::CheckNullifiersResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::check_nullifiers(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = CheckNullifiersSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetAccount" => { + #[allow(non_camel_case_types)] + struct GetAccountSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetAccountSvc { + type Response = super::super::rpc::AccountResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_account(&inner, request).await + }; + Box::pin(fut) + } + } + let 
accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetAccountSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetBlockByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockByNumberSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetBlockByNumberSvc { + type Response = super::super::blockchain::MaybeBlock; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::BlockNumber, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_by_number(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, 
req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetNotesById" => { + #[allow(non_camel_case_types)] + struct GetNotesByIdSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetNotesByIdSvc { + type Response = super::super::note::CommittedNoteList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_notes_by_id(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings 
= self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNotesByIdSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetNoteScriptByRoot" => { + #[allow(non_camel_case_types)] + struct GetNoteScriptByRootSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetNoteScriptByRootSvc { + type Response = super::super::rpc::MaybeNoteScript; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_note_script_by_root(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNoteScriptByRootSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + 
"/store.Rpc/SyncNullifiers" => { + #[allow(non_camel_case_types)] + struct SyncNullifiersSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncNullifiersRequest, + > for SyncNullifiersSvc { + type Response = super::super::rpc::SyncNullifiersResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncNullifiersRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_nullifiers(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncNullifiersSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncNotes" => { + #[allow(non_camel_case_types)] + struct SyncNotesSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::super::rpc::SyncNotesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_notes(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let 
max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncNotesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncState" => { + #[allow(non_camel_case_types)] + struct SyncStateSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for SyncStateSvc { + type Response = super::super::rpc::SyncStateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_state(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncStateSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncAccountVault" => { + #[allow(non_camel_case_types)] + struct SyncAccountVaultSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + 
super::super::rpc::SyncAccountVaultRequest, + > for SyncAccountVaultSvc { + type Response = super::super::rpc::SyncAccountVaultResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncAccountVaultRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_account_vault(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncAccountVaultSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncStorageMaps" => { + #[allow(non_camel_case_types)] + struct SyncStorageMapsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncStorageMapsRequest, + > for SyncStorageMapsSvc { + type Response = super::super::rpc::SyncStorageMapsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncStorageMapsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_storage_maps(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = 
self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncStorageMapsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncTransactions" => { + #[allow(non_camel_case_types)] + struct SyncTransactionsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncTransactionsRequest, + > for SyncTransactionsSvc { + type Response = super::super::rpc::SyncTransactionsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncTransactionsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_transactions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncTransactionsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = 
http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for RpcServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.Rpc"; + impl tonic::server::NamedService for RpcServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod block_producer_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the BlockProducer component + #[derive(Debug, Clone)] + pub struct BlockProducerClient { + inner: tonic::client::Grpc, + } + impl BlockProducerClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl BlockProducerClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> BlockProducerClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + BlockProducerClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Applies changes of a new block to the DB and in-memory data structures. + pub async fn apply_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/ApplyBlock", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "ApplyBlock")); + self.inner.unary(req, path, codec).await + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("store.BlockProducer", "GetBlockHeaderByNumber"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns data required to prove the next block. 
+ pub async fn get_block_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBlockInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetBlockInputs")); + self.inner.unary(req, path, codec).await + } + /// Returns the inputs for a transaction batch. + pub async fn get_batch_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBatchInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetBatchInputs")); + self.inner.unary(req, path, codec).await + } + /// Returns data required to validate a new transaction. + pub async fn get_transaction_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetTransactionInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetTransactionInputs")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. 
+pub mod block_producer_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. + #[async_trait] + pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { + /// Applies changes of a new block to the DB and in-memory data structures. + async fn apply_block( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns data required to prove the next block. + async fn get_block_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns the inputs for a transaction batch. + async fn get_batch_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns data required to validate a new transaction. 
+ async fn get_transaction_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the BlockProducer component + #[derive(Debug)] + pub struct BlockProducerServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl BlockProducerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for BlockProducerServer + where + T: BlockProducer, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.BlockProducer/ApplyBlock" => { + #[allow(non_camel_case_types)] + struct ApplyBlockSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for ApplyBlockSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::apply_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ApplyBlockSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + 
T: BlockProducer, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBlockInputs" => { + #[allow(non_camel_case_types)] + struct GetBlockInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetBlockInputsSvc { + type Response = super::BlockInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_inputs(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = 
self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBatchInputs" => { + #[allow(non_camel_case_types)] + struct GetBatchInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetBatchInputsSvc { + type Response = super::BatchInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_batch_inputs(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBatchInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetTransactionInputs" => { + #[allow(non_camel_case_types)] + struct GetTransactionInputsSvc(pub Arc); + impl< + T: BlockProducer, + > 
tonic::server::UnaryService + for GetTransactionInputsSvc { + type Response = super::TransactionInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transaction_inputs( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for BlockProducerServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.BlockProducer"; 
+ impl tonic::server::NamedService for BlockProducerServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod ntx_builder_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the network transaction builder component + #[derive(Debug, Clone)] + pub struct NtxBuilderClient { + inner: tonic::client::Grpc, + } + impl NtxBuilderClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl NtxBuilderClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> NtxBuilderClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetBlockHeaderByNumber")); + self.inner.unary(req, path, codec).await + } + /// Returns a paginated list of unconsumed network notes. 
+ pub async fn get_unconsumed_network_notes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetUnconsumedNetworkNotes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("store.NtxBuilder", "GetUnconsumedNetworkNotes"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + /// header for executing network transactions. If the block number is not provided, the latest + /// header and peaks will be retrieved. + pub async fn get_current_blockchain_data( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetCurrentBlockchainData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetCurrentBlockchainData")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest state of a network account with the specified account prefix. 
+ pub async fn get_network_account_details_by_prefix( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "store.NtxBuilder", + "GetNetworkAccountDetailsByPrefix", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Returns a list of all network account ids. + pub async fn get_network_account_ids( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNetworkAccountIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountIds")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest details of the specified account. 
+ pub async fn get_account( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetAccount", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetAccount")); + self.inner.unary(req, path, codec).await + } + /// Returns the script for a note by its root. + pub async fn get_note_script_by_root( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNoteScriptByRoot", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetNoteScriptByRoot")); + self.inner.unary(req, path, codec).await + } + /// Returns vault asset witnesses for the specified account. 
+ pub async fn get_vault_asset_witnesses( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetVaultAssetWitnesses", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetVaultAssetWitnesses")); + self.inner.unary(req, path, codec).await + } + /// Returns a storage map witness for the specified account and storage map entry. + pub async fn get_storage_map_witness( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetStorageMapWitness", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetStorageMapWitness")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod ntx_builder_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. + #[async_trait] + pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. 
+ async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a paginated list of unconsumed network notes. + async fn get_unconsumed_network_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + /// header for executing network transactions. If the block number is not provided, the latest + /// header and peaks will be retrieved. + async fn get_current_blockchain_data( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest state of a network account with the specified account prefix. + async fn get_network_account_details_by_prefix( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of all network account ids. + async fn get_network_account_ids( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest details of the specified account. + async fn get_account( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the script for a note by its root. + async fn get_note_script_by_root( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns vault asset witnesses for the specified account. + async fn get_vault_asset_witnesses( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a storage map witness for the specified account and storage map entry. 
+ async fn get_storage_map_witness( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the network transaction builder component + #[derive(Debug)] + pub struct NtxBuilderServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl NtxBuilderServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for NtxBuilderServer + where + T: NtxBuilder, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.NtxBuilder/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let 
res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetUnconsumedNetworkNotes" => { + #[allow(non_camel_case_types)] + struct GetUnconsumedNetworkNotesSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetUnconsumedNetworkNotesSvc { + type Response = super::UnconsumedNetworkNotes; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_unconsumed_network_notes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetUnconsumedNetworkNotesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetCurrentBlockchainData" => { + #[allow(non_camel_case_types)] + struct GetCurrentBlockchainDataSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService< + super::super::blockchain::MaybeBlockNumber, + > for GetCurrentBlockchainDataSvc { + type Response = super::CurrentBlockchainData; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::MaybeBlockNumber, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + 
::get_current_blockchain_data( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetCurrentBlockchainDataSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { + #[allow(non_camel_case_types)] + struct GetNetworkAccountDetailsByPrefixSvc( + pub Arc, + ); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNetworkAccountDetailsByPrefixSvc { + type Response = super::MaybeAccountDetails; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_network_account_details_by_prefix( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNetworkAccountDetailsByPrefixSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + 
accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNetworkAccountIds" => { + #[allow(non_camel_case_types)] + struct GetNetworkAccountIdsSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNetworkAccountIdsSvc { + type Response = super::NetworkAccountIdList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_network_account_ids(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNetworkAccountIdsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetAccount" => { + #[allow(non_camel_case_types)] + struct GetAccountSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetAccountSvc { + type Response = super::super::rpc::AccountResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_account(&inner, 
request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetAccountSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNoteScriptByRoot" => { + #[allow(non_camel_case_types)] + struct GetNoteScriptByRootSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNoteScriptByRootSvc { + type Response = super::super::rpc::MaybeNoteScript; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_note_script_by_root(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNoteScriptByRootSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + 
let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetVaultAssetWitnesses" => { + #[allow(non_camel_case_types)] + struct GetVaultAssetWitnessesSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetVaultAssetWitnessesSvc { + type Response = super::VaultAssetWitnessesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_vault_asset_witnesses( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetVaultAssetWitnessesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetStorageMapWitness" => { + #[allow(non_camel_case_types)] + struct GetStorageMapWitnessSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetStorageMapWitnessSvc { + type Response = super::StorageMapWitnessResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_storage_map_witness(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = 
self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetStorageMapWitnessSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for NtxBuilderServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.NtxBuilder"; + impl tonic::server::NamedService for NtxBuilderServer { + const NAME: &'static str = SERVICE_NAME; + } +} diff --git a/crates/proto/src/generated/transaction.rs b/crates/proto/src/generated/transaction.rs index e02a636365..a9dc784d68 100644 --- a/crates/proto/src/generated/transaction.rs +++ b/crates/proto/src/generated/transaction.rs @@ -3,18 +3,18 @@ #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct 
ProvenTransaction { /// Transaction encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::transaction::proven_tx::ProvenTransaction\]. + /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. #[prost(bytes = "vec", tag = "1")] pub transaction: ::prost::alloc::vec::Vec, /// Transaction inputs encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::transaction::TransactionInputs\]. + /// \[miden_protocol::transaction::TransactionInputs\]. #[prost(bytes = "vec", optional, tag = "2")] pub transaction_inputs: ::core::option::Option<::prost::alloc::vec::Vec>, } #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProvenTransactionBatch { /// Encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::transaction::proven_tx::ProvenTransaction\]. + /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. #[prost(bytes = "vec", tag = "1")] pub encoded: ::prost::alloc::vec::Vec, } @@ -52,7 +52,7 @@ pub struct TransactionHeader { pub final_state_commitment: ::core::option::Option, /// Nullifiers of the input notes of the transaction. #[prost(message, repeated, tag = "4")] - pub input_notes: ::prost::alloc::vec::Vec, + pub nullifiers: ::prost::alloc::vec::Vec, /// Output notes of the transaction. #[prost(message, repeated, tag = "5")] pub output_notes: ::prost::alloc::vec::Vec, diff --git a/crates/proto/src/generated/validator.rs b/crates/proto/src/generated/validator.rs index a5a31a35fd..39869d9fc3 100644 --- a/crates/proto/src/generated/validator.rs +++ b/crates/proto/src/generated/validator.rs @@ -147,6 +147,28 @@ pub mod api_client { .insert(GrpcMethod::new("validator.Api", "SubmitProvenTransaction")); self.inner.unary(req, path, codec).await } + /// Validates a proposed block and returns the block header and body. 
+ pub async fn sign_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/validator.Api/SignBlock"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("validator.Api", "SignBlock")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -172,6 +194,14 @@ pub mod api_server { &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; + /// Validates a proposed block and returns the block header and body. + async fn sign_block( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// Validator API for the Validator component. #[derive(Debug)] @@ -337,6 +367,54 @@ pub mod api_server { }; Box::pin(fut) } + "/validator.Api/SignBlock" => { + #[allow(non_camel_case_types)] + struct SignBlockSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService< + super::super::blockchain::ProposedBlock, + > for SignBlockSvc { + type Response = super::super::blockchain::BlockSignature; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::ProposedBlock, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sign_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = 
SignBlockSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/crates/proto/src/lib.rs b/crates/proto/src/lib.rs index 5cd0afe4b4..0f5cbb8f51 100644 --- a/crates/proto/src/lib.rs +++ b/crates/proto/src/lib.rs @@ -10,4 +10,5 @@ pub mod generated; pub use domain::account::{AccountState, AccountWitnessRecord}; pub use domain::nullifier::NullifierWitnessRecord; +pub use domain::proof_request::BlockProofRequest; pub use domain::{convert, try_convert}; diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 049fb16217..f73600f276 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -14,38 +14,34 @@ version.workspace = true crate-type = ["lib"] [features] -batch-prover = ["dep:miden-objects", "dep:tokio"] -block-prover = ["dep:miden-objects", "dep:tokio"] +batch-prover = ["dep:miden-protocol", "dep:tokio"] +block-prover = ["dep:miden-protocol", "dep:tokio"] default = ["std"] -std = ["miden-objects/std", "miden-tx/std"] -tx-prover = ["dep:miden-objects", "dep:miden-tx", "dep:tokio"] +std = ["miden-protocol/std", "miden-tx/std"] +tx-prover = ["dep:miden-protocol", "dep:miden-tx", "dep:tokio"] [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] getrandom = { features = ["wasm_js"], version = "0.3" } -tonic = { default-features = false, features = ["codegen"], version = "0.14" } +tonic = { features = ["codegen"], workspace = true } tonic-web-wasm-client = { default-features = false, version = "0.8" } [target.'cfg(not(all(target_arch = "wasm32", target_os = 
"unknown")))'.dependencies] -tonic = { default-features = false, features = [ - "codegen", - "tls-native-roots", - "tls-ring", - "transport", -], version = "0.14" } +tonic = { features = ["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } tonic-web = { optional = true, version = "0.14" } [lints] workspace = true [dependencies] -miden-objects = { optional = true, workspace = true } -miden-tx = { optional = true, workspace = true } -prost = { default-features = false, features = ["derive"], version = "0.14" } -thiserror = { workspace = true } -tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } -tonic-prost = { workspace = true } +miden-protocol = { optional = true, workspace = true } +miden-tx = { optional = true, workspace = true } +prost = { default-features = false, features = ["derive"], workspace = true } +thiserror = { workspace = true } +tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } +tonic-prost = { workspace = true } [build-dependencies] +fs-err = { workspace = true } miden-node-proto-build = { workspace = true } miette = { features = ["fancy"], version = "7.5" } tonic-prost-build = { workspace = true } diff --git a/crates/remote-prover-client/build.rs b/crates/remote-prover-client/build.rs index 4a6c5e2541..ffd9b2e711 100644 --- a/crates/remote-prover-client/build.rs +++ b/crates/remote-prover-client/build.rs @@ -56,7 +56,7 @@ fn build_tonic_from_descriptor( /// Replaces std references with core and alloc for nostd compatibility fn convert_to_nostd(file_path: &str) -> miette::Result<()> { - let file_content = fs::read_to_string(file_path).into_diagnostic()?; + let file_content = fs_err::read_to_string(file_path).into_diagnostic()?; let updated_content = file_content .replace("std::result", "core::result") .replace("std::marker", "core::marker") diff --git a/crates/remote-prover-client/src/remote_prover/batch_prover.rs 
b/crates/remote-prover-client/src/remote_prover/batch_prover.rs index 3c75097c26..b0d472656a 100644 --- a/crates/remote-prover-client/src/remote_prover/batch_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/batch_prover.rs @@ -3,9 +3,14 @@ use alloc::sync::Arc; use alloc::vec::Vec; use core::time::Duration; -use miden_objects::batch::{ProposedBatch, ProvenBatch}; -use miden_objects::transaction::{OutputNote, ProvenTransaction, TransactionHeader, TransactionId}; -use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::transaction::{ + OutputNote, + ProvenTransaction, + TransactionHeader, + TransactionId, +}; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; use tokio::sync::Mutex; use super::generated::api_client::ApiClient; @@ -71,7 +76,12 @@ impl RemoteBatchProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; @@ -100,7 +110,7 @@ impl RemoteBatchProver { &self, proposed_batch: ProposedBatch, ) -> Result { - use miden_objects::utils::Serializable; + use miden_protocol::utils::Serializable; self.connect().await?; let mut client = self diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index 694f11b371..d1fa435486 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -3,10 +3,10 @@ use alloc::sync::Arc; use alloc::vec::Vec; use core::time::Duration; -use miden_objects::batch::ProvenBatch; -use 
miden_objects::block::{ProposedBlock, ProvenBlock}; -use miden_objects::transaction::{OrderedTransactionHeaders, TransactionHeader}; -use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::batch::{OrderedBatches, ProvenBatch}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockProof, ProposedBlock, ProvenBlock}; +use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; use tokio::sync::Mutex; use super::generated::api_client::ApiClient; @@ -72,7 +72,12 @@ impl RemoteBlockProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; @@ -99,9 +104,11 @@ impl RemoteBlockProver { impl RemoteBlockProver { pub async fn prove( &self, - proposed_block: ProposedBlock, - ) -> Result { - use miden_objects::utils::Serializable; + tx_batches: OrderedBatches, + block_header: BlockHeader, + block_inputs: BlockInputs, + ) -> Result { + use miden_protocol::utils::Serializable; self.connect().await?; let mut client = self @@ -114,70 +121,41 @@ impl RemoteBlockProver { })? .clone(); - // Get the set of expected transaction headers. 
- let proposed_txs = proposed_block.batches().to_transactions(); + let block_proof_request = + ProposedBlock::new_at(block_inputs, tx_batches.into_vec(), block_header.timestamp()) + .map_err(|err| { + RemoteProverClientError::other_with_source( + "failed to create proposed block", + err, + ) + })?; - let request = tonic::Request::new(proposed_block.into()); + let request = tonic::Request::new(block_proof_request.into()); let response = client.prove(request).await.map_err(|err| { RemoteProverClientError::other_with_source("failed to prove block", err) })?; - // Deserialize the response bytes back into a ProvenBlock. - let proven_block = ProvenBlock::try_from(response.into_inner()).map_err(|err| { + // Deserialize the response bytes back into a BlockProof. + let block_proof = BlockProof::try_from(response.into_inner()).map_err(|err| { RemoteProverClientError::other_with_source( "failed to deserialize received response from remote block prover", err, ) })?; - Self::validate_tx_headers(&proven_block, &proposed_txs)?; - - Ok(proven_block) - } - - /// Validates that the proven block's transaction headers are consistent with the transactions - /// passed in the proposed block. - /// - /// This expects that transactions from the proposed block and proven block are in the same - /// order, as define by [`OrderedTransactionHeaders`]. - fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, - ) -> Result<(), RemoteProverClientError> { - if proposed_txs.as_slice().len() != proven_block.transactions().as_slice().len() { - return Err(RemoteProverClientError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. 
- for (proposed_header, proven_header) in - proposed_txs.as_slice().iter().zip(proven_block.transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(RemoteProverClientError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) + Ok(block_proof) } } // CONVERSION // ================================================================================================ -impl TryFrom for ProvenBlock { +impl TryFrom for BlockProof { type Error = DeserializationError; fn try_from(value: proto::Proof) -> Result { - ProvenBlock::read_from_bytes(&value.payload) + BlockProof::read_from_bytes(&value.payload) } } diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs index 53326a3fb2..1074dd5b8e 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. 
#[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs index 7f33a307f7..7be124daad 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. 
#[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/remote-prover-client/src/remote_prover/tx_prover.rs b/crates/remote-prover-client/src/remote_prover/tx_prover.rs index bf6239646d..3bee6199fa 100644 --- a/crates/remote-prover-client/src/remote_prover/tx_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/tx_prover.rs @@ -3,9 +3,9 @@ use alloc::string::{String, ToString}; use alloc::sync::Arc; use core::time::Duration; -use miden_objects::transaction::{ProvenTransaction, TransactionInputs}; -use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; -use miden_objects::vm::FutureMaybeSend; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::vm::FutureMaybeSend; use miden_tx::TransactionProverError; use tokio::sync::Mutex; @@ -72,7 +72,12 @@ impl RemoteTransactionProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; @@ -99,10 +104,10 @@ impl RemoteTransactionProver { impl RemoteTransactionProver { pub fn prove( &self, - tx_inputs: TransactionInputs, + tx_inputs: &TransactionInputs, ) -> impl FutureMaybeSend> { async move { - use miden_objects::utils::Serializable; + use miden_protocol::utils::Serializable; self.connect().await.map_err(|err| { TransactionProverError::other_with_source( "failed to connect to the remote prover", @@ -148,8 +153,8 @@ impl TryFrom for ProvenTransaction { } } -impl From for proto::ProofRequest { - fn from(tx_inputs: TransactionInputs) -> Self { +impl From<&TransactionInputs> for proto::ProofRequest { + fn from(tx_inputs: 
&TransactionInputs) -> Self { proto::ProofRequest { proof_type: proto::ProofType::Transaction.into(), payload: tx_inputs.to_bytes(), diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 540f85eb2b..30ec4dcb84 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -22,13 +22,13 @@ mediatype = { version = "0.21" } miden-node-proto = { workspace = true } miden-node-proto-build = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { default-features = true, workspace = true } +miden-protocol = { default-features = true, workspace = true } miden-tx = { default-features = true, workspace = true } semver = { version = "1.0" } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["tls-native-roots", "tls-ring"], workspace = true } +tonic = { default-features = true, features = ["tls-native-roots", "tls-ring"], workspace = true } tonic-reflection = { workspace = true } tonic-web = { version = "0.14" } tower = { workspace = true } @@ -38,10 +38,10 @@ url = { workspace = true } [dev-dependencies] miden-air = { features = ["testing"], workspace = true } -miden-lib = { workspace = true } miden-node-store = { workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } -miden-objects = { default-features = true, features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { workspace = true } reqwest = { version = "0.12" } rstest = { workspace = true } tempfile = { version = "3.20" } diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 408f0affb0..e3f1a6018d 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -15,10 +15,10 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [CheckNullifiers](#checknullifiers) - 
[SyncNullifiers](#syncnullifiers) -- [GetAccountDetails](#getaccountdetails) -- [GetAccountProofs](#getaccountproofs) +- [GetAccount](#getaccount) - [GetBlockByNumber](#getblockbynumber) - [GetBlockHeaderByNumber](#getblockheaderbynumber) +- [GetLimits](#getlimits) - [GetNotesById](#getnotesbyid) - [GetNoteScriptByRoot](#getnotescriptbyroot) - [SubmitProvenTransaction](#submitproventransaction) @@ -36,6 +36,8 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) Returns a nullifier proof for each of the requested nullifiers. +**Limits:** `nullifier` (1000) + #### Error Handling When nullifier checking fails, detailed error information is provided through gRPC status details. The following error codes may be returned: @@ -48,15 +50,13 @@ When nullifier checking fails, detailed error information is provided through gR --- -### GetAccountDetails - -Returns the latest state of an account with the specified ID. +### GetAccount ---- +Returns an account witness (Merkle proof of inclusion in the account tree) and optionally account details. -### GetAccountProofs +The witness proves the account's state commitment in the account tree. If details are requested, the response also includes the account's header, code, vault assets, and storage data. Account details are only available for public accounts. -Returns the latest state proofs of the specified accounts. +If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. --- @@ -73,10 +73,21 @@ authenticate the block's inclusion. --- +### GetLimits + +Returns the query parameter limits configured for RPC endpoints. + +This endpoint allows clients to discover the maximum number of items that can be requested in a single call for +various endpoints. The response contains a map of endpoint names to their parameter limits. + +--- + ### GetNotesById Returns a list of notes matching the provided note IDs. 
+**Limits:** `note_id` (100) + #### Error Handling When note retrieval fails, detailed error information is provided through gRPC status details. The following error codes may be returned: @@ -137,6 +148,8 @@ Clients should inspect both the gRPC status code and the detailed error code in Returns nullifier synchronization data for a set of prefixes within a given block range. This method allows clients to efficiently track nullifier creation by retrieving only the nullifiers produced between two blocks. +**Limits:** `nullifier` (1000) + Caller specifies the `prefix_len` (currently only 16), the list of prefix values (`nullifiers`), and the block range (`from_start_block`, optional `to_end_block`). The response includes all matching nullifiers created within that range, the last block included in the response (`block_num`), and the current chain tip (`chain_tip`). @@ -181,6 +194,8 @@ When account vault synchronization fails, detailed error information is provided Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. +**Limits:** `note_tag` (1000) + Client specifies the `note_tags` they are interested in, and the block range from which to search for matching notes. The request will then return the next block containing any note matching the provided tags within the specified range. The response includes each note's metadata and inclusion proof. @@ -205,6 +220,8 @@ When note synchronization fails, detailed error information is provided through Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts and notes) the client is interested in. +**Limits:** `account_id` (1000), `note_tag` (1000) + This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block number in the chain. 
Client is expected to repeat these requests in a loop until `response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. diff --git a/crates/rpc/src/server/accept.rs b/crates/rpc/src/server/accept.rs index 5ea5650afb..01103356fb 100644 --- a/crates/rpc/src/server/accept.rs +++ b/crates/rpc/src/server/accept.rs @@ -6,10 +6,16 @@ use futures::future::BoxFuture; use http::header::{ACCEPT, ToStrError}; use mediatype::{Name, ReadParams}; use miden_node_utils::{ErrorReport, FlattenResult}; -use miden_objects::{Word, WordError}; +use miden_protocol::{Word, WordError}; use semver::{Comparator, Version, VersionReq}; use tower::{Layer, Service}; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum GenesisNegotiation { + Optional, + Mandatory, +} + /// Performs content negotiation by rejecting requests which don't match our RPC version or network. /// Clients can specify these as parameters in our `application/vnd.miden` accept media range. /// @@ -29,13 +35,18 @@ use tower::{Layer, Service}; /// /// Parameters are optional and order is not important. /// -/// ``` +/// ```text /// application/vnd.miden; version=; genesis=0x1234 /// ``` #[derive(Clone)] pub struct AcceptHeaderLayer { supported_versions: VersionReq, genesis_commitment: Word, + /// RPC method names for which the `genesis` parameter is mandatory. + /// + /// These should be gRPC method names (e.g. `SubmitProvenTransaction`), + /// matched against the end of the request path like "/rpc.Api/". + require_genesis_methods: Vec<&'static str>, } #[derive(Debug, thiserror::Error)] @@ -71,7 +82,17 @@ impl AcceptHeaderLayer { }], }; - AcceptHeaderLayer { supported_versions, genesis_commitment } + AcceptHeaderLayer { + supported_versions, + genesis_commitment, + require_genesis_methods: Vec::new(), + } + } + + /// Mark a gRPC method as requiring a `genesis` parameter in the Accept header. 
+ pub fn with_genesis_enforced_method(mut self, method: &'static str) -> Self { + self.require_genesis_methods.push(method); + self } } @@ -89,13 +110,21 @@ impl AcceptHeaderLayer { const GRPC: Name<'static> = Name::new_unchecked("grpc"); /// Parses the `Accept` header's contents, searching for any media type compatible with our - /// RPC version and genesis commitment. - fn negotiate(&self, accept: &str) -> Result<(), AcceptHeaderError> { + /// RPC version and genesis commitment, controlling whether `genesis` is optional or mandatory. + fn negotiate( + &self, + accept: &str, + genesis_mode: GenesisNegotiation, + ) -> Result<(), AcceptHeaderError> { let mut media_types = mediatype::MediaTypeList::new(accept).peekable(); // Its debatable whether an empty header value is valid. Let's err on the side of being // gracious if the client want's to be weird. if media_types.peek().is_none() { + // If there are no media types provided and genesis is required, reject. + if matches!(genesis_mode, GenesisNegotiation::Mandatory) { + return Err(AcceptHeaderError::NoSupportedMediaRange); + } return Ok(()); } @@ -150,16 +179,16 @@ impl AcceptHeaderLayer { continue; } - // Skip if the genesis commitment does not match. + // Skip if the genesis commitment does not match, or if it is required but missing. let genesis = media_type .get_param(Self::GENESIS) .map(|value| Word::try_from(value.unquoted_str().as_ref())) .transpose() .map_err(AcceptHeaderError::InvalidGenesis)?; - if let Some(genesis) = genesis - && genesis != self.genesis_commitment - { - continue; + match (genesis_mode, genesis) { + (_, Some(value)) if value != self.genesis_commitment => continue, + (GenesisNegotiation::Mandatory, None) => continue, + _ => {}, } // All preconditions met, this is a valid media type that we can serve. @@ -195,14 +224,44 @@ where } fn call(&mut self, request: http::Request) -> Self::Future { + // Skip negotiation entirely for CORS preflight/non-gRPC requests. 
+ // + // Browsers often automatically perform an `OPTIONS` check _before_ the client + // SDK can inject the appropriate `ACCEPT` header, causing a rejection. + // Since an `OPTIONS` request does nothing its safe for us to simply allow them. + if request.method() == http::Method::OPTIONS { + return self.inner.call(request).boxed(); + } + + // Determine if this RPC method requires the `genesis` parameter. + let path = request.uri().path(); + let method_name = path.rsplit('/').next().unwrap_or_default(); + + let requires_genesis = self.verifier.require_genesis_methods.contains(&method_name); + + // If `genesis` is required but the header is missing entirely, reject early. let Some(header) = request.headers().get(ACCEPT) else { + if requires_genesis { + let response = tonic::Status::invalid_argument( + "Accept header with 'genesis' parameter is required for write RPC methods", + ) + .into_http(); + return futures::future::ready(Ok(response)).boxed(); + } return self.inner.call(request).boxed(); }; let result = header .to_str() .map_err(AcceptHeaderError::InvalidUtf8) - .map(|header| self.verifier.negotiate(header)) + .map(|header| { + let mode = if requires_genesis { + GenesisNegotiation::Mandatory + } else { + GenesisNegotiation::Optional + }; + self.verifier.negotiate(header, mode) + }) .flatten_result(); match result { @@ -298,7 +357,7 @@ impl FromStr for QValue { #[cfg(test)] mod tests { - use miden_objects::Word; + use miden_protocol::Word; use semver::Version; use super::{AcceptHeaderLayer, QParsingError}; @@ -342,7 +401,9 @@ mod tests { #[case::quoted_network(r#"application/vnd.miden; genesis="0x00000000000000000000000000000000000000000000000000000000deadbeef""#)] #[test] fn request_should_pass(#[case] accept: &'static str) { - AcceptHeaderLayer::for_tests().negotiate(accept).unwrap(); + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Optional) + .unwrap(); } #[rstest::rstest] @@ -356,7 +417,52 @@ mod tests { 
#[case::wildcard_subtype("application/*")] #[test] fn request_should_be_rejected(#[case] accept: &'static str) { - AcceptHeaderLayer::for_tests().negotiate(accept).unwrap_err(); + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Optional) + .unwrap_err(); + } + + #[test] + fn write_requires_genesis_param_missing_or_empty_or_mismatch() { + let layer = AcceptHeaderLayer::for_tests(); + + // Missing genesis parameter + assert!( + layer + .negotiate("application/vnd.miden", super::GenesisNegotiation::Mandatory) + .is_err() + ); + + // Empty header value + assert!(layer.negotiate("", super::GenesisNegotiation::Mandatory).is_err()); + + // Present but mismatched genesis parameter + let mismatched = "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeee"; + assert!(layer.negotiate(mismatched, super::GenesisNegotiation::Mandatory).is_err()); + } + + #[rstest::rstest] + #[case::matching_network( + "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeef" + )] + #[case::matching_network_and_version( + "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeef; version=0.2.3" + )] + #[test] + fn request_with_mandadory_genesis_should_pass(#[case] accept: &'static str) { + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Mandatory) + .unwrap(); + } + + #[rstest::rstest] + #[case::missing_network("application/vnd.miden;")] + #[case::missing_network_wildcard("*/*")] + #[test] + fn request_with_mandadory_genesis_should_be_rejected(#[case] accept: &'static str) { + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Mandatory) + .unwrap_err(); } #[rstest::rstest] diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index a75341f56b..45e4bf8950 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -1,15 +1,10 @@ -use 
std::sync::Arc; +use std::sync::{Arc, LazyLock}; use std::time::Duration; use anyhow::Context; -use miden_node_proto::clients::{ - BlockProducer, - BlockProducerClient, - Builder, - StoreRpc, - StoreRpcClient, -}; +use miden_node_proto::clients::{BlockProducerClient, Builder, StoreRpcClient, ValidatorClient}; use miden_node_proto::errors::ConversionError; +use miden_node_proto::generated::rpc::MempoolStats; use miden_node_proto::generated::rpc::api_server::{self, Api}; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; @@ -20,26 +15,20 @@ use miden_node_utils::limiter::{ QueryParamNoteIdLimit, QueryParamNoteTagLimit, QueryParamNullifierLimit, + QueryParamStorageMapKeyTotalLimit, }; -use miden_objects::account::AccountId; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::{Note, NoteRecipient, NoteScript}; -use miden_objects::transaction::{ - OutputNote, - ProvenTransaction, - ProvenTransactionBuilder, - TransactionInputs, -}; -use miden_objects::utils::serde::{Deserializable, Serializable}; -use miden_objects::{MIN_PROOF_SECURITY_LEVEL, Word}; +use miden_protocol::batch::ProvenBatch; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{Note, NoteRecipient, NoteScript}; +use miden_protocol::transaction::{OutputNote, ProvenTransaction, ProvenTransactionBuilder}; +use miden_protocol::utils::serde::{Deserializable, Serializable}; +use miden_protocol::{MIN_PROOF_SECURITY_LEVEL, Word}; use miden_tx::TransactionVerifier; use tonic::{IntoRequest, Request, Response, Status}; -use tracing::{debug, info, instrument, warn}; +use tracing::{debug, info}; use url::Url; use crate::COMPONENT; -use crate::server::validator; // RPC SERVICE // ================================================================================================ @@ -47,11 +36,12 @@ use crate::server::validator; pub struct RpcService { store: StoreRpcClient, block_producer: 
Option, + validator: ValidatorClient, genesis_commitment: Option, } impl RpcService { - pub(super) fn new(store_url: Url, block_producer_url: Option) -> Self { + pub(super) fn new(store_url: Url, block_producer_url: Option, validator_url: Url) -> Self { let store = { info!(target: COMPONENT, store_endpoint = %store_url, "Initializing store client"); Builder::new(store_url) @@ -59,7 +49,8 @@ impl RpcService { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::() + .with_otel_context_injection() + .connect_lazy::() }; let block_producer = block_producer_url.map(|block_producer_url| { @@ -73,12 +64,29 @@ impl RpcService { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::() + .with_otel_context_injection() + .connect_lazy::() }); + let validator = { + info!( + target: COMPONENT, + validator_endpoint = %validator_url, + "Initializing validator client", + ); + Builder::new(validator_url) + .without_tls() + .without_timeout() + .without_metadata_version() + .without_metadata_genesis() + .with_otel_context_injection() + .connect_lazy::() + }; + Self { store, block_producer, + validator, genesis_commitment: None, } } @@ -103,7 +111,7 @@ impl RpcService { loop { let result = self .get_block_header_by_number( - proto::shared::BlockHeaderByNumberRequest { + proto::rpc::BlockHeaderByNumberRequest { block_num: Some(BlockNumber::GENESIS.as_u32()), include_mmr_proof: None, } @@ -123,16 +131,16 @@ impl RpcService { return Ok(header); }, Err(err) if err.code() == tonic::Code::Unavailable => { - // exponential backoff with base 500ms and max 30s + // Exponential backoff with base 500ms and max 30s. 
let backoff = Duration::from_millis(500) - .saturating_mul(1 << retry_counter) + .saturating_mul(1 << retry_counter.min(6)) .min(Duration::from_secs(30)); tracing::warn!( ?backoff, %retry_counter, %err, - "connection failed while subscribing to the mempool, retrying" + "connection failed while fetching genesis header, retrying" ); retry_counter += 1; @@ -146,18 +154,10 @@ impl RpcService { #[tonic::async_trait] impl api_server::Api for RpcService { - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.check_nullifiers", - skip_all, - ret(level = "debug"), - err - )] async fn check_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().nullifiers.len())?; @@ -172,18 +172,10 @@ impl api_server::Api for RpcService { self.store.clone().check_nullifiers(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_nullifiers", - skip_all, - ret(level = "debug"), - err - )] async fn sync_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().nullifiers.len())?; @@ -191,35 +183,19 @@ impl api_server::Api for RpcService { self.store.clone().sync_nullifiers(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_block_header_by_number", - skip_all, - ret(level = "debug"), - err - )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { info!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().get_block_header_by_number(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_state", - skip_all, - ret(level = "debug"), - err - )] async fn sync_state( &self, - request: 
Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().account_ids.len())?; @@ -228,35 +204,19 @@ impl api_server::Api for RpcService { self.store.clone().sync_state(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_storage_maps", - skip_all, - ret(level = "debug"), - err - )] async fn sync_storage_maps( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_storage_maps(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_notes", - skip_all, - ret(level = "debug"), - err - )] async fn sync_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().note_tags.len())?; @@ -264,14 +224,6 @@ impl api_server::Api for RpcService { self.store.clone().sync_notes(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_notes_by_id", - skip_all, - ret(level = "debug"), - err - )] async fn get_notes_by_id( &self, request: Request, @@ -293,31 +245,20 @@ impl api_server::Api for RpcService { self.store.clone().get_notes_by_id(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_account_vault", - skip_all, - ret(level = "debug"), - err - )] async fn sync_account_vault( &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + request: tonic::Request, + ) -> std::result::Result, tonic::Status> + { debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_account_vault(request).await } - #[instrument(parent = None, target = COMPONENT, name = "rpc.server.submit_proven_transaction", 
skip_all, err)] async fn submit_proven_transaction( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); let Some(block_producer) = &self.block_producer else { @@ -354,7 +295,7 @@ impl api_server::Api for RpcService { let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); - let new_note = Note::new(note.assets().clone(), *note.metadata(), recipient); + let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) }, other => other.clone(), @@ -384,39 +325,18 @@ impl api_server::Api for RpcService { })?; // If transaction inputs are provided, re-execute the transaction to validate it. - if let Some(tx_inputs_bytes) = &request.transaction_inputs { - // Deserialize the transaction inputs. - let tx_inputs = TransactionInputs::read_from_bytes(tx_inputs_bytes).map_err(|err| { - Status::invalid_argument(err.as_report_context("Invalid transaction inputs")) - })?; - // Re-execute the transaction. - match validator::re_execute_transaction(tx_inputs).await { - Ok(_executed_tx) => { - debug!( - target = COMPONENT, - tx_id = %tx.id().to_hex(), - "Transaction re-execution successful" - ); - }, - Err(e) => { - warn!( - target = COMPONENT, - tx_id = %tx.id().to_hex(), - error = %e, - "Transaction re-execution failed, but continuing with submission" - ); - }, - } + if request.transaction_inputs.is_some() { + // Re-execute the transaction via the Validator. 
+ self.validator.clone().submit_proven_transaction(request.clone()).await?; } block_producer.clone().submit_proven_transaction(request).await } - #[instrument(parent = None, target = COMPONENT, name = "rpc.server.submit_proven_batch", skip_all, err)] async fn submit_proven_batch( &self, request: tonic::Request, - ) -> Result, Status> { + ) -> Result, Status> { let Some(block_producer) = &self.block_producer else { return Err(Status::unavailable("Batch submission not available in read-only mode")); }; @@ -437,7 +357,8 @@ impl api_server::Api for RpcService { let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); - let new_note = Note::new(note.assets().clone(), *note.metadata(), recipient); + let new_note = + Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) }, other => other.clone(), @@ -470,39 +391,6 @@ impl api_server::Api for RpcService { block_producer.clone().submit_proven_batch(request).await } - /// Returns details for public (public) account by id. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_account_details", - skip_all, - ret(level = "debug"), - err - )] - async fn get_account_details( - &self, - request: Request, - ) -> std::result::Result, Status> { - debug!(target: COMPONENT, request = ?request.get_ref()); - - // Validating account using conversion: - let _account_id: AccountId = request - .get_ref() - .clone() - .try_into() - .map_err(|err| Status::invalid_argument(format!("Invalid account id: {err}")))?; - - self.store.clone().get_account_details(request).await - } - - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_block_by_number", - skip_all, - ret(level = "debug"), - err - )] async fn get_block_by_number( &self, request: Request, @@ -514,33 +402,36 @@ impl api_server::Api for RpcService { self.store.clone().get_block_by_number(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_account_proof", - skip_all, - ret(level = "debug"), - err - )] - async fn get_account_proof( + async fn get_account( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ + SlotData::MapKeys as ProtoMapKeys, + SlotData::AllEntries as ProtoMapAllEntries + }; + let request = request.into_inner(); debug!(target: COMPONENT, ?request); - self.store.clone().get_account_proof(request).await + // Validate total storage map key limit before forwarding to store + if let Some(details) = &request.details { + let total_keys: usize = details + .storage_maps + .iter() + .filter_map(|m| m.slot_data.as_ref()) + .filter_map(|d| match d { + ProtoMapKeys(keys) => Some(keys.map_keys.len()), + ProtoMapAllEntries(_) => None, + }) + .sum(); + check::(total_keys)?; + } + + self.store.clone().get_account(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.status", - 
skip_all, - ret(level = "debug"), - err - )] async fn status( &self, request: Request<()>, @@ -562,54 +453,47 @@ impl api_server::Api for RpcService { Ok(Response::new(proto::rpc::RpcStatus { version: env!("CARGO_PKG_VERSION").to_string(), - store: store_status.or(Some(proto::rpc_store::StoreStatus { + store: store_status.or(Some(proto::rpc::StoreStatus { status: "unreachable".to_string(), chain_tip: 0, version: "-".to_string(), })), - block_producer: block_producer_status.or(Some( - proto::block_producer::BlockProducerStatus { - status: "unreachable".to_string(), - version: "-".to_string(), - }, - )), + block_producer: block_producer_status.or(Some(proto::rpc::BlockProducerStatus { + status: "unreachable".to_string(), + version: "-".to_string(), + chain_tip: 0, + mempool_stats: Some(MempoolStats::default()), + })), genesis_commitment: self.genesis_commitment.map(Into::into), })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.get_note_script_by_root", - skip_all, - ret(level = "debug"), - err - )] async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); self.store.clone().get_note_script_by_root(request).await } - #[instrument( - parent = None, - target = COMPONENT, - name = "rpc.server.sync_transactions", - skip_all, - ret(level = "debug"), - err - )] async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); self.store.clone().sync_transactions(request).await } + + async fn get_limits( + &self, + request: Request<()>, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request); + + Ok(Response::new(RPC_LIMITS.clone())) + } } // LIMIT HELPERS @@ -625,3 +509,47 @@ fn out_of_range_error(err: E) -> Status { fn check(n: usize) -> Result<(), Status> { ::check(n).map_err(out_of_range_error) } + +/// Helper to build an 
[`EndpointLimits`](proto::rpc::EndpointLimits) from (name, limit) pairs. +fn endpoint_limits(params: &[(&str, usize)]) -> proto::rpc::EndpointLimits { + proto::rpc::EndpointLimits { + parameters: params.iter().map(|(k, v)| ((*k).to_string(), *v as u32)).collect(), + } +} + +/// Cached RPC query parameter limits. +static RPC_LIMITS: LazyLock = LazyLock::new(|| { + use { + QueryParamAccountIdLimit as AccountId, + QueryParamNoteIdLimit as NoteId, + QueryParamNoteTagLimit as NoteTag, + QueryParamNullifierLimit as Nullifier, + QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal, + }; + + proto::rpc::RpcLimits { + endpoints: std::collections::HashMap::from([ + ( + "CheckNullifiers".into(), + endpoint_limits(&[(Nullifier::PARAM_NAME, Nullifier::LIMIT)]), + ), + ( + "SyncNullifiers".into(), + endpoint_limits(&[(Nullifier::PARAM_NAME, Nullifier::LIMIT)]), + ), + ( + "SyncState".into(), + endpoint_limits(&[ + (AccountId::PARAM_NAME, AccountId::LIMIT), + (NoteTag::PARAM_NAME, NoteTag::LIMIT), + ]), + ), + ("SyncNotes".into(), endpoint_limits(&[(NoteTag::PARAM_NAME, NoteTag::LIMIT)])), + ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), + ( + "GetAccount".into(), + endpoint_limits(&[(StorageMapKeyTotal::PARAM_NAME, StorageMapKeyTotal::LIMIT)]), + ), + ]), + } +}); diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index c6b6349be8..2299072073 100644 --- a/crates/rpc/src/server/mod.rs +++ b/crates/rpc/src/server/mod.rs @@ -21,17 +21,17 @@ use crate::server::health::HealthCheckLayer; mod accept; mod api; mod health; -mod validator; /// The RPC server component. /// /// On startup, binds to the provided listener and starts serving the RPC API. -/// It connects lazily to the store and block producer components as needed. +/// It connects lazily to the store, validator and block producer components as needed. /// Requests will fail if the components are not available. 
pub struct Rpc { pub listener: TcpListener, pub store_url: Url, pub block_producer_url: Option, + pub validator_url: Url, /// Server-side timeout for an individual gRPC request. /// /// If the handler takes longer than this duration, the server cancels the call. @@ -44,7 +44,11 @@ impl Rpc { /// Note: Executes in place (i.e. not spawned) and will run indefinitely until /// a fatal error is encountered. pub async fn serve(self) -> anyhow::Result<()> { - let mut api = api::RpcService::new(self.store_url.clone(), self.block_producer_url.clone()); + let mut api = api::RpcService::new( + self.store_url.clone(), + self.block_producer_url.clone(), + self.validator_url, + ); let genesis = api .get_genesis_header_with_retry() @@ -80,7 +84,11 @@ impl Rpc { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .layer(HealthCheckLayer) - .layer(AcceptHeaderLayer::new(&rpc_version, genesis.commitment())) + .layer( + AcceptHeaderLayer::new(&rpc_version, genesis.commitment()) + .with_genesis_enforced_method("SubmitProvenTransaction") + .with_genesis_enforced_method("SubmitProvenBatch"), + ) .layer(cors_for_grpc_web_layer()) // Enables gRPC-web support. .layer(GrpcWebLayer::new()) diff --git a/crates/rpc/src/server/validator.rs b/crates/rpc/src/server/validator.rs deleted file mode 100644 index 2b6719c32a..0000000000 --- a/crates/rpc/src/server/validator.rs +++ /dev/null @@ -1,149 +0,0 @@ -/// NOTE: This module contains logic that will eventually be moved to the Validator component -/// when it is added to this repository. 
-use std::collections::BTreeSet; - -use miden_objects::Word; -use miden_objects::account::{AccountId, PartialAccount, StorageMapWitness}; -use miden_objects::asset::{AssetVaultKey, AssetWitness}; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::NoteScript; -use miden_objects::transaction::{ - AccountInputs, - ExecutedTransaction, - PartialBlockchain, - TransactionInputs, -}; -use miden_objects::vm::FutureMaybeSend; -use miden_tx::auth::UnreachableAuth; -use miden_tx::{ - DataStore, - DataStoreError, - MastForestStore, - TransactionExecutor, - TransactionExecutorError, - TransactionMastStore, -}; - -/// Executes a transaction using the provided transaction inputs. -pub async fn re_execute_transaction( - tx_inputs: TransactionInputs, -) -> Result { - // Create a DataStore from the transaction inputs. - let data_store = TransactionInputsDataStore::new(tx_inputs.clone()); - - // Execute the transaction. - let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts(); - let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = - TransactionExecutor::new(&data_store); - executor - .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args) - .await -} - -/// A [`DataStore`] implementation that wraps [`TransactionInputs`] -struct TransactionInputsDataStore { - tx_inputs: TransactionInputs, - mast_store: TransactionMastStore, -} - -impl TransactionInputsDataStore { - fn new(tx_inputs: TransactionInputs) -> Self { - let mast_store = TransactionMastStore::new(); - mast_store.load_account_code(tx_inputs.account().code()); - Self { tx_inputs, mast_store } - } -} - -impl DataStore for TransactionInputsDataStore { - fn get_transaction_inputs( - &self, - account_id: AccountId, - _ref_blocks: BTreeSet, - ) -> impl FutureMaybeSend> - { - async move { - if self.tx_inputs.account().id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - Ok(( - self.tx_inputs.account().clone(), 
- self.tx_inputs.block_header().clone(), - self.tx_inputs.blockchain().clone(), - )) - } - } - - fn get_foreign_account_inputs( - &self, - foreign_account_id: AccountId, - _ref_block: BlockNumber, - ) -> impl FutureMaybeSend> { - async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) } - } - - fn get_vault_asset_witness( - &self, - account_id: AccountId, - vault_root: Word, - vault_key: AssetVaultKey, - ) -> impl FutureMaybeSend> { - async move { - if self.tx_inputs.account().id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - if self.tx_inputs.account().vault().root() != vault_root { - return Err(DataStoreError::Other { - error_msg: "vault root mismatch".into(), - source: None, - }); - } - - match self.tx_inputs.account().vault().open(vault_key) { - Ok(vault_proof) => { - AssetWitness::new(vault_proof.into()).map_err(|err| DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(err.into()), - }) - }, - Err(err) => Err(DataStoreError::Other { - error_msg: "failed to open vault".into(), - source: Some(err.into()), - }), - } - } - } - - fn get_storage_map_witness( - &self, - account_id: AccountId, - _map_root: Word, - _map_key: Word, - ) -> impl FutureMaybeSend> { - async move { - if self.tx_inputs.account().id() != account_id { - return Err(DataStoreError::AccountNotFound(account_id)); - } - - // For partial accounts, storage map witness is not available. 
- Err(DataStoreError::Other { - error_msg: "storage map witness not available with partial account state".into(), - source: None, - }) - } - } - - fn get_note_script( - &self, - script_root: Word, - ) -> impl FutureMaybeSend> { - async move { Err(DataStoreError::NoteScriptNotFound(script_root)) } - } -} - -impl MastForestStore for TransactionInputsDataStore { - fn get(&self, procedure_hash: &Word) -> Option> { - self.mast_store.get(procedure_hash) - } -} diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 6eaec910a3..b35fe8b6dc 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -3,16 +3,22 @@ use std::time::Duration; use http::header::{ACCEPT, CONTENT_TYPE}; use http::{HeaderMap, HeaderValue}; -use miden_lib::account::wallets::BasicWallet; -use miden_node_proto::clients::{Builder, Rpc as RpcClientMarker, RpcClient}; +use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::rpc::api_client::ApiClient as ProtoClient; use miden_node_proto::generated::{self as proto}; use miden_node_store::Store; use miden_node_store::genesis::config::GenesisConfig; use miden_node_utils::fee::test_fee; -use miden_objects::Word; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_node_utils::limiter::{ + QueryParamAccountIdLimit, + QueryParamLimiter, + QueryParamNoteIdLimit, + QueryParamNoteTagLimit, + QueryParamNullifierLimit, +}; +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ AccountBuilder, AccountDelta, AccountId, @@ -20,10 +26,12 @@ use miden_objects::account::{ AccountStorageMode, AccountType, }; -use miden_objects::testing::noop_auth_component::NoopAuthComponent; -use miden_objects::transaction::ProvenTransactionBuilder; -use miden_objects::utils::Serializable; -use miden_objects::vm::ExecutionProof; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use 
miden_protocol::testing::noop_auth_component::NoopAuthComponent; +use miden_protocol::transaction::ProvenTransactionBuilder; +use miden_protocol::utils::Serializable; +use miden_protocol::vm::ExecutionProof; +use miden_standards::account::wallets::BasicWallet; use tempfile::TempDir; use tokio::net::TcpListener; use tokio::runtime::{self, Runtime}; @@ -36,7 +44,7 @@ use crate::Rpc; async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Override the client so that the ACCEPT header is not set. let mut rpc_client = { @@ -46,7 +54,7 @@ async fn rpc_server_accepts_requests_without_accept_header() { }; // Send any request to the RPC. - let request = proto::shared::BlockHeaderByNumberRequest { + let request = proto::rpc::BlockHeaderByNumberRequest { block_num: Some(0), include_mmr_proof: None, }; @@ -56,14 +64,14 @@ async fn rpc_server_accepts_requests_without_accept_header() { assert!(response.is_ok()); // Shutdown to avoid runtime drop error. - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } #[tokio::test] async fn rpc_server_accepts_requests_with_accept_header() { // Start the RPC. let (mut rpc_client, _, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Send any request to the RPC. let response = send_request(&mut rpc_client).await; @@ -72,7 +80,7 @@ async fn rpc_server_accepts_requests_with_accept_header() { assert!(response.is_ok()); // Shutdown to avoid runtime drop error. 
- store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } #[tokio::test] @@ -80,7 +88,7 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { for version in ["1.9.0", "0.8.1", "0.8.0", "0.999.0", "99.0.0"] { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Recreate the RPC client with an invalid version. let url = rpc_addr.to_string(); @@ -91,7 +99,8 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { .with_timeout(Duration::from_secs(10)) .with_metadata_version(version.to_string()) .without_metadata_genesis() - .connect::() + .without_otel_context_injection() + .connect::() .await .unwrap(); @@ -104,7 +113,7 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { assert!(response.as_ref().err().unwrap().message().contains("server does not support"),); // Shutdown to avoid runtime drop error. - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } } @@ -121,45 +130,31 @@ async fn rpc_startup_is_robust_to_network_failures() { assert!(response.is_err()); // Start the store. 
- let (store_runtime, data_directory) = start_store(store_addr).await; + let (store_runtime, data_directory, _genesis) = start_store(store_addr).await; // Test: send request against RPC api and should succeed let response = send_request(&mut rpc_client).await; assert!(response.unwrap().into_inner().block_header.is_some()); // Test: shutdown the store and should fail - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; let response = send_request(&mut rpc_client).await; assert!(response.is_err()); // Test: restart the store and request should succeed - let rpc_listener = TcpListener::bind(store_addr).await.expect("Failed to bind store"); - let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind store ntx-builder gRPC endpoint"); - let block_producer_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); - task::spawn(async move { - Store { - rpc_listener, - ntx_builder_listener, - block_producer_listener, - data_directory: data_directory.path().to_path_buf(), - grpc_timeout: Duration::from_secs(10), - } - .serve() - .await - .expect("store should start serving"); - }); + let store_runtime = restart_store(store_addr, data_directory.path()).await; let response = send_request(&mut rpc_client).await; assert_eq!(response.unwrap().into_inner().block_header.unwrap().block_num, 0); + + // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly + shutdown_store(store_runtime).await; } #[tokio::test] async fn rpc_server_has_web_support() { // Start server let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Send a status request let client = reqwest::Client::new(); @@ -195,21 +190,24 @@ async fn rpc_server_has_web_support() { assert!(headers.get("access-control-allow-credentials").is_some()); 
assert!(headers.get("access-control-expose-headers").is_some()); assert!(headers.get("vary").is_some()); - store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } #[tokio::test] async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; // Override the client so that the ACCEPT header is not set. - let mut rpc_client = { - let endpoint = tonic::transport::Endpoint::try_from(format!("http://{rpc_addr}")).unwrap(); - - ProtoClient::connect(endpoint).await.unwrap() - }; + let mut rpc_client = + miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) + .without_tls() + .with_timeout(Duration::from_secs(5)) + .without_metadata_version() + .with_metadata_genesis(genesis.to_hex()) + .without_otel_context_injection() + .connect_lazy::(); let account_id = AccountId::dummy( [0; 15], @@ -271,25 +269,94 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { assert!(response.is_err()); // Assert that the error is due to the invalid account delta commitment. + let err = response.as_ref().unwrap_err().message(); + assert!( + err.contains("failed to validate account delta in transaction account update"), + "expected error message to contain delta commitment error but got: {err}" + ); + + // Shutdown to avoid runtime drop error. + shutdown_store(store_runtime).await; +} + +#[tokio::test] +async fn rpc_server_rejects_tx_submissions_without_genesis() { + // Start the RPC. + let (_, rpc_addr, store_addr) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + + // Override the client so that the ACCEPT header is not set. 
+ let mut rpc_client = + miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) + .without_tls() + .with_timeout(Duration::from_secs(5)) + .without_metadata_version() + .without_metadata_genesis() + .without_otel_context_injection() + .connect_lazy::(); + + let account_id = AccountId::dummy( + [0; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let account = AccountBuilder::new([0; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_assets(vec![]) + .with_component(BasicWallet) + .with_auth_component(NoopAuthComponent) + .build_existing() + .unwrap(); + + let account_delta: AccountDelta = account.clone().try_into().unwrap(); + + // Send any request to the RPC. + let tx = ProvenTransactionBuilder::new( + account_id, + [8; 32].try_into().unwrap(), + account.commitment(), + account_delta.clone().to_commitment(), // delta commitment + 0.into(), + Word::default(), + test_fee(), + u32::MAX.into(), + ExecutionProof::new_dummy(), + ) + .account_update_details(AccountUpdateDetails::Delta(account_delta)) + .build() + .unwrap(); + + let request = proto::transaction::ProvenTransaction { + transaction: tx.to_bytes(), + transaction_inputs: None, + }; + + let response = rpc_client.submit_proven_transaction(request).await; + + // Assert that the server rejected our request. + assert!(response.is_err()); + + // Assert that the error is due to the invalid account delta commitment. + let err = response.as_ref().unwrap_err().message(); assert!( - response - .as_ref() - .err() - .unwrap() - .message() - .contains("failed to validate account delta in transaction account update"), + err.contains( + "server does not support any of the specified application/vnd.miden content types" + ), + "expected error message to reference incompatible content media types but got: {err:?}" ); // Shutdown to avoid runtime drop error. 
- store_runtime.shutdown_background(); + shutdown_store(store_runtime).await; } /// Sends an arbitrary / irrelevant request to the RPC. async fn send_request( rpc_client: &mut RpcClient, -) -> std::result::Result, tonic::Status> -{ - let request = proto::shared::BlockHeaderByNumberRequest { +) -> std::result::Result, tonic::Status> { + let request = proto::rpc::BlockHeaderByNumberRequest { block_num: Some(0), include_mmr_proof: None, }; @@ -320,10 +387,13 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) let store_url = Url::parse(&format!("http://{store_addr}")).unwrap(); // SAFETY: The block_producer_addr is always valid as it is created from a `SocketAddr`. let block_producer_url = Url::parse(&format!("http://{block_producer_addr}")).unwrap(); + // SAFETY: Using dummy validator URL for test - not actually contacted in this test + let validator_url = Url::parse("http://127.0.0.1:0").unwrap(); Rpc { listener: rpc_listener, store_url, block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout: Duration::from_secs(30), } .serve() @@ -338,18 +408,21 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) .with_timeout(Duration::from_secs(10)) .without_metadata_version() .without_metadata_genesis() - .connect::() + .without_otel_context_injection() + .connect::() .await .expect("Failed to build client"); (rpc_client, rpc_addr, store_addr) } -async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir) { +async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { // Start the store. 
let data_directory = tempfile::tempdir().expect("tempdir should be created"); - let (genesis_state, _) = GenesisConfig::default().into_state().unwrap(); + let config = GenesisConfig::default(); + let signer = SecretKey::new(); + let (genesis_state, _) = config.into_state(signer).unwrap(); Store::bootstrap(genesis_state.clone(), data_directory.path()).expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); let rpc_listener = TcpListener::bind(store_addr).await.expect("store should bind a port"); @@ -375,5 +448,100 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir) { .await .expect("store should start serving"); }); - (store_runtime, data_directory) + ( + store_runtime, + data_directory, + genesis_state.into_block().unwrap().inner().header().commitment(), + ) +} + +/// Shuts down the store runtime properly to allow `RocksDB` to flush before the temp directory is +/// deleted. +async fn shutdown_store(store_runtime: Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_secs(3))) + .await + .expect("shutdown should complete"); + // Give RocksDB time to release its lock file after the runtime shutdown + tokio::time::sleep(Duration::from_millis(200)).await; +} + +/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. 
+async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) -> Runtime { + let rpc_listener = TcpListener::bind(store_addr).await.expect("Failed to bind store"); + let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("Failed to bind store ntx-builder gRPC endpoint"); + let block_producer_listener = + TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); + let dir = data_directory.to_path_buf(); + let store_runtime = + runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); + store_runtime.spawn(async move { + Store { + rpc_listener, + ntx_builder_listener, + block_producer_listener, + data_directory: dir, + grpc_timeout: Duration::from_secs(10), + } + .serve() + .await + .expect("store should start serving"); + }); + store_runtime +} + +#[tokio::test] +async fn get_limits_endpoint() { + // Start the RPC and store + let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + + // Call the get_limits endpoint + let response = rpc_client.get_limits(()).await.expect("get_limits should succeed"); + let limits = response.into_inner(); + + // Verify the response contains expected endpoints and limits + assert!(!limits.endpoints.is_empty(), "endpoints should not be empty"); + + // Verify CheckNullifiers endpoint + let check_nullifiers = + limits.endpoints.get("CheckNullifiers").expect("CheckNullifiers should exist"); + + assert_eq!( + check_nullifiers.parameters.get("nullifier"), + Some(&(QueryParamNullifierLimit::LIMIT as u32)), + "CheckNullifiers nullifier limit should be {}", + QueryParamNullifierLimit::LIMIT + ); + + // Verify SyncState endpoint has multiple parameters + let sync_state = limits.endpoints.get("SyncState").expect("SyncState should exist"); + assert_eq!( + sync_state.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), + Some(&(QueryParamAccountIdLimit::LIMIT as u32)), + 
"SyncState {} limit should be {}", + QueryParamAccountIdLimit::PARAM_NAME, + QueryParamAccountIdLimit::LIMIT + ); + assert_eq!( + sync_state.parameters.get(QueryParamNoteTagLimit::PARAM_NAME), + Some(&(QueryParamNoteTagLimit::LIMIT as u32)), + "SyncState {} limit should be {}", + QueryParamNoteTagLimit::PARAM_NAME, + QueryParamNoteTagLimit::LIMIT + ); + + // Verify GetNotesById endpoint + let get_notes_by_id = limits.endpoints.get("GetNotesById").expect("GetNotesById should exist"); + assert_eq!( + get_notes_by_id.parameters.get(QueryParamNoteIdLimit::PARAM_NAME), + Some(&(QueryParamNoteIdLimit::LIMIT as u32)), + "GetNotesById {} limit should be {}", + QueryParamNoteIdLimit::PARAM_NAME, + QueryParamNoteIdLimit::LIMIT + ); + + // Shutdown to avoid runtime drop error. + shutdown_store(store_runtime).await; } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 7fc52f65ce..dd06567ea5 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -19,40 +19,54 @@ anyhow = { workspace = true } deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } deadpool-diesel = { features = ["sqlite"], version = "0.6" } deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.2" } -diesel_migrations = { features = ["sqlite"], version = "2.2" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } +fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } -miden-lib = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { workspace = true } -miden-objects = { features = ["std"], workspace = true } -pretty_assertions = { workspace = true } -rand = { workspace = 
true } -rand_chacha = { workspace = true } -serde = { features = ["derive"], version = "1" } -thiserror = { workspace = true } -tokio = { features = ["fs", "rt-multi-thread"], workspace = true } -tokio-stream = { features = ["net"], workspace = true } -toml = { version = "0.9" } -tonic = { workspace = true } -tonic-reflection = { workspace = true } -tower-http = { features = ["util"], workspace = true } -tracing = { workspace = true } +miden-standards = { workspace = true } +# TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` +miden-protocol = { features = ["std", "testing"], workspace = true } +pretty_assertions = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +serde = { features = ["derive"], version = "1" } +thiserror = { workspace = true } +tokio = { features = ["fs", "rt-multi-thread"], workspace = true } +tokio-stream = { features = ["net"], workspace = true } +toml = { version = "0.9" } +tonic = { default-features = true, workspace = true } +tonic-reflection = { workspace = true } +tower-http = { features = ["util"], workspace = true } +tracing = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } criterion = { version = "0.5" } fs-err = { workspace = true } -miden-lib = { features = ["testing"], workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } -miden-objects = { default-features = true, features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } termtree = { version = "0.5" } +[features] +default = ["rocksdb"] +rocksdb = ["miden-crypto/rocksdb"] + [[bench]] -harness = false -name = "account_tree_historical" +harness = false +name = "account_tree" +required-features = ["rocksdb"] 
+ +[package.metadata.cargo-machete] +# This is an indirect dependency for which we need to enable optimisations +# via feature flags. Because we don't use it directly in code, machete +# identifies it as unused. +ignored = ["miden-crypto"] diff --git a/crates/store/README.md b/crates/store/README.md index 0b12487c18..57c002fe56 100644 --- a/crates/store/README.md +++ b/crates/store/README.md @@ -7,6 +7,35 @@ operator must take care that the store's API endpoint is **only** exposed to the For more information on the installation and operation of this component, please see the [node's readme](/README.md). +## RocksDB Feature + +The `rocksdb` feature (enabled by default) provides disk-backed storage via RocksDB for `LargeSmt`. Building _requires_ LLVM/Clang for `bindgen`. + +### Using System Libraries + +To avoid compiling RocksDB from source and safe yourself some time, use system libraries: + +```bash +# Install system RocksDB +# (Ubuntu/Debian) +#sudo apt-get install librocksdb-dev clang llvm-dev libclang-dev +# (Fedora) +#sudo dnf install rocksdb rocksdb-devel llvm19 clang19 + +# Set environment variables to use system library +export ROCKSDB_LIB_DIR=/usr/lib +export ROCKSDB_INCLUDE_DIR=/usr/include +# export ROCKSDB_STATIC=1 (optional) +# (Ubuntu/Debian) +#export LIBCLANG_PATH=/usr/lib/llvm-14/lib +# (Fedora) +#export LIBCLANG_PATH=/usr/lib64/llvm19/lib +``` + +### Building from Source + +Without the environment variables above, `librocksdb-sys` compiles RocksDB from source, which requires a C/C++ toolchain. + ## API overview The full gRPC API can be found [here](../../proto/proto/store.proto). @@ -14,8 +43,7 @@ The full gRPC API can be found [here](../../proto/proto/store.proto). 
- [ApplyBlock](#applyblock) - [CheckNullifiers](#checknullifiers) -- [GetAccountDetails](#getaccountdetails) -- [GetAccountProofs](#getaccountproofs) +- [GetAccount](#getaccount) - [GetBlockByNumber](#getblockbynumber) - [GetBlockHeaderByNumber](#getblockheaderbynumber) - [GetBlockInputs](#getblockinputs) @@ -55,15 +83,13 @@ When nullifier checking fails, detailed error information is provided through gR --- -### GetAccountDetails +### GetAccount -Returns the latest state of an account with the specified ID. - ---- +Returns an account witness (Merkle proof of inclusion in the account tree) and optionally account details. -### GetAccountProofs +The witness proves the account's state commitment in the account tree. If details are requested, the response also includes the account's header, code, vault assets, and storage data. Account details are only available for public accounts. -Returns the latest state proofs of the specified accounts. +If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. 
--- diff --git a/crates/store/benches/account_tree_historical.rs b/crates/store/benches/account_tree.rs similarity index 78% rename from crates/store/benches/account_tree_historical.rs rename to crates/store/benches/account_tree.rs index dbb538d5a4..8c3f1009ec 100644 --- a/crates/store/benches/account_tree_historical.rs +++ b/crates/store/benches/account_tree.rs @@ -1,22 +1,44 @@ use std::hint::black_box; +use std::path::Path; +use std::sync::atomic::{AtomicUsize, Ordering}; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; -use miden_node_store::{AccountTreeWithHistory, InMemoryAccountTree}; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; -use miden_objects::crypto::hash::rpo::Rpo256; -use miden_objects::crypto::merkle::{LargeSmt, MemoryStorage}; -use miden_objects::testing::account_id::AccountIdBuilder; +use miden_crypto::merkle::smt::{RocksDbConfig, RocksDbStorage}; +use miden_node_store::AccountTreeWithHistory; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; +use miden_protocol::crypto::hash::rpo::Rpo256; +use miden_protocol::crypto::merkle::smt::LargeSmt; +use miden_protocol::testing::account_id::AccountIdBuilder; + +/// Counter for creating unique `RocksDB` directories during benchmarking. +static DB_COUNTER: AtomicUsize = AtomicUsize::new(0); // HELPER FUNCTIONS // ================================================================================================ -/// Creates a storage backend for a `LargeSmt`. -fn setup_storage() -> MemoryStorage { - // TODO migrate to RocksDB for persistence to gain meaningful numbers - MemoryStorage::default() +/// Returns the default base path for `RocksDB` benchmark storage. 
+fn default_storage_path() -> std::path::PathBuf { + std::path::PathBuf::from("target/bench_rocksdb") +} + +/// Creates a `RocksDB` storage instance for benchmarking. +/// +/// # Arguments +/// * `base_path` - Base directory for `RocksDB` storage. Each call creates a unique subdirectory. +fn setup_storage(base_path: &Path) -> RocksDbStorage { + let counter = DB_COUNTER.fetch_add(1, Ordering::SeqCst); + let db_path = base_path.join(format!("bench_rocksdb_{counter}")); + + // Clean up the directory if it exists + if db_path.exists() { + fs_err::remove_dir_all(&db_path).ok(); + } + fs_err::create_dir_all(&db_path).expect("Failed to create storage directory"); + + RocksDbStorage::open(RocksDbConfig::new(db_path)).expect("RocksDB failed to open file") } /// Generates a deterministic word from a seed. @@ -47,7 +69,8 @@ fn generate_account_id(seed: &mut [u8; 32]) -> AccountId { /// Sets up a vanilla `AccountTree` with specified number of accounts. fn setup_vanilla_account_tree( num_accounts: usize, -) -> (AccountTree>, Vec) { + base_path: &Path, +) -> (AccountTree>, Vec) { let mut seed = [0u8; 32]; let mut account_ids = Vec::new(); let mut entries = Vec::new(); @@ -59,7 +82,7 @@ fn setup_vanilla_account_tree( entries.push((account_id_to_smt_key(account_id), commitment)); } - let storage = setup_storage(); + let storage = setup_storage(base_path); let smt = LargeSmt::with_entries(storage, entries).expect("Failed to create LargeSmt from entries"); let tree = AccountTree::new(smt).expect("Failed to create AccountTree"); @@ -70,9 +93,10 @@ fn setup_vanilla_account_tree( fn setup_account_tree_with_history( num_accounts: usize, num_blocks: usize, -) -> (AccountTreeWithHistory, Vec) { + base_path: &Path, +) -> (AccountTreeWithHistory, Vec) { let mut seed = [0u8; 32]; - let storage = setup_storage(); + let storage = setup_storage(base_path); let smt = LargeSmt::with_entries(storage, std::iter::empty()) .expect("Failed to create empty LargeSmt"); let account_tree = 
AccountTree::new(smt).expect("Failed to create AccountTree"); @@ -104,11 +128,12 @@ fn setup_account_tree_with_history( /// This provides a baseline for comparison with historical access operations. fn bench_vanilla_access(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_vanilla_access"); + let base_path = default_storage_path(); let account_counts = [1, 10, 50, 100, 500, 1000]; for &num_accounts in &account_counts { - let (tree, account_ids) = setup_vanilla_account_tree(num_accounts); + let (tree, account_ids) = setup_vanilla_account_tree(num_accounts, &base_path); group.bench_function(BenchmarkId::new("vanilla", num_accounts), |b| { let test_account = *account_ids.first().unwrap(); @@ -125,6 +150,7 @@ fn bench_vanilla_access(c: &mut Criterion) { /// This provides a baseline for comparison with history-tracking insertion. fn bench_vanilla_insertion(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_insertion"); + let base_path = default_storage_path(); let account_counts = [1, 10, 50, 100, 500]; @@ -132,7 +158,7 @@ fn bench_vanilla_insertion(c: &mut Criterion) { group.bench_function(BenchmarkId::new("vanilla", num_accounts), |b| { b.iter(|| { let mut seed = [0u8; 32]; - let storage = setup_storage(); + let storage = setup_storage(&base_path); let smt = LargeSmt::with_entries(storage, std::iter::empty()) .expect("Failed to create empty LargeSmt"); let mut tree = AccountTree::new(smt).expect("Failed to create AccountTree"); @@ -158,18 +184,19 @@ fn bench_vanilla_insertion(c: &mut Criterion) { /// Benchmarks historical access at different depths and account counts. 
fn bench_historical_access(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_historical_access"); + let base_path = default_storage_path(); let account_counts = [10, 100, 500, 2500]; let block_depths = [0, 5, 10, 20, 32]; for &num_accounts in &account_counts { for &block_depth in &block_depths { - if block_depth > AccountTreeWithHistory::::MAX_HISTORY { + if block_depth > AccountTreeWithHistory::::MAX_HISTORY { continue; } let (tree_hist, account_ids) = - setup_account_tree_with_history(num_accounts, block_depth + 1); + setup_account_tree_with_history(num_accounts, block_depth + 1, &base_path); let current_block = tree_hist.block_number_latest(); let target_block = current_block .checked_sub(u32::try_from(block_depth).unwrap()) @@ -197,6 +224,7 @@ fn bench_historical_access(c: &mut Criterion) { /// Benchmarks insertion performance with history tracking at different account counts. fn bench_insertion_with_history(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_insertion"); + let base_path = default_storage_path(); let account_counts = [1, 10, 50, 100, 500, 2500]; @@ -204,7 +232,7 @@ fn bench_insertion_with_history(c: &mut Criterion) { group.bench_function(BenchmarkId::new("with_history", num_accounts), |b| { b.iter(|| { let mut seed = [0u8; 32]; - let storage = setup_storage(); + let storage = setup_storage(&base_path); let smt = LargeSmt::with_entries(storage, std::iter::empty()) .expect("Failed to create empty LargeSmt"); let account_tree = AccountTree::new(smt).expect("Failed to create AccountTree"); diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index bf18b815a0..d015408adb 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -2,24 +2,27 @@ use std::collections::{BTreeMap, HashMap}; -use miden_objects::account::{AccountId, AccountIdPrefix}; -use miden_objects::block::account_tree::{AccountMutationSet, AccountTree}; -use 
miden_objects::block::{AccountWitness, BlockNumber}; -use miden_objects::crypto::merkle::{ - EmptySubtreeRoots, +use miden_protocol::account::{AccountId, AccountIdPrefix}; +use miden_protocol::block::BlockNumber; +use miden_protocol::block::account_tree::{AccountMutationSet, AccountTree, AccountWitness}; +use miden_protocol::crypto::merkle::smt::{ LargeSmt, LeafIndex, MemoryStorage, - MerkleError, - MerklePath, - NodeIndex, NodeMutation, SMT_DEPTH, SmtLeaf, SmtStorage, +}; +use miden_protocol::crypto::merkle::{ + EmptySubtreeRoots, + MerkleError, + MerklePath, + NodeIndex, SparseMerklePath, }; -use miden_objects::{AccountTreeError, EMPTY_WORD, Word}; +use miden_protocol::errors::AccountTreeError; +use miden_protocol::{EMPTY_WORD, Word}; #[cfg(test)] mod tests; @@ -27,77 +30,9 @@ mod tests; /// Convenience for an in-memory-only account tree. pub type InMemoryAccountTree = AccountTree>; -// ACCOUNT TREE STORAGE TRAIT -// ================================================================================================ - -/// Trait abstracting operations over different account tree backends. -pub trait AccountTreeStorage { - /// Returns the root hash of the tree. - fn root(&self) -> Word; - - /// Returns the number of accounts in the tree. - fn num_accounts(&self) -> usize; - - /// Opens an account and returns its witness. - fn open(&self, account_id: AccountId) -> AccountWitness; - - /// Gets the account state commitment. - fn get(&self, account_id: AccountId) -> Word; - - /// Computes mutations for applying account updates. - fn compute_mutations( - &self, - accounts: impl IntoIterator, - ) -> Result; - - /// Applies mutations with reversion data. - fn apply_mutations_with_reversion( - &mut self, - mutations: AccountMutationSet, - ) -> Result; - - /// Checks if the tree contains an account with the given prefix. 
- fn contains_account_id_prefix(&self, prefix: AccountIdPrefix) -> bool; -} - -impl AccountTreeStorage for AccountTree> -where - S: SmtStorage, -{ - fn root(&self) -> Word { - self.root() - } - - fn num_accounts(&self) -> usize { - self.num_accounts() - } - - fn open(&self, account_id: AccountId) -> AccountWitness { - self.open(account_id) - } - - fn get(&self, account_id: AccountId) -> Word { - self.get(account_id) - } - - fn compute_mutations( - &self, - accounts: impl IntoIterator, - ) -> Result { - self.compute_mutations(accounts) - } - - fn apply_mutations_with_reversion( - &mut self, - mutations: AccountMutationSet, - ) -> Result { - self.apply_mutations_with_reversion(mutations) - } - - fn contains_account_id_prefix(&self, prefix: AccountIdPrefix) -> bool { - self.contains_account_id_prefix(prefix) - } -} +#[cfg(feature = "rocksdb")] +/// Convenience for a persistent account tree. +pub type PersistentAccountTree = AccountTree>; // HISTORICAL ERROR TYPES // ================================================================================================ @@ -178,31 +113,25 @@ impl HistoricalOverlay { /// This structure maintains a sliding window of historical account states by storing /// reversion data (mutations that undo changes). Historical witnesses are reconstructed /// by starting from the latest state and applying reversion overlays backwards in time. -#[derive(Debug, Clone)] -pub struct AccountTreeWithHistory -where - S: AccountTreeStorage, -{ +#[derive(Debug)] +pub struct AccountTreeWithHistory { /// The current block number (latest state). block_number: BlockNumber, /// The latest account tree state. - latest: S, + latest: AccountTree>, /// Historical overlays indexed by block number, storing reversion data. overlays: BTreeMap, } -impl AccountTreeWithHistory -where - S: AccountTreeStorage, -{ +impl AccountTreeWithHistory { /// Maximum number of historical blocks to maintain. 
- pub const MAX_HISTORY: usize = 33; + pub const MAX_HISTORY: usize = 50; // CONSTRUCTORS // -------------------------------------------------------------------------------------------- /// Creates a new historical tree starting at the given block number. - pub fn new(account_tree: S, block_number: BlockNumber) -> Self { + pub fn new(account_tree: AccountTree>, block_number: BlockNumber) -> Self { Self { block_number, latest: account_tree, diff --git a/crates/store/src/accounts/tests.rs b/crates/store/src/accounts/tests.rs index 9d90e975e0..f709289469 100644 --- a/crates/store/src/accounts/tests.rs +++ b/crates/store/src/accounts/tests.rs @@ -6,19 +6,19 @@ #[allow(clippy::uninlined_format_args)] #[allow(clippy::cast_sign_loss)] mod account_tree_with_history_tests { - use miden_objects::Word; - use miden_objects::account::AccountId; - use miden_objects::block::BlockNumber; - use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; - use miden_objects::crypto::merkle::{LargeSmt, MemoryStorage}; - use miden_objects::testing::account_id::AccountIdBuilder; + use miden_protocol::Word; + use miden_protocol::account::AccountId; + use miden_protocol::block::BlockNumber; + use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; + use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage}; + use miden_protocol::testing::account_id::AccountIdBuilder; use super::super::*; /// Helper function to create an `AccountTree` from entries using the new API fn create_account_tree( entries: impl IntoIterator, - ) -> AccountTree> { + ) -> InMemoryAccountTree { let smt_entries = entries .into_iter() .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); @@ -84,8 +84,7 @@ mod account_tree_with_history_tests { #[test] fn test_history_limits() { - const MAX_HIST: u32 = - AccountTreeWithHistory::>>::MAX_HISTORY as u32; + const MAX_HIST: u32 = AccountTreeWithHistory::::MAX_HISTORY as u32; use assert_matches::assert_matches; let id = 
AccountIdBuilder::new().build_with_seed([30; 32]); diff --git a/crates/store/src/blocks.rs b/crates/store/src/blocks.rs index 5c9570252a..e771332ba9 100644 --- a/crates/store/src/blocks.rs +++ b/crates/store/src/blocks.rs @@ -2,8 +2,8 @@ use std::io::ErrorKind; use std::ops::Not; use std::path::PathBuf; -use miden_lib::utils::Serializable; -use miden_objects::block::BlockNumber; +use miden_protocol::block::BlockNumber; +use miden_protocol::utils::Serializable; use tracing::instrument; use crate::COMPONENT; @@ -31,7 +31,7 @@ impl BlockStore { fields(path = %store_dir.display()), )] pub fn bootstrap(store_dir: PathBuf, genesis_block: &GenesisBlock) -> std::io::Result { - std::fs::create_dir(&store_dir)?; + fs_err::create_dir(&store_dir)?; let block_store = Self { store_dir }; block_store.save_block_blocking(BlockNumber::GENESIS, &genesis_block.inner().to_bytes())?; @@ -55,7 +55,7 @@ impl BlockStore { /// /// See also: [`std::fs::metadata`]. pub fn load(store_dir: PathBuf) -> std::io::Result { - let meta = std::fs::metadata(&store_dir)?; + let meta = fs_err::metadata(&store_dir)?; if meta.is_dir().not() { return Err(ErrorKind::NotADirectory.into()); } @@ -101,10 +101,10 @@ impl BlockStore { ) -> Result<(), std::io::Error> { let (epoch_path, block_path) = self.epoch_block_path(block_num)?; if !epoch_path.exists() { - std::fs::create_dir_all(epoch_path)?; + fs_err::create_dir_all(epoch_path)?; } - std::fs::write(block_path, data) + fs_err::write(block_path, data) } // HELPER FUNCTIONS diff --git a/crates/store/src/db/migrations.rs b/crates/store/src/db/migrations.rs index ad78548c6e..01521e5787 100644 --- a/crates/store/src/db/migrations.rs +++ b/crates/store/src/db/migrations.rs @@ -3,6 +3,7 @@ use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; use tracing::instrument; use crate::COMPONENT; +use crate::db::schema_hash::verify_schema; // The rebuild is automatically triggered by `build.rs` as described in // . 
@@ -17,6 +18,8 @@ pub fn apply_migrations( tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + // Migrations applied successfully, verify schema hash + verify_schema(conn)?; return Ok(()); }; tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 7235ad1bea..b3ca25d563 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -18,49 +18,53 @@ CREATE TABLE accounts ( block_num INTEGER NOT NULL, account_commitment BLOB NOT NULL, code_commitment BLOB, - storage BLOB, - vault BLOB, nonce INTEGER, + storage_header BLOB, -- Serialized AccountStorageHeader from miden-objects + vault_root BLOB, -- Vault root commitment + is_latest BOOLEAN NOT NULL DEFAULT 0, -- Indicates if this is the latest state for this account_id + created_at_block INTEGER NOT NULL, - PRIMARY KEY (account_id), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num), - FOREIGN KEY (code_commitment) REFERENCES account_codes(code_commitment), + PRIMARY KEY (account_id, block_num), CONSTRAINT all_null_or_none_null CHECK ( - (code_commitment IS NOT NULL AND storage IS NOT NULL AND vault IS NOT NULL AND nonce IS NOT NULL) + (code_commitment IS NOT NULL AND nonce IS NOT NULL AND storage_header IS NOT NULL AND vault_root IS NOT NULL) OR - (code_commitment IS NULL AND storage IS NULL AND vault IS NULL AND nonce IS NULL) + (code_commitment IS NULL AND nonce IS NULL AND storage_header IS NULL AND vault_root IS NULL) ) ) WITHOUT ROWID; CREATE INDEX idx_accounts_network_prefix ON accounts(network_account_id_prefix) WHERE network_account_id_prefix IS NOT NULL; +CREATE INDEX idx_accounts_id_block ON accounts(account_id, block_num DESC); +CREATE INDEX idx_accounts_latest ON 
accounts(account_id, is_latest) WHERE is_latest = 1; +CREATE INDEX idx_accounts_created_at_block ON accounts(created_at_block); +-- Index for joining with block_headers +CREATE INDEX idx_accounts_block_num ON accounts(block_num); +-- Index for joining with account_codes +CREATE INDEX idx_accounts_code_commitment ON accounts(code_commitment) WHERE code_commitment IS NOT NULL; CREATE TABLE notes ( - committed_at INTEGER NOT NULL, -- Block number when the note was committed - batch_index INTEGER NOT NULL, -- Index of batch in block, starting from 0 - note_index INTEGER NOT NULL, -- Index of note in batch, starting from 0 - note_id BLOB NOT NULL, - note_commitment BLOB NOT NULL, - note_type INTEGER NOT NULL, -- 1-Public (0b01), 2-Private (0b10), 3-Encrypted (0b11) - sender BLOB NOT NULL, - tag INTEGER NOT NULL, - execution_mode INTEGER NOT NULL, -- 0-Network, 1-Local - aux INTEGER NOT NULL, - execution_hint INTEGER NOT NULL, - inclusion_path BLOB NOT NULL, -- Serialized sparse Merkle path of the note in the block's note tree - consumed_at INTEGER, -- Block number when the note was consumed - nullifier BLOB, -- Only known for public notes, null for private notes - assets BLOB, - inputs BLOB, - script_root BLOB, - serial_num BLOB, + committed_at INTEGER NOT NULL, -- Block number when the note was committed + batch_index INTEGER NOT NULL, -- Index of batch in block, starting from 0 + note_index INTEGER NOT NULL, -- Index of note in batch, starting from 0 + note_id BLOB NOT NULL, + note_commitment BLOB NOT NULL, + note_type INTEGER NOT NULL, -- 1-Public (0b01), 2-Private (0b10), 3-Encrypted (0b11) + sender BLOB NOT NULL, + tag INTEGER NOT NULL, + network_note_type INTEGER NOT NULL, -- 0-not a network note, 1-single account target network note + target_account_id BLOB, -- Full target account ID for single-target network notes + attachment BLOB NOT NULL, -- Serialized note attachment data + inclusion_path BLOB NOT NULL, -- Serialized sparse Merkle path of the note in the 
block's note tree + consumed_at INTEGER, -- Block number when the note was consumed + nullifier BLOB, -- Only known for public notes, null for private notes + assets BLOB, + inputs BLOB, + script_root BLOB, + serial_num BLOB, PRIMARY KEY (committed_at, batch_index, note_index), - FOREIGN KEY (committed_at) REFERENCES block_headers(block_num), - FOREIGN KEY (sender) REFERENCES accounts(account_id), - FOREIGN KEY (script_root) REFERENCES note_scripts(script_root), CONSTRAINT notes_type_in_enum CHECK (note_type BETWEEN 1 AND 3), - CONSTRAINT notes_execution_mode_in_enum CHECK (execution_mode BETWEEN 0 AND 1), + CONSTRAINT notes_network_note_type_in_enum CHECK (network_note_type BETWEEN 0 AND 1), CONSTRAINT notes_consumed_at_is_u32 CHECK (consumed_at BETWEEN 0 AND 0xFFFFFFFF), CONSTRAINT notes_batch_index_is_u32 CHECK (batch_index BETWEEN 0 AND 0xFFFFFFFF), CONSTRAINT notes_note_index_is_u32 CHECK (note_index BETWEEN 0 AND 0xFFFFFFFF) @@ -71,7 +75,13 @@ CREATE INDEX idx_notes_note_commitment ON notes(note_commitment); CREATE INDEX idx_notes_sender ON notes(sender, committed_at); CREATE INDEX idx_notes_tag ON notes(tag, committed_at); CREATE INDEX idx_notes_nullifier ON notes(nullifier); -CREATE INDEX idx_unconsumed_network_notes ON notes(execution_mode, consumed_at); +CREATE INDEX idx_notes_target_account ON notes(target_account_id, committed_at) WHERE target_account_id IS NOT NULL; +-- Index for joining with block_headers on committed_at +CREATE INDEX idx_notes_committed_at ON notes(committed_at); +-- Index for joining with note_scripts +CREATE INDEX idx_notes_script_root ON notes(script_root) WHERE script_root IS NOT NULL; +-- Index for joining with block_headers on consumed_at +CREATE INDEX idx_notes_consumed_at ON notes(consumed_at) WHERE consumed_at IS NOT NULL; CREATE TABLE note_scripts ( script_root BLOB NOT NULL, @@ -81,30 +91,37 @@ CREATE TABLE note_scripts ( ) WITHOUT ROWID; CREATE TABLE account_storage_map_values ( - account_id BLOB NOT NULL, + account_id 
BLOB NOT NULL, block_num INTEGER NOT NULL, - slot INTEGER NOT NULL, + slot_name TEXT NOT NULL, key BLOB NOT NULL, value BLOB NOT NULL, - is_latest_update BOOLEAN NOT NULL, + is_latest BOOLEAN NOT NULL, - PRIMARY KEY (account_id, block_num, slot, key), - CONSTRAINT slot_is_u8 CHECK (slot BETWEEN 0 AND 0xFF) + PRIMARY KEY (account_id, block_num, slot_name, key), + FOREIGN KEY (account_id, block_num) REFERENCES accounts(account_id, block_num) ON DELETE CASCADE ) WITHOUT ROWID; -CREATE INDEX asm_latest_by_acct_block_slot_key ON account_storage_map_values(account_id, block_num); +-- Index for joining with accounts table on compound key +CREATE INDEX idx_account_storage_account_block ON account_storage_map_values(account_id, block_num); +-- Index for querying latest values +CREATE INDEX idx_account_storage_latest ON account_storage_map_values(account_id, is_latest) WHERE is_latest = 1; CREATE TABLE account_vault_assets ( account_id BLOB NOT NULL, block_num INTEGER NOT NULL, vault_key BLOB NOT NULL, asset BLOB, - is_latest_update BOOLEAN NOT NULL, + is_latest BOOLEAN NOT NULL, - PRIMARY KEY (account_id, block_num, vault_key) + PRIMARY KEY (account_id, block_num, vault_key), + FOREIGN KEY (account_id, block_num) REFERENCES accounts(account_id, block_num) ON DELETE CASCADE ) WITHOUT ROWID; -CREATE INDEX idx_vault_assets_id_block ON account_vault_assets (account_id, block_num); +-- Index for joining with accounts table on compound key +CREATE INDEX idx_vault_assets_account_block ON account_vault_assets(account_id, block_num); +-- Index for querying latest assets +CREATE INDEX idx_vault_assets_latest ON account_vault_assets(account_id, is_latest) WHERE is_latest = 1; CREATE TABLE nullifiers ( nullifier BLOB NOT NULL, @@ -112,12 +129,12 @@ CREATE TABLE nullifiers ( block_num INTEGER NOT NULL, PRIMARY KEY (nullifier), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num), CONSTRAINT nullifiers_nullifier_is_digest CHECK (length(nullifier) = 32), CONSTRAINT 
nullifiers_nullifier_prefix_is_u16 CHECK (nullifier_prefix BETWEEN 0 AND 0xFFFF) ) WITHOUT ROWID; CREATE INDEX idx_nullifiers_prefix ON nullifiers(nullifier_prefix); +-- Index for joining with block_headers CREATE INDEX idx_nullifiers_block_num ON nullifiers(block_num); CREATE TABLE transactions ( @@ -126,14 +143,14 @@ CREATE TABLE transactions ( block_num INTEGER NOT NULL, -- Block number in which the transaction was included. initial_state_commitment BLOB NOT NULL, -- State of the account before applying the transaction. final_state_commitment BLOB NOT NULL, -- State of the account after applying the transaction. - input_notes BLOB NOT NULL, -- Serialized vector with the Nullifier of the input notes. + nullifiers BLOB NOT NULL, -- Serialized vector with the Nullifier of the input notes. output_notes BLOB NOT NULL, -- Serialized vector with the NoteId of the output notes. size_in_bytes INTEGER NOT NULL, -- Estimated size of the row in bytes, considering the size of the input and output notes. 
- PRIMARY KEY (transaction_id), - FOREIGN KEY (account_id) REFERENCES accounts(account_id), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num) + PRIMARY KEY (transaction_id) ) WITHOUT ROWID; +-- Index for joining with accounts (note: account may not exist in accounts table) CREATE INDEX idx_transactions_account_id ON transactions(account_id); +-- Index for joining with block_headers CREATE INDEX idx_transactions_block_num ON transactions(block_num); diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index a94df859e4..ee7c722c8a 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,20 +1,18 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::ops::RangeInclusive; use std::path::PathBuf; use anyhow::Context; -use diesel::prelude::QueryableByName; -use diesel::{Connection, RunQueryDsl, SqliteConnection}; -use miden_lib::utils::{Deserializable, Serializable}; -use miden_node_proto::domain::account::{AccountInfo, AccountSummary, NetworkAccountPrefix}; +use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; +use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_proto::generated as proto; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::asset::{Asset, AssetVaultKey}; -use miden_objects::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::note::{ +use miden_protocol::Word; +use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; +use miden_protocol::asset::{Asset, AssetVaultKey}; +use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::note::{ NoteDetails, NoteId, NoteInclusionProof, @@ -22,7 +20,8 @@ use 
miden_objects::note::{ NoteScript, Nullifier, }; -use miden_objects::transaction::TransactionId; +use miden_protocol::transaction::TransactionId; +use miden_protocol::utils::{Deserializable, Serializable}; use tokio::sync::oneshot; use tracing::{Instrument, info, instrument}; @@ -38,6 +37,7 @@ use crate::genesis::GenesisBlock; pub(crate) mod manager; mod migrations; +mod schema_hash; #[cfg(test)] mod tests; @@ -102,8 +102,8 @@ pub struct TransactionRecord { pub account_id: AccountId, pub initial_state_commitment: Word, pub final_state_commitment: Word, - pub input_notes: Vec, // Store nullifiers for input notes - pub output_notes: Vec, // Store note IDs for output notes + pub nullifiers: Vec, // Store nullifiers for input notes + pub output_notes: Vec, // Store note IDs for output notes } impl TransactionRecord { @@ -113,16 +113,15 @@ impl TransactionRecord { pub fn into_proto_with_note_records( self, note_records: Vec, - ) -> proto::rpc_store::TransactionRecord { - let output_notes: Vec = - note_records.into_iter().map(Into::into).collect(); + ) -> proto::rpc::TransactionRecord { + let output_notes = Vec::from_iter(note_records.into_iter().map(Into::into)); - proto::rpc_store::TransactionRecord { - transaction_header: Some(proto::transaction::TransactionHeader { + proto::rpc::TransactionRecord { + header: Some(proto::transaction::TransactionHeader { account_id: Some(self.account_id.into()), initial_state_commitment: Some(self.initial_state_commitment.into()), final_state_commitment: Some(self.final_state_commitment.into()), - input_notes: self.input_notes.into_iter().map(From::from).collect(), + nullifiers: self.nullifiers.into_iter().map(From::from).collect(), output_notes, }), block_num: self.block_num.as_u32(), @@ -252,8 +251,8 @@ impl Db { genesis.header(), &[], &[], - genesis.updated_accounts(), - genesis.transactions(), + genesis.body().updated_accounts(), + genesis.body().transactions(), ) }) .context("failed to insert genesis block")?; @@ -327,7 +326,7 @@ 
impl Db { /// Loads all the nullifiers from the DB. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_nullifiers(&self) -> Result> { + pub(crate) async fn select_all_nullifiers(&self) -> Result> { self.transact("all nullifiers", move |conn| { let nullifiers = queries::select_all_nullifiers(conn)?; Ok(nullifiers) @@ -396,7 +395,7 @@ impl Db { .await } - /// Loads all the account commitments from the DB. + /// TODO marked for removal, replace with paged version #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_all_account_commitments(&self) -> Result> { self.transact("read all account commitments", move |conn| { @@ -405,6 +404,15 @@ impl Db { .await } + /// Returns all account IDs that have public state. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_public_account_ids(&self) -> Result> { + self.transact("read all public account IDs", move |conn| { + queries::select_all_public_account_ids(conn) + }) + .await + } + /// Loads public account details from the DB. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account(&self, id: AccountId) -> Result { @@ -424,6 +432,71 @@ impl Db { .await } + /// Returns network account IDs within the specified block range (based on account creation + /// block). + /// + /// The function may return fewer accounts than exist in the range if the result would exceed + /// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is + /// truncated at a block boundary to ensure all accounts from included blocks are returned. + /// + /// # Returns + /// + /// A tuple containing: + /// - A vector of network account IDs. + /// - The last block number that was fully included in the result. When truncated, this will be + /// less than the requested range end. 
+ #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_network_account_ids( + &self, + block_range: RangeInclusive, + ) -> Result<(Vec, BlockNumber)> { + self.transact("Get all network account IDs", move |conn| { + queries::select_all_network_account_ids(conn, block_range) + }) + .await + } + + /// Queries vault assets at a specific block + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_account_vault_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result> { + self.transact("Get account vault at block", move |conn| { + queries::select_account_vault_at_block(conn, account_id, block_num) + }) + .await + } + + /// Queries the account code by its commitment hash. + /// + /// Returns `None` if no code exists with that commitment. + pub async fn select_account_code_by_commitment( + &self, + code_commitment: Word, + ) -> Result>> { + self.transact("Get account code by commitment", move |conn| { + queries::select_account_code_by_commitment(conn, code_commitment) + }) + .await + } + + /// Queries the account header and storage header for a specific account at a block. + /// + /// Returns both in a single query to avoid querying the database twice. + /// Returns `None` if the account doesn't exist at that block. 
+ pub async fn select_account_header_with_storage_header_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result> { + self.transact("Get account header with storage header at block", move |conn| { + queries::select_account_header_with_storage_header_at_block(conn, account_id, block_num) + }) + .await + } + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_state_sync( &self, @@ -449,7 +522,7 @@ impl Db { .await } - /// Loads all the [`miden_objects::note::Note`]s matching a certain [`NoteId`] from the + /// Loads all the [`miden_protocol::note::Note`]s matching a certain [`NoteId`] from the /// database. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_notes_by_id(&self, note_ids: Vec) -> Result> { @@ -459,15 +532,14 @@ impl Db { .await } - /// Loads all the [`NoteRecord`]s matching a certain note commitment from the - /// database. + /// Returns all note commitments from the DB that match the provided ones. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_notes_by_commitment( + pub async fn select_existing_note_commitments( &self, note_commitments: Vec, - ) -> Result> { + ) -> Result> { self.transact("note by commitment", move |conn| { - queries::select_notes_by_commitment(conn, note_commitments.as_slice()) + queries::select_existing_note_commitments(conn, note_commitments.as_slice()) }) .await } @@ -502,9 +574,9 @@ impl Db { conn, block.header(), ¬es, - block.created_nullifiers(), - block.updated_accounts(), - block.transactions(), + block.body().created_nullifiers(), + block.body().updated_accounts(), + block.body().transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist @@ -535,18 +607,6 @@ impl Db { .await } - /// Runs database optimization. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, err)] - pub async fn optimize(&self) -> Result<(), DatabaseError> { - self.transact("db optimization", |conn| { - diesel::sql_query("PRAGMA optimize") - .execute(conn) - .map_err(DatabaseError::Diesel) - }) - .await?; - Ok(()) - } - /// Emits size metrics for each table in the database, and the entire database. #[instrument(target = COMPONENT, skip_all, err)] pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { @@ -588,35 +648,17 @@ impl Db { Ok(()) } - /// Loads the network notes that have not been consumed yet, using pagination to limit the - /// number of notes returned. - pub(crate) async fn select_unconsumed_network_notes( - &self, - page: Page, - ) -> Result<(Vec, Page)> { - self.transact("unconsumed network notes", move |conn| { - models::queries::unconsumed_network_notes(conn, page) - }) - .await - } - /// Loads the network notes for an account that are unconsumed by a specified block number. /// Pagination is used to limit the number of notes returned. - pub(crate) async fn select_unconsumed_network_notes_for_account( + pub(crate) async fn select_unconsumed_network_notes( &self, - network_account_id_prefix: NetworkAccountPrefix, + account_id: AccountId, block_num: BlockNumber, page: Page, ) -> Result<(Vec, Page)> { - // Network notes sent to a specific account have their tags set to the prefix of the target - // account ID. So we can convert the ID prefix into a note tag to query the notes for a - // given account. 
self.transact("unconsumed network notes for account", move |conn| { - models::queries::select_unconsumed_network_notes_by_tag( - conn, - network_account_id_prefix.into(), - block_num, - page, + models::queries::select_unconsumed_network_notes_by_account_id( + conn, account_id, block_num, page, ) }) .await diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index ffc7b80f6b..4dcd012efa 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -32,87 +32,109 @@ on relevant platforms" )] -use std::any::type_name; - -use miden_node_proto::domain::account::{NetworkAccountError, NetworkAccountPrefix}; -use miden_objects::Felt; -use miden_objects::block::BlockNumber; -use miden_objects::note::{NoteExecutionMode, NoteTag}; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::Felt; +use miden_protocol::account::{StorageSlotName, StorageSlotType}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::NoteTag; #[derive(Debug, thiserror::Error)] -#[error("failed to convert a database value to it's in memory type {0}")] -pub struct DatabaseTypeConversionError(&'static str); +#[error("failed to convert from database type {from_type} into {into_type}")] +pub struct DatabaseTypeConversionError { + source: Box, + from_type: &'static str, + into_type: &'static str, +} /// Convert from and to it's database representation and back /// /// We do not assume sanity of DB types. 
pub(crate) trait SqlTypeConvert: Sized { type Raw: Sized; - type Error: std::error::Error + Send + Sync + 'static; + fn to_raw_sql(self) -> Self::Raw; - fn from_raw_sql(_raw: Self::Raw) -> Result; + fn from_raw_sql(_raw: Self::Raw) -> Result; + + fn map_err( + source: E, + ) -> DatabaseTypeConversionError { + DatabaseTypeConversionError { + source: Box::new(source), + from_type: std::any::type_name::(), + into_type: std::any::type_name::(), + } + } } impl SqlTypeConvert for BlockNumber { type Raw = i64; - type Error = DatabaseTypeConversionError; - fn from_raw_sql(raw: Self::Raw) -> Result { - u32::try_from(raw) - .map(BlockNumber::from) - .map_err(|_| DatabaseTypeConversionError(type_name::())) + + fn from_raw_sql(raw: Self::Raw) -> Result { + u32::try_from(raw).map(BlockNumber::from).map_err(Self::map_err) } + fn to_raw_sql(self) -> Self::Raw { i64::from(self.as_u32()) } } -impl SqlTypeConvert for NetworkAccountPrefix { - type Raw = i64; - type Error = DatabaseTypeConversionError; - fn from_raw_sql(raw: Self::Raw) -> Result { - NetworkAccountPrefix::try_from(raw as u32) - .map_err(|_e| DatabaseTypeConversionError(type_name::())) +/// Converts a network account ID to its 30-bit prefix for database indexing. 
+#[inline(always)] +pub(crate) fn network_account_id_to_prefix_sql(id: NetworkAccountId) -> i64 { + i64::from(id.prefix()) +} + +impl SqlTypeConvert for NoteTag { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[allow(clippy::cast_sign_loss)] + Ok(NoteTag::new(raw as u32)) } + + #[inline(always)] fn to_raw_sql(self) -> Self::Raw { - i64::from(self.inner()) + self.as_u32() as i32 } } -impl SqlTypeConvert for NoteExecutionMode { +impl SqlTypeConvert for StorageSlotType { type Raw = i32; - type Error = DatabaseTypeConversionError; #[inline(always)] - fn from_raw_sql(raw: Self::Raw) -> Result { + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("invalid storage slot type value {0}")] + struct ValueError(i32); + Ok(match raw { - 0 => Self::Network, - 1 => Self::Local, - _ => return Err(DatabaseTypeConversionError(type_name::())), + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, }) } #[inline(always)] fn to_raw_sql(self) -> Self::Raw { match self { - NoteExecutionMode::Network => 0, - NoteExecutionMode::Local => 1, + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, } } } -impl SqlTypeConvert for NoteTag { - type Raw = i32; - type Error = DatabaseTypeConversionError; +impl SqlTypeConvert for StorageSlotName { + type Raw = String; - #[inline(always)] - fn from_raw_sql(raw: Self::Raw) -> Result { - #[allow(clippy::cast_sign_loss)] - Ok(NoteTag::from(raw as u32)) + fn from_raw_sql(raw: Self::Raw) -> Result { + StorageSlotName::new(raw).map_err(Self::map_err) } - #[inline(always)] fn to_raw_sql(self) -> Self::Raw { - u32::from(self) as i32 + String::from(self) } } @@ -130,25 +152,15 @@ pub(crate) fn nullifier_prefix_to_raw_sql(prefix: u16) -> i32 { } #[inline(always)] -pub(crate) fn raw_sql_to_nonce(raw: i64) -> u64 { +pub(crate) fn raw_sql_to_nonce(raw: i64) -> Felt { debug_assert!(raw >= 0); - raw as u64 
+ Felt::new(raw as u64) } #[inline(always)] pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { nonce.as_int() as i64 } -#[inline(always)] -pub(crate) fn raw_sql_to_slot(raw: i32) -> u8 { - debug_assert!(raw >= 0); - raw as u8 -} -#[inline(always)] -pub(crate) fn slot_to_raw_sql(slot: u8) -> i32 { - i32::from(slot) -} - #[inline(always)] pub(crate) fn raw_sql_to_fungible_delta(raw: i64) -> i64 { raw @@ -168,24 +180,6 @@ pub(crate) fn note_type_to_raw_sql(note_type: u8) -> i32 { i32::from(note_type) } -#[inline(always)] -pub(crate) fn raw_sql_to_execution_hint(raw: i64) -> u64 { - raw as u64 -} -#[inline(always)] -pub(crate) fn execution_hint_to_raw_sql(hint: u64) -> i64 { - hint as i64 -} - -#[inline(always)] -pub(crate) fn raw_sql_to_aux(raw: i64) -> Felt { - Felt::try_from(raw as u64).unwrap() -} -#[inline(always)] -pub(crate) fn aux_to_raw_sql(hint: Felt) -> i64 { - hint.inner() as i64 -} - #[inline(always)] pub(crate) fn raw_sql_to_idx(raw: i32) -> usize { raw as usize diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 9a0ad4fdd8..6568d5723e 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeMap; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -8,8 +9,6 @@ use diesel::{ BoolExpressionMethods, ExpressionMethods, Insertable, - JoinOnDsl, - NullableExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, @@ -17,42 +16,95 @@ use diesel::{ SelectableHelper, SqliteConnection, }; -use miden_lib::utils::{Deserializable, Serializable}; use miden_node_proto as proto; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; -use miden_node_utils::limiter::{QueryParamAccountIdLimit, QueryParamLimiter}; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_node_utils::limiter::{ + MAX_RESPONSE_PAYLOAD_BYTES, 
+ QueryParamAccountIdLimit, + QueryParamLimiter, +}; +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ Account, AccountCode, AccountDelta, AccountId, AccountStorage, + AccountStorageHeader, NonFungibleDeltaAction, + StorageMap, StorageSlot, + StorageSlotContent, + StorageSlotName, + StorageSlotType, }; -use miden_objects::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; -use miden_objects::block::{BlockAccountUpdate, BlockNumber}; -use miden_objects::{Felt, Word}; +use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; +use miden_protocol::block::{BlockAccountUpdate, BlockNumber}; +use miden_protocol::utils::{Deserializable, Serializable}; use crate::COMPONENT; use crate::db::models::conv::{ SqlTypeConvert, + network_account_id_to_prefix_sql, nonce_to_raw_sql, raw_sql_to_nonce, - raw_sql_to_slot, - slot_to_raw_sql, }; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; -/// Select the latest account details by account id from the DB using the given -/// [`SqliteConnection`]. +mod at_block; +pub(crate) use at_block::{ + select_account_header_with_storage_header_at_block, + select_account_vault_at_block, +}; + +#[cfg(test)] +mod tests; + +type StorageMapValueRow = (i64, String, Vec, Vec); + +// ACCOUNT CODE +// ================================================================================================ + +/// Select account code by its commitment hash from the `account_codes` table. +/// +/// # Returns +/// +/// The account code bytes if found, or `None` if no code exists with that commitment. 
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT code FROM account_codes WHERE code_commitment = ?1 +/// ``` +pub(crate) fn select_account_code_by_commitment( + conn: &mut SqliteConnection, + code_commitment: Word, +) -> Result>, DatabaseError> { + use schema::account_codes; + + let code_commitment_bytes = code_commitment.to_bytes(); + + let result: Option> = SelectDsl::select( + account_codes::table.filter(account_codes::code_commitment.eq(&code_commitment_bytes)), + account_codes::code, + ) + .first(conn) + .optional()?; + + Ok(result) +} + +// ACCOUNT RETRIEVAL +// ================================================================================================ + +/// Select account by ID from the DB using the given [`SqliteConnection`]. /// /// # Returns /// -/// The latest account details, or an error. +/// The latest account info, or an error. /// /// # Raw SQL /// @@ -60,46 +112,107 @@ use crate::errors::DatabaseError; /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// account_id = ?1 +/// AND is_latest = 1 /// ``` pub(crate) fn select_account( conn: &mut SqliteConnection, account_id: AccountId, ) -> Result { - let raw = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result::(conn) + .optional()? 
+ .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; + + let summary: AccountSummary = raw.try_into()?; + + // Backfill account details from database + // For private accounts, we don't store full details in the database + let details = if account_id.has_public_state() { + Some(select_full_account(conn, account_id)?) + } else { + None + }; + + Ok(AccountInfo { summary, details }) +} + +/// Reconstruct full Account from database tables for the latest account state +/// +/// This function queries the database tables to reconstruct a complete Account object: +/// - Code from `account_codes` table +/// - Nonce and storage header from `accounts` table +/// - Storage map entries from `account_storage_map_values` table +/// - Vault from `account_vault_assets` table +/// +/// # Note +/// +/// A stop-gap solution to retain store API and construct `AccountInfo` types. +/// The function should ultimately be removed, and any queries be served from the +/// `State` which contains an `SmtForest` to serve the latest and most recent +/// historical data. +// TODO: remove eventually once refactoring is complete +fn select_full_account( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + // Get account metadata (nonce, code_commitment) and code in a single join query + let (nonce, code_bytes): (Option, Vec) = SelectDsl::select( + schema::accounts::table.inner_join(schema::account_codes::table), + (schema::accounts::nonce, schema::account_codes::code), ) .filter(schema::accounts::account_id.eq(account_id.to_bytes())) - .get_result::<(AccountRaw, Option>)>(conn) + .filter(schema::accounts::is_latest.eq(true)) + .get_result(conn) .optional()? 
.ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; - let info = AccountWithCodeRawJoined::from(raw).try_into()?; - Ok(info) + + let nonce = raw_sql_to_nonce(nonce.ok_or_else(|| { + DatabaseError::DataCorrupted(format!("No nonce found for account {account_id}")) + })?); + + let code = AccountCode::read_from_bytes(&code_bytes)?; + + // Reconstruct storage using existing helper function + let storage = select_latest_account_storage(conn, account_id)?; + + // Reconstruct vault from account_vault_assets table + let vault_entries: Vec<(Vec, Option>)> = SelectDsl::select( + schema::account_vault_assets::table, + (schema::account_vault_assets::vault_key, schema::account_vault_assets::asset), + ) + .filter(schema::account_vault_assets::account_id.eq(account_id.to_bytes())) + .filter(schema::account_vault_assets::is_latest.eq(true)) + .load(conn)?; + + let mut assets = Vec::new(); + for (_key_bytes, maybe_asset_bytes) in vault_entries { + if let Some(asset_bytes) = maybe_asset_bytes { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + } + + let vault = AssetVault::new(&assets)?; + + Ok(Account::new(account_id, vault, storage, code, nonce, None)?) } -/// Select the latest account details by account ID prefix from the DB using the given -/// [`SqliteConnection`] This method is meant to be used by the network transaction builder. Because -/// network notes get matched through accounts through the account's 30-bit prefix, it is possible -/// that multiple accounts match against a single prefix. In this scenario, the first account is -/// returned. +/// Select the latest account info by account ID prefix from the DB using the given +/// [`SqliteConnection`]. Meant to be used by the network transaction builder. +/// Because network notes get matched through accounts through the account's 30-bit prefix, it is +/// possible that multiple accounts match against a single prefix. In this scenario, the first +/// account is returned. 
/// /// # Returns /// -/// The latest account details, `None` if the account was not found, or an error. +/// The latest account info, `None` if the account was not found, or an error. /// /// # Raw SQL /// @@ -107,40 +220,34 @@ pub(crate) fn select_account( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// network_account_id_prefix = ?1 +/// AND is_latest = 1 /// ``` pub(crate) fn select_account_by_id_prefix( conn: &mut SqliteConnection, id_prefix: u32, ) -> Result, DatabaseError> { - let maybe_info = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) - .get_result::<(AccountRaw, Option>)>(conn) - .optional() - .map_err(DatabaseError::Diesel)?; - - let result: Result, DatabaseError> = maybe_info - .map(AccountWithCodeRawJoined::from) - .map(std::convert::TryInto::::try_into) - .transpose(); - - result + let maybe_summary = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) + .get_result::(conn) + .optional() + .map_err(DatabaseError::Diesel)?; + + match maybe_summary { + None => Ok(None), + Some(raw) => { + let summary: AccountSummary = raw.try_into()?; + let account_id = summary.account_id; + // Backfill account details from database + let details = select_full_account(conn, account_id).ok(); + 
Ok(Some(AccountInfo { summary, details })) + }, + } } /// Select all account commitments from the DB using the given [`SqliteConnection`]. @@ -157,6 +264,8 @@ pub(crate) fn select_account_by_id_prefix( /// account_commitment /// FROM /// accounts +/// WHERE +/// is_latest = 1 /// ORDER BY /// block_num ASC /// ``` @@ -167,6 +276,7 @@ pub(crate) fn select_all_account_commitments( schema::accounts::table, (schema::accounts::account_id, schema::accounts::account_commitment), ) + .filter(schema::accounts::is_latest.eq(true)) .order_by(schema::accounts::block_num.asc()) .load::<(Vec, Vec)>(conn)?; @@ -177,6 +287,47 @@ pub(crate) fn select_all_account_commitments( )) } +/// Select all account IDs that have public state. +/// +/// This filters accounts in-memory after loading only the account IDs (not commitments), +/// which is more efficient than loading full commitments when only IDs are needed. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// account_id +/// FROM +/// accounts +/// WHERE +/// is_latest = 1 +/// ORDER BY +/// block_num ASC +/// ``` +pub(crate) fn select_all_public_account_ids( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + // We could technically use a `LIKE` constraint for both postgres and sqlite backends, + // but diesel doesn't expose that. + let raw: Vec> = + SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::>(conn)?; + + Result::from_iter( + raw.into_iter() + .map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }) + .filter_map(|result| match result { + Ok(id) if id.has_public_state() => Some(Ok(id)), + Ok(_) => None, + Err(e) => Some(Err(e)), + }), + ) +} + /// Select account vault assets within a block range (inclusive). 
/// /// # Parameters @@ -212,9 +363,8 @@ pub(crate) fn select_account_vault_assets( use schema::account_vault_assets as t; // TODO: These limits should be given by the protocol. // See miden-base/issues/1770 for more details - const MAX_PAYLOAD_BYTES: usize = 2 * 1024 * 1024; // 2 MB const ROW_OVERHEAD_BYTES: usize = 2 * size_of::() + size_of::(); // key + asset + block_num - const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); @@ -241,7 +391,7 @@ pub(crate) fn select_account_vault_assets( // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() >= MAX_ROWS + && raw.len() > MAX_ROWS { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at @@ -319,16 +469,11 @@ pub fn select_accounts_by_block_range( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment +/// WHERE +/// is_latest = 1 /// ORDER BY /// block_num ASC /// ``` @@ -336,23 +481,106 @@ pub fn select_accounts_by_block_range( pub(crate) fn select_all_accounts( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { - let accounts_raw = QueryDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - 
.load::<(AccountRaw, Option>)>(conn)?; - let account_infos = vec_raw_try_into::( - accounts_raw.into_iter().map(AccountWithCodeRawJoined::from), - )?; + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::(conn)?; + + let summaries: Vec = vec_raw_try_into(raw)?; + + // Backfill account details from database + let account_infos = summaries + .into_iter() + .map(|summary| { + let account_id = summary.account_id; + let details = select_full_account(conn, account_id).ok(); + AccountInfo { summary, details } + }) + .collect(); + Ok(account_infos) } +/// Returns network account IDs within the specified block range (based on account creation +/// block). +/// +/// The function may return fewer accounts than exist in the range if the result would exceed +/// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is +/// truncated at a block boundary to ensure all accounts from included blocks are returned. +/// +/// # Returns +/// +/// A tuple containing: +/// - A vector of network account IDs. +/// - The last block number that was fully included in the result. When truncated, this will be less +/// than the requested range end. 
+pub(crate) fn select_all_network_account_ids( + conn: &mut SqliteConnection, + block_range: RangeInclusive, +) -> Result<(Vec, BlockNumber), DatabaseError> { + const ROW_OVERHEAD_BYTES: usize = AccountId::SERIALIZED_SIZE; + const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + + const _: () = assert!( + MAX_ROWS > miden_protocol::MAX_ACCOUNTS_PER_BLOCK, + "Block pagination limit must exceed maximum block capacity to uphold assumed logic invariant" + ); + + if block_range.is_empty() { + return Err(DatabaseError::InvalidBlockRange { + from: *block_range.start(), + to: *block_range.end(), + }); + } + + let account_ids_raw: Vec<(Vec, i64)> = Box::new( + QueryDsl::select( + schema::accounts::table + .filter(schema::accounts::network_account_id_prefix.is_not_null()) + .filter(schema::accounts::is_latest.eq(true)), + (schema::accounts::account_id, schema::accounts::created_at_block), + ) + .filter( + schema::accounts::block_num + .between(block_range.start().to_raw_sql(), block_range.end().to_raw_sql()), + ) + .order(schema::accounts::created_at_block.asc()) + .limit(i64::try_from(MAX_ROWS + 1).expect("limit fits within i64")), + ) + .load::<(Vec, i64)>(conn)?; + + if account_ids_raw.len() > MAX_ROWS { + // SAFETY: We just checked that len > MAX_ROWS, so the vec is not empty. 
+ let last_created_at_block = account_ids_raw.last().expect("vec is not empty").1; + + let account_ids = account_ids_raw + .into_iter() + .take_while(|(_, created_at_block)| *created_at_block != last_created_at_block) + .map(|(id_bytes, _)| { + AccountId::read_from_bytes(&id_bytes).map_err(DatabaseError::DeserializationError) + }) + .collect::, DatabaseError>>()?; + + let last_block_included = + BlockNumber::from_raw_sql(last_created_at_block.saturating_sub(1))?; + + Ok((account_ids, last_block_included)) + } else { + let account_ids = account_ids_raw + .into_iter() + .map(|(id_bytes, _)| { + AccountId::read_from_bytes(&id_bytes).map_err(DatabaseError::DeserializationError) + }) + .collect::, DatabaseError>>()?; + + Ok((account_ids, *block_range.end())) + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapValue { pub block_num: BlockNumber, - pub slot_index: u8, + pub slot_name: StorageSlotName, pub key: Word, pub value: Word, } @@ -366,11 +594,11 @@ pub struct StorageMapValuesPage { } impl StorageMapValue { - pub fn from_raw_row(row: (i64, i32, Vec, Vec)) -> Result { - let (block_num, slot_index, key, value) = row; + pub fn from_raw_row(row: StorageMapValueRow) -> Result { + let (block_num, slot_name, key, value) = row; Ok(Self { block_num: BlockNumber::from_raw_sql(block_num)?, - slot_index: raw_sql_to_slot(slot_index), + slot_name: StorageSlotName::from_raw_sql(slot_name)?, key: Word::read_from_bytes(&key)?, value: Word::read_from_bytes(&value)?, }) @@ -381,7 +609,7 @@ impl StorageMapValue { /// /// # Returns /// -/// A vector of tuples containing `(slot, key, value, is_latest_update)` for the given account. +/// A vector of tuples containing `(slot, key, value, is_latest)` for the given account. /// Each row contains one of: /// /// - the historical value for a slot and key specifically on block `block_to` @@ -427,10 +655,9 @@ pub(crate) fn select_account_storage_map_values( // TODO: These limits should be given by the protocol. 
// See miden-base/issues/1770 for more details - pub const MAX_PAYLOAD_BYTES: usize = 2 * 1024 * 1024; // 2 MB pub const ROW_OVERHEAD_BYTES: usize = 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx - pub const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); @@ -443,8 +670,8 @@ pub(crate) fn select_account_storage_map_values( }); } - let raw: Vec<(i64, i32, Vec, Vec)> = - SelectDsl::select(t::table, (t::block_num, t::slot, t::key, t::value)) + let raw: Vec = + SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) .filter( t::account_id .eq(account_id.to_bytes()) @@ -458,7 +685,7 @@ pub(crate) fn select_account_storage_map_values( // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() >= MAX_ROWS + && raw.len() > MAX_ROWS { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at @@ -481,6 +708,76 @@ pub(crate) fn select_account_storage_map_values( Ok(StorageMapValuesPage { last_block_included, values }) } +/// Select latest account storage by querying `accounts.storage_header` where `is_latest=true` +/// and reconstructing full storage from the header plus map values from +/// `account_storage_map_values`. 
+pub(crate) fn select_latest_account_storage( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + + // Query storage header blob for this account where is_latest = true + let storage_blob: Option> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::is_latest.eq(true)) + .first(conn) + .optional()? + .flatten(); + + let Some(blob) = storage_blob else { + // No storage means empty storage + return Ok(AccountStorage::new(Vec::new())?); + }; + + // Deserialize the AccountStorageHeader from the blob + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all latest map values for this account + let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::is_latest.eq(true)) + .load(conn)?; + + // Group map values by slot name + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + for (slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + let entries = 
map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) +} + +// ACCOUNT MUTATION +// ================================================================================================ + #[derive(Queryable, Selectable)] #[diesel(table_name = crate::db::schema::account_vault_assets)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] @@ -502,73 +799,6 @@ impl TryFrom for AccountVaultValue { } } -#[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] -#[diesel(table_name = schema::accounts)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct AccountRaw { - pub account_id: Vec, - pub account_commitment: Vec, - pub block_num: i64, - pub storage: Option>, - pub vault: Option>, - pub nonce: Option, -} - -#[derive(Debug, Clone, QueryableByName)] -pub struct AccountWithCodeRawJoined { - #[diesel(embed)] - pub account: AccountRaw, - #[diesel(embed)] - pub code: Option>, -} - -impl From<(AccountRaw, Option>)> for AccountWithCodeRawJoined { - fn from((account, code): (AccountRaw, Option>)) -> Self { - Self { account, code } - } -} - -impl TryInto for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result { - use proto::domain::account::{AccountInfo, AccountSummary}; - - let account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - let account_commitment = Word::read_from_bytes(&self.account.account_commitment[..])?; - let block_num = BlockNumber::from_raw_sql(self.account.block_num)?; - let summary = AccountSummary { - account_id, - account_commitment, - block_num, - }; - let maybe_account = self.try_into()?; - Ok(AccountInfo { summary, details: maybe_account }) - } -} - -impl TryInto> for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result, Self::Error> { - let 
account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - - let details = if let (Some(vault), Some(storage), Some(nonce), Some(code)) = - (self.account.vault, self.account.storage, self.account.nonce, self.code) - { - let vault = AssetVault::read_from_bytes(&vault)?; - let storage = AccountStorage::read_from_bytes(&storage)?; - let code = AccountCode::read_from_bytes(&code)?; - let nonce = raw_sql_to_nonce(nonce); - let nonce = Felt::new(nonce); - let account = Account::new_unchecked(account_id, vault, storage, code, nonce, None); - Some(account) - } else { - // a private account - None - }; - Ok(details) - } -} - #[derive(Debug, Clone, PartialEq, Eq, Selectable, Queryable, QueryableByName)] #[diesel(table_name = schema::accounts)] #[diesel(check_for_backend(Sqlite))] @@ -595,8 +825,8 @@ impl TryInto for AccountSummaryRaw { /// Insert an account vault asset row into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest_update=true` for the new row and update any existing -/// row with the same `(account_id, vault_key)` tuple to `is_latest_update=false`. +/// Sets `is_latest=true` for the new row and updates any existing +/// row with the same `(account_id, vault_key)` tuple to `is_latest=false`. 
/// /// # Returns /// @@ -612,16 +842,16 @@ pub(crate) fn insert_account_vault_asset( diesel::Connection::transaction(conn, |conn| { // First, update any existing rows with the same (account_id, vault_key) to set - // is_latest_update=false + // is_latest=false let vault_key: Word = vault_key.into(); let update_count = diesel::update(schema::account_vault_assets::table) .filter( schema::account_vault_assets::account_id .eq(&account_id.to_bytes()) .and(schema::account_vault_assets::vault_key.eq(&vault_key.to_bytes())) - .and(schema::account_vault_assets::is_latest_update.eq(true)), + .and(schema::account_vault_assets::is_latest.eq(true)), ) - .set(schema::account_vault_assets::is_latest_update.eq(false)) + .set(schema::account_vault_assets::is_latest.eq(false)) .execute(conn)?; // Insert the new latest row @@ -635,8 +865,8 @@ pub(crate) fn insert_account_vault_asset( /// Insert an account storage map value into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest_update=true` for the new row and update any existing -/// row with the same `(account_id, slot, key)` tuple to `is_latest_update=false`. +/// Sets `is_latest=true` for the new row and updates any existing +/// row with the same `(account_id, slot_index, key)` tuple to `is_latest=false`. 
/// /// # Returns /// @@ -645,34 +875,34 @@ pub(crate) fn insert_account_storage_map_value( conn: &mut SqliteConnection, account_id: AccountId, block_num: BlockNumber, - slot: u8, + slot_name: StorageSlotName, key: Word, value: Word, ) -> Result { let account_id = account_id.to_bytes(); let key = key.to_bytes(); let value = value.to_bytes(); - let slot = slot_to_raw_sql(slot); + let slot_name = slot_name.to_raw_sql(); let block_num = block_num.to_raw_sql(); let update_count = diesel::update(schema::account_storage_map_values::table) .filter( schema::account_storage_map_values::account_id .eq(&account_id) - .and(schema::account_storage_map_values::slot.eq(slot)) + .and(schema::account_storage_map_values::slot_name.eq(&slot_name)) .and(schema::account_storage_map_values::key.eq(&key)) - .and(schema::account_storage_map_values::is_latest_update.eq(true)), + .and(schema::account_storage_map_values::is_latest.eq(true)), ) - .set(schema::account_storage_map_values::is_latest_update.eq(false)) + .set(schema::account_storage_map_values::is_latest.eq(false)) .execute(conn)?; let record = AccountStorageMapRowInsert { account_id, key, value, - slot, + slot_name, block_num, - is_latest_update: true, + is_latest: true, }; let insert_count = diesel::insert_into(schema::account_storage_map_values::table) .values(record) @@ -693,46 +923,42 @@ pub(crate) fn upsert_accounts( accounts: &[BlockAccountUpdate], block_num: BlockNumber, ) -> Result { - use proto::domain::account::NetworkAccountPrefix; - - fn select_details_stmt( - conn: &mut SqliteConnection, - account_id: AccountId, - ) -> Result, DatabaseError> { - let account_id = account_id.to_bytes(); - let accounts = SelectDsl::select( - schema::accounts::table.left_join( - schema::account_codes::table.on(schema::accounts::code_commitment - .eq(schema::account_codes::code_commitment.nullable())), - ), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::account_id.eq(account_id)) - 
.get_results::<(AccountRaw, Option>)>(conn)?; - - // SELECT .. FROM accounts LEFT JOIN account_codes - // ON accounts.code_commitment == account_codes.code_commitment - - let accounts = Result::from_iter(accounts.into_iter().filter_map(|x| { - let account_with_code = AccountWithCodeRawJoined::from(x); - account_with_code.try_into().transpose() - }))?; - Ok(accounts) - } + use proto::domain::account::NetworkAccountId; let mut count = 0; for update in accounts { let account_id = update.account_id(); - // Extract the 30-bit prefix to provide easy look ups for NTB - // Do not store prefix for accounts that are not network - let network_account_id_prefix = if account_id.is_network() { - Some(NetworkAccountPrefix::try_from(account_id)?) + let account_id_bytes = account_id.to_bytes(); + let block_num_raw = block_num.to_raw_sql(); + + let network_account_id = if account_id.is_network() { + Some(NetworkAccountId::try_from(account_id)?) } else { None }; - let full_account: Option = match update.details() { - AccountUpdateDetails::Private => None, + // Preserve the original creation block when updating existing accounts. + let created_at_block = QueryDsl::select( + schema::accounts::table.filter( + schema::accounts::account_id + .eq(&account_id_bytes) + .and(schema::accounts::is_latest.eq(true)), + ), + schema::accounts::created_at_block, + ) + .first::(conn) + .optional() + .map_err(DatabaseError::Diesel)? + .unwrap_or(block_num_raw); + + // NOTE: we collect storage / asset inserts to apply them only after the account row is + // written. The storage and vault tables have FKs pointing to `accounts (account_id, + // block_num)`, so inserting them earlier would violate those constraints when inserting a + // brand-new account. 
+ let (full_account, pending_storage_inserts, pending_asset_inserts) = match update.details() + { + AccountUpdateDetails::Private => (None, vec![], vec![]), + AccountUpdateDetails::Delta(delta) if delta.is_full_state() => { let account = Account::try_from(delta)?; debug_assert_eq!(account_id, account.id()); @@ -744,69 +970,60 @@ pub(crate) fn upsert_accounts( }); } - for (slot_idx, slot) in account.storage().slots().iter().enumerate() { - match slot { - StorageSlot::Value(_) => {}, - StorageSlot::Map(storage_map) => { - for (key, value) in storage_map.entries() { - // SAFETY: We can safely unwrap the conversion to u8 because - // accounts have a limit of 255 storage elements - insert_account_storage_map_value( - conn, - account_id, - block_num, - u8::try_from(slot_idx).unwrap(), - *key, - *value, - )?; - } - }, + // collect storage-map inserts to apply after account upsert + let mut storage = Vec::new(); + for slot in account.storage().slots() { + if let StorageSlotContent::Map(storage_map) = slot.content() { + for (key, value) in storage_map.entries() { + storage.push((account_id, slot.name().clone(), *key, *value)); + } } } - Some(account) + // collect vault-asset inserts to apply after account upsert + let mut assets = Vec::new(); + for asset in account.vault().assets() { + // Only insert assets with non-zero values for fungible assets + let should_insert = match asset { + Asset::Fungible(fungible) => fungible.amount() > 0, + Asset::NonFungible(_) => true, + }; + if should_insert { + assets.push((account_id, asset.vault_key(), Some(asset))); + } + } + + (Some(account), storage, assets) }, + AccountUpdateDetails::Delta(delta) => { - let mut rows = select_details_stmt(conn, account_id)?.into_iter(); - let Some(account) = rows.next() else { - return Err(DatabaseError::AccountNotFoundInDb(account_id)); - }; + // Reconstruct the full account from database tables + let account = select_full_account(conn, account_id)?; - // --- process storage map updates 
---------------------------- + // --- collect storage map updates ---------------------------- - for (&slot, map_delta) in delta.storage().maps() { + let mut storage = Vec::new(); + for (slot_name, map_delta) in delta.storage().maps() { for (key, value) in map_delta.entries() { - insert_account_storage_map_value( - conn, - account_id, - block_num, - slot, - (*key).into(), - *value, - )?; + storage.push((account_id, slot_name.clone(), (*key).into(), *value)); } } // apply delta to the account; we need to do this before we process asset updates // because we currently need to get the current value of fungible assets from the // account - let account = apply_delta(account, delta, &update.final_state_commitment())?; + let account_after = apply_delta(account, delta, &update.final_state_commitment())?; // --- process asset updates ---------------------------------- + let mut assets = Vec::new(); + for (faucet_id, _) in delta.vault().fungible().iter() { - let current_amount = account.vault().get_balance(*faucet_id).unwrap(); + let current_amount = account_after.vault().get_balance(*faucet_id).unwrap(); let asset: Asset = FungibleAsset::new(*faucet_id, current_amount)?.into(); - let asset_update_or_removal = - if current_amount == 0 { None } else { Some(asset) }; - - insert_account_vault_asset( - conn, - account.id(), - block_num, - asset.vault_key(), - asset_update_or_removal, - )?; + let update_or_remove = if current_amount == 0 { None } else { Some(asset) }; + + assets.push((account_id, asset.vault_key(), update_or_remove)); } for (asset, delta_action) in delta.vault().non_fungible().iter() { @@ -814,16 +1031,10 @@ pub(crate) fn upsert_accounts( NonFungibleDeltaAction::Add => Some(Asset::NonFungible(*asset)), NonFungibleDeltaAction::Remove => None, }; - insert_account_vault_asset( - conn, - account.id(), - block_num, - asset.vault_key(), - asset_update, - )?; + assets.push((account_id, asset.vault_key(), asset_update)); } - Some(account) + (Some(account_after), storage, 
assets) }, }; @@ -839,31 +1050,48 @@ pub(crate) fn upsert_accounts( .execute(conn)?; } + // mark previous rows as non-latest and insert NEW account row + diesel::update(schema::accounts::table) + .filter( + schema::accounts::account_id + .eq(&account_id_bytes) + .and(schema::accounts::is_latest.eq(true)), + ) + .set(schema::accounts::is_latest.eq(false)) + .execute(conn)?; + let account_value = AccountRowInsert { - account_id: account_id.to_bytes(), - network_account_id_prefix: network_account_id_prefix - .map(NetworkAccountPrefix::to_raw_sql), + account_id: account_id_bytes, + network_account_id_prefix: network_account_id.map(network_account_id_to_prefix_sql), account_commitment: update.final_state_commitment().to_bytes(), - block_num: block_num.to_raw_sql(), + block_num: block_num_raw, nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), - storage: full_account.as_ref().map(|account| account.storage().to_bytes()), - vault: full_account.as_ref().map(|account| account.vault().to_bytes()), code_commitment: full_account .as_ref() .map(|account| account.code().commitment().to_bytes()), + // Store only the header (slot metadata + map roots), not full storage with map contents + storage_header: full_account + .as_ref() + .map(|account| account.storage().to_header().to_bytes()), + vault_root: full_account.as_ref().map(|account| account.vault().root().to_bytes()), + is_latest: true, + created_at_block, }; - let v = account_value.clone(); - let inserted = diesel::insert_into(schema::accounts::table) - .values(&v) - .on_conflict(schema::accounts::account_id) - .do_update() - .set(account_value) + diesel::insert_into(schema::accounts::table) + .values(&account_value) .execute(conn)?; - debug_assert_eq!(inserted, 1); + // insert pending storage map entries + for (acc_id, slot_name, key, value) in pending_storage_inserts { + insert_account_storage_map_value(conn, acc_id, block_num, slot_name, key, value)?; + } + + for (acc_id, vault_key, update) in 
pending_asset_inserts { + insert_account_vault_asset(conn, acc_id, block_num, vault_key, update)?; + } - count += inserted; + count += 1; } Ok(count) @@ -903,9 +1131,11 @@ pub(crate) struct AccountRowInsert { pub(crate) block_num: i64, pub(crate) account_commitment: Vec, pub(crate) code_commitment: Option>, - pub(crate) storage: Option>, - pub(crate) vault: Option>, pub(crate) nonce: Option, + pub(crate) storage_header: Option>, + pub(crate) vault_root: Option>, + pub(crate) is_latest: bool, + pub(crate) created_at_block: i64, } #[derive(Insertable, AsChangeset, Debug, Clone)] @@ -915,7 +1145,7 @@ pub(crate) struct AccountAssetRowInsert { pub(crate) block_num: i64, pub(crate) vault_key: Vec, pub(crate) asset: Option>, - pub(crate) is_latest_update: bool, + pub(crate) is_latest: bool, } impl AccountAssetRowInsert { @@ -924,7 +1154,7 @@ impl AccountAssetRowInsert { vault_key: &AssetVaultKey, block_num: BlockNumber, asset: Option, - is_latest_update: bool, + is_latest: bool, ) -> Self { let account_id = account_id.to_bytes(); let vault_key: Word = (*vault_key).into(); @@ -936,7 +1166,7 @@ impl AccountAssetRowInsert { block_num, vault_key, asset, - is_latest_update, + is_latest, } } } @@ -946,8 +1176,8 @@ impl AccountAssetRowInsert { pub(crate) struct AccountStorageMapRowInsert { pub(crate) account_id: Vec, pub(crate) block_num: i64, - pub(crate) slot: i32, + pub(crate) slot_name: String, pub(crate) key: Vec, pub(crate) value: Vec, - pub(crate) is_latest_update: bool, + pub(crate) is_latest: bool, } diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs new file mode 100644 index 0000000000..41ec035f3f --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/at_block.rs @@ -0,0 +1,164 @@ +use diesel::prelude::{Queryable, QueryableByName}; +use diesel::query_dsl::methods::SelectDsl; +use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection}; +use 
miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; +use miden_protocol::asset::Asset; +use miden_protocol::block::BlockNumber; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{Felt, FieldElement, Word}; + +use crate::db::models::conv::{SqlTypeConvert, raw_sql_to_nonce}; +use crate::db::schema; +use crate::errors::DatabaseError; + +// ACCOUNT HEADER +// ================================================================================================ + +#[derive(Debug, Clone, Queryable)] +struct AccountHeaderDataRaw { + code_commitment: Option>, + nonce: Option, + storage_header: Option>, + vault_root: Option>, +} + +/// Queries the account header for a specific account at a specific block number. +/// +/// This reconstructs the `AccountHeader` by reading from the `accounts` table: +/// - `account_id`, `nonce`, `code_commitment`, `storage_header`, `vault_root` +/// +/// Returns `None` if the account doesn't exist at that block. +/// +/// # Arguments +/// +/// * `conn` - Database connection +/// * `account_id` - The account ID to query +/// * `block_num` - The block number at which to query the account header +/// +/// # Returns +/// +/// * `Ok(Some((AccountHeader, AccountStorageHeader)))` - The headers if found +/// * `Ok(None)` - If account doesn't exist at that block +/// * `Err(DatabaseError)` - If there's a database error +pub(crate) fn select_account_header_with_storage_header_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result, DatabaseError> { + use schema::accounts; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + let account_data: Option = SelectDsl::select( + accounts::table + .filter(accounts::account_id.eq(&account_id_bytes)) + .filter(accounts::block_num.le(block_num_sql)) + .order(accounts::block_num.desc()) + .limit(1), + ( + accounts::code_commitment, + accounts::nonce, + 
accounts::storage_header, + accounts::vault_root, + ), + ) + .first(conn) + .optional()?; + + let Some(AccountHeaderDataRaw { + code_commitment: code_commitment_bytes, + nonce: nonce_raw, + storage_header: storage_header_blob, + vault_root: vault_root_bytes, + }) = account_data + else { + return Ok(None); + }; + + let storage_header = match &storage_header_blob { + Some(blob) => AccountStorageHeader::read_from_bytes(blob)?, + None => AccountStorageHeader::new(Vec::new())?, + }; + + let storage_commitment = storage_header.to_commitment(); + + let code_commitment = code_commitment_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .unwrap_or(Word::default()); + + let nonce = nonce_raw.map_or(Felt::ZERO, raw_sql_to_nonce); + + let vault_root = vault_root_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .unwrap_or(Word::default()); + + let account_header = + AccountHeader::new(account_id, nonce, vault_root, storage_commitment, code_commitment); + + Ok(Some((account_header, storage_header))) +} + +// ACCOUNT VAULT +// ================================================================================================ + +/// Query vault assets at a specific block by finding the most recent update for each `vault_key`. +/// +/// Uses a single raw SQL query with a subquery join: +/// ```sql +/// SELECT a.asset FROM account_vault_assets a +/// INNER JOIN ( +/// SELECT vault_key, MAX(block_num) as max_block +/// FROM account_vault_assets +/// WHERE account_id = ? AND block_num <= ? +/// GROUP BY vault_key +/// ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block +/// WHERE a.account_id = ? 
+/// ``` +pub(crate) fn select_account_vault_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result, DatabaseError> { + use diesel::sql_types::{BigInt, Binary}; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + let entries: Vec>> = diesel::sql_query( + r" + SELECT a.asset FROM account_vault_assets a + INNER JOIN ( + SELECT vault_key, MAX(block_num) as max_block + FROM account_vault_assets + WHERE account_id = ? AND block_num <= ? + GROUP BY vault_key + ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block + WHERE a.account_id = ? + ", + ) + .bind::(&account_id_bytes) + .bind::(block_num_sql) + .bind::(&account_id_bytes) + .load::(conn)? + .into_iter() + .map(|row| row.asset) + .collect(); + + // Convert to assets, filtering out deletions (None values) + let mut assets = Vec::new(); + for asset_bytes in entries.into_iter().flatten() { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + + Ok(assets) +} + +#[derive(QueryableByName)] +struct AssetRow { + #[diesel(sql_type = diesel::sql_types::Nullable)] + asset: Option>, +} diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs new file mode 100644 index 0000000000..9206311a15 --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -0,0 +1,792 @@ +//! Tests for the `accounts` module, specifically for account storage and historical queries. 
+ +use std::collections::BTreeMap; + +use diesel::query_dsl::methods::SelectDsl; +use diesel::{ + BoolExpressionMethods, + Connection, + ExpressionMethods, + OptionalExtension, + QueryDsl, + RunQueryDsl, +}; +use diesel_migrations::MigrationHarness; +use miden_node_utils::fee::test_fee_params; +use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ + Account, + AccountBuilder, + AccountComponent, + AccountDelta, + AccountId, + AccountIdVersion, + AccountStorage, + AccountStorageHeader, + AccountStorageMode, + AccountType, + StorageMap, + StorageSlot, + StorageSlotName, + StorageSlotType, +}; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{EMPTY_WORD, Felt, Word}; +use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::code_builder::CodeBuilder; + +use super::*; +use crate::db::migrations::MIGRATIONS; +use crate::db::models::conv::SqlTypeConvert; +use crate::db::schema; +use crate::errors::DatabaseError; + +fn setup_test_db() -> SqliteConnection { + let mut conn = + SqliteConnection::establish(":memory:").expect("Failed to create in-memory database"); + + conn.run_pending_migrations(MIGRATIONS).expect("Failed to run migrations"); + + conn +} + +/// Test helper: reconstructs account storage at a given block from DB. +/// +/// Reads `accounts.storage_header` and `account_storage_map_values` to reconstruct +/// the full `AccountStorage` at the specified block. 
+fn reconstruct_account_storage_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + // Query storage header blob for this account at or before this block + let storage_blob: Option> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::block_num.le(block_num_sql)) + .order(schema::accounts::block_num.desc()) + .limit(1) + .first(conn) + .optional()? + .flatten(); + + let Some(blob) = storage_blob else { + return Ok(AccountStorage::new(Vec::new())?); + }; + + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all map values for this account up to and including this block. + let map_values: Vec<(i64, String, Vec, Vec)> = + SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes).and(t::block_num.le(block_num_sql))) + .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc())) + .load(conn)?; + + // For each (slot_name, key) pair, keep only the latest entry + let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); + for (_, slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + latest_map_entries.entry((slot_name, key)).or_insert(value); + } + + // Group entries by slot name + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + for ((slot_name, key), value) in latest_map_entries { + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct 
StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) +} + +fn create_test_account_with_storage() -> (Account, AccountId) { + // Create a simple public account with one value storage slot + let account_id = AccountId::dummy( + [1u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let storage_value = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let component_storage = vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + (account, account_id) +} + +fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { + use crate::db::schema::block_headers; + + let block_header = BlockHeader::new( + 1_u8.into(), + Word::default(), + block_num, + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + 
Word::default(), + SecretKey::new().public_key(), + test_fee_params(), + 0_u8.into(), + ); + + diesel::insert_into(block_headers::table) + .values(( + block_headers::block_num.eq(i64::from(block_num.as_u32())), + block_headers::block_header.eq(block_header.to_bytes()), + )) + .execute(conn) + .expect("Failed to insert block header"); +} + +// ACCOUNT HEADER AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_header_at_block_returns_none_for_nonexistent() { + let mut conn = setup_test_db(); + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let account_id = AccountId::dummy( + [99u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + // Query for a non-existent account + let result = + select_account_header_with_storage_header_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert!(result.is_none(), "Should return None for non-existent account"); +} + +#[test] +fn test_select_account_header_at_block_returns_correct_header() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + // Insert the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query the account header + let (header, _storage_header) = + select_account_header_with_storage_header_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed") + .expect("Header should exist"); + + assert_eq!(header.id(), account_id, "Account ID should 
match"); + assert_eq!(header.nonce(), account.nonce(), "Nonce should match"); + assert_eq!( + header.code_commitment(), + account.code().commitment(), + "Code commitment should match" + ); +} + +#[test] +fn test_select_account_header_at_block_historical_query() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Insert the account at block 1 + let nonce_1 = account.nonce(); + let delta_1 = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_1 = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Query at block 1 - should return the account + let (header_1, _) = + select_account_header_with_storage_header_at_block(&mut conn, account_id, block_num_1) + .expect("Query should succeed") + .expect("Header should exist at block 1"); + + assert_eq!(header_1.nonce(), nonce_1, "Nonce at block 1 should match"); + + // Query at block 2 - should return the same account (most recent before block 2) + let (header_2, _) = + select_account_header_with_storage_header_at_block(&mut conn, account_id, block_num_2) + .expect("Query should succeed") + .expect("Header should exist at block 2"); + + assert_eq!(header_2.nonce(), nonce_1, "Nonce at block 2 should match block 1"); +} + +// ACCOUNT VAULT AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_vault_at_block_empty() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + 
insert_block_header(&mut conn, block_num); + + // Insert account without vault assets + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query vault - should return empty (the test account has no assets) + let assets = select_account_vault_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert!(assets.is_empty(), "Account should have no assets"); +} + +// ACCOUNT STORAGE AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_upsert_accounts_inserts_storage_header() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment_original = account.storage().to_commitment(); + let storage_slots_len = account.storage().slots().len(); + let account_commitment = account.commitment(); + + // Create full state delta from the account + let delta = AccountDelta::try_from(account).unwrap(); + assert!(delta.is_full_state(), "Delta should be full state"); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + // Upsert account + let result = upsert_accounts(&mut conn, &[account_update], block_num); + assert!(result.is_ok(), "upsert_accounts failed: {:?}", result.err()); + assert_eq!(result.unwrap(), 1, "Expected 1 account to be inserted"); + + // Query storage header back + let queried_storage = select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query storage header"); + + // Verify storage commitment matches + assert_eq!( + queried_storage.to_commitment(), + 
storage_commitment_original, + "Storage commitment mismatch" + ); + + // Verify number of slots matches + assert_eq!(queried_storage.slots().len(), storage_slots_len, "Storage slots count mismatch"); + + // Verify exactly 1 latest account with storage exists + let header_count: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::storage_header.is_not_null()) + .count() + .get_result(&mut conn) + .expect("Failed to count accounts with storage"); + + assert_eq!(header_count, 1, "Expected exactly 1 latest account with storage"); +} + +#[test] +fn test_upsert_accounts_updates_is_latest_flag() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 and 2 + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Save storage commitment before moving account + let storage_commitment_1 = account.storage().to_commitment(); + let account_commitment_1 = account.commitment(); + + // First update with original account - full state delta + let delta_1 = AccountDelta::try_from(account).unwrap(); + + let account_update_1 = BlockAccountUpdate::new( + account_id, + account_commitment_1, + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Create modified account with different storage value + let storage_value_modified = + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]); + let component_storage_modified = + vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value_modified)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component_2 = 
AccountComponent::new(account_component_code, component_storage_modified) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account_2 = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component_2) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let storage_commitment_2 = account_2.storage().to_commitment(); + let account_commitment_2 = account_2.commitment(); + + // Second update with modified account - full state delta + let delta_2 = AccountDelta::try_from(account_2).unwrap(); + + let account_update_2 = BlockAccountUpdate::new( + account_id, + account_commitment_2, + AccountUpdateDetails::Delta(delta_2), + ); + + upsert_accounts(&mut conn, &[account_update_2], block_num_2).expect("Second upsert failed"); + + // Verify 2 total account rows exist (both historical records) + let total_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .count() + .get_result(&mut conn) + .expect("Failed to count total accounts"); + + assert_eq!(total_accounts, 2, "Expected 2 total account records"); + + // Verify only 1 is marked as latest + let latest_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .count() + .get_result(&mut conn) + .expect("Failed to count latest accounts"); + + assert_eq!(latest_accounts, 1, "Expected exactly 1 latest account"); + + // Verify latest storage matches second update + let latest_storage = select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query latest storage"); + + assert_eq!( + latest_storage.to_commitment(), + storage_commitment_2, + "Latest storage should match second update" + ); + + // Verify historical query returns first update + let 
storage_at_block_1 = + reconstruct_account_storage_at_block(&mut conn, account_id, block_num_1) + .expect("Failed to query storage at block 1"); + + assert_eq!( + storage_at_block_1.to_commitment(), + storage_commitment_1, + "Storage at block 1 should match first update" + ); +} + +#[test] +fn test_upsert_accounts_with_multiple_storage_slots() { + let mut conn = setup_test_db(); + + // Create account with 3 storage slots + let account_id = AccountId::dummy( + [2u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let slot_value_1 = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let slot_value_2 = Word::from([Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)]); + let slot_value_3 = Word::from([Felt::new(9), Felt::new(10), Felt::new(11), Felt::new(12)]); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), slot_value_1), + StorageSlot::with_value(StorageSlotName::mock(1), slot_value_2), + StorageSlot::with_value(StorageSlotName::mock(2), slot_value_3), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment = account.storage().to_commitment(); + let account_commitment = account.commitment(); + let delta = AccountDelta::try_from(account).unwrap(); + + let account_update = + 
BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Upsert with multiple storage slots failed"); + + // Query back and verify + let queried_storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + assert_eq!( + queried_storage.to_commitment(), + storage_commitment, + "Storage commitment mismatch" + ); + + // Note: Auth component adds 1 storage slot, so 3 component slots + 1 auth = 4 total + assert_eq!( + queried_storage.slots().len(), + 4, + "Expected 4 storage slots (3 component + 1 auth)" + ); + + // The storage commitment matching proves that all values are correctly preserved. + // We don't check individual slot values by index since slot ordering may vary. +} + +#[test] +fn test_upsert_accounts_with_empty_storage() { + let mut conn = setup_test_db(); + + // Create account with no component storage slots (only auth slot) + let account_id = AccountId::dummy( + [3u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, vec![]) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([3u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment = account.storage().to_commitment(); + let account_commitment = account.commitment(); + let delta = 
AccountDelta::try_from(account).unwrap(); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Upsert with empty storage failed"); + + // Query back and verify + let queried_storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + assert_eq!( + queried_storage.to_commitment(), + storage_commitment, + "Storage commitment mismatch for empty storage" + ); + + // Note: Auth component adds 1 storage slot, so even "empty" accounts have 1 slot + assert_eq!(queried_storage.slots().len(), 1, "Expected 1 storage slot (auth component)"); + + // Verify the storage header blob exists in database + let storage_header_exists: Option = SelectDsl::select( + schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)), + schema::accounts::storage_header.is_not_null(), + ) + .first(&mut conn) + .optional() + .expect("Failed to check storage header existence"); + + assert_eq!( + storage_header_exists, + Some(true), + "Storage header blob should exist even for empty storage" + ); +} + +// VAULT AT BLOCK HISTORICAL QUERY TESTS +// ================================================================================================ + +/// Tests that querying vault at an older block returns the correct historical state, +/// even when the same `vault_key` has been updated in later blocks. +/// +/// Focuses on deduplication logic that relies on ordering by (`vault_key` ASC and `block_num` +/// DESC). 
+#[test] +fn test_select_account_vault_at_block_historical_with_updates() { + use assert_matches::assert_matches; + use miden_protocol::asset::{AssetVaultKey, FungibleAsset}; + use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET; + + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + // Faucet ID is needed for creating FungibleAssets + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let block_1 = BlockNumber::from_epoch(0); + let block_2 = BlockNumber::from_epoch(1); + let block_3 = BlockNumber::from_epoch(2); + + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + insert_block_header(&mut conn, block_3); + + // Insert account at block 1 + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + // Insert vault asset at block 1: vault_key_1 = 1000 tokens + let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(1), + Felt::new(0), + Felt::new(0), + Felt::new(0), + ])); + let asset_v1 = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); + + insert_account_vault_asset(&mut conn, account_id, block_1, vault_key_1, Some(asset_v1)) + .expect("insert vault asset failed"); + + // Update vault asset at block 2: vault_key_1 = 2000 tokens (updated value) + let asset_v2 = Asset::Fungible(FungibleAsset::new(faucet_id, 2000).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_1, Some(asset_v2)) + .expect("insert vault asset update failed"); + + // Add a second vault_key at block 2 + let vault_key_2 = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(2), + Felt::new(0), + Felt::new(0), + Felt::new(0), + ])); + let asset_key2 = 
Asset::Fungible(FungibleAsset::new(faucet_id, 500).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_2, Some(asset_key2)) + .expect("insert second vault asset failed"); + + // Update vault_key_1 again at block 3: vault_key_1 = 3000 tokens + let asset_v3 = Asset::Fungible(FungibleAsset::new(faucet_id, 3000).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_3, vault_key_1, Some(asset_v3)) + .expect("insert vault asset update 2 failed"); + + // Query at block 1: should only see vault_key_1 with 1000 tokens + let assets_at_block_1 = select_account_vault_at_block(&mut conn, account_id, block_1) + .expect("Query at block 1 should succeed"); + + assert_eq!(assets_at_block_1.len(), 1, "Should have 1 asset at block 1"); + assert_matches!(&assets_at_block_1[0], Asset::Fungible(f) if f.amount() == 1000); + + // Query at block 2: should see vault_key_1 with 2000 tokens AND vault_key_2 with 500 tokens + let assets_at_block_2 = select_account_vault_at_block(&mut conn, account_id, block_2) + .expect("Query at block 2 should succeed"); + + assert_eq!(assets_at_block_2.len(), 2, "Should have 2 assets at block 2"); + + // Find the amounts (order may vary) + let amounts: Vec = assets_at_block_2 + .iter() + .map(|a| assert_matches!(a, Asset::Fungible(f) => f.amount())) + .collect(); + + assert!(amounts.contains(&2000), "Block 2 should have vault_key_1 with 2000 tokens"); + assert!(amounts.contains(&500), "Block 2 should have vault_key_2 with 500 tokens"); + + // Query at block 3: should see vault_key_1 with 3000 tokens AND vault_key_2 with 500 tokens + let assets_at_block_3 = select_account_vault_at_block(&mut conn, account_id, block_3) + .expect("Query at block 3 should succeed"); + + assert_eq!(assets_at_block_3.len(), 2, "Should have 2 assets at block 3"); + + let amounts: Vec = assets_at_block_3 + .iter() + .map(|a| assert_matches!(a, Asset::Fungible(f) => f.amount())) + .collect(); + + assert!(amounts.contains(&3000), "Block 3 
should have vault_key_1 with 3000 tokens"); + assert!(amounts.contains(&500), "Block 3 should have vault_key_2 with 500 tokens"); +} + +/// Tests that deleted vault assets (asset = None) are correctly excluded from results, +/// and that the deduplication handles deletion entries properly. +#[test] +fn test_select_account_vault_at_block_with_deletion() { + use assert_matches::assert_matches; + use miden_protocol::asset::{AssetVaultKey, FungibleAsset}; + use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET; + + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + // Faucet ID is needed for creating FungibleAssets + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let block_1 = BlockNumber::from_epoch(0); + let block_2 = BlockNumber::from_epoch(1); + let block_3 = BlockNumber::from_epoch(2); + + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + insert_block_header(&mut conn, block_3); + + // Insert account at block 1 + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + // Insert vault asset at block 1 + let vault_key = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(1), + Felt::new(0), + Felt::new(0), + Felt::new(0), + ])); + let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); + + insert_account_vault_asset(&mut conn, account_id, block_1, vault_key, Some(asset)) + .expect("insert vault asset failed"); + + // Delete the vault asset at block 2 (insert with asset = None) + insert_account_vault_asset(&mut conn, account_id, block_2, vault_key, None) + .expect("delete vault asset failed"); + + // Re-add the vault asset at block 3 with different 
amount + let asset_v3 = Asset::Fungible(FungibleAsset::new(faucet_id, 2000).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_3, vault_key, Some(asset_v3)) + .expect("re-add vault asset failed"); + + // Query at block 1: should see the asset + let assets_at_block_1 = select_account_vault_at_block(&mut conn, account_id, block_1) + .expect("Query at block 1 should succeed"); + assert_eq!(assets_at_block_1.len(), 1, "Should have 1 asset at block 1"); + + // Query at block 2: should NOT see the asset (it was deleted) + let assets_at_block_2 = select_account_vault_at_block(&mut conn, account_id, block_2) + .expect("Query at block 2 should succeed"); + assert!(assets_at_block_2.is_empty(), "Should have no assets at block 2 (deleted)"); + + // Query at block 3: should see the re-added asset with new amount + let assets_at_block_3 = select_account_vault_at_block(&mut conn, account_id, block_3) + .expect("Query at block 3 should succeed"); + assert_eq!(assets_at_block_3.len(), 1, "Should have 1 asset at block 3"); + assert_matches!(&assets_at_block_3[0], Asset::Fungible(f) if f.amount() == 2000); +} diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index c791b2a626..3c295c72b8 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -11,9 +11,9 @@ use diesel::{ SelectableHelper, SqliteConnection, }; -use miden_lib::utils::{Deserializable, Serializable}; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; -use miden_objects::block::{BlockHeader, BlockNumber}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::utils::{Deserializable, Serializable}; use super::DatabaseError; use crate::COMPONENT; diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 0d40dd8c42..0f29b00157 100644 --- a/crates/store/src/db/models/queries/mod.rs 
+++ b/crates/store/src/db/models/queries/mod.rs @@ -31,10 +31,10 @@ )] use diesel::SqliteConnection; -use miden_objects::account::AccountId; -use miden_objects::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; -use miden_objects::note::Nullifier; -use miden_objects::transaction::OrderedTransactionHeaders; +use miden_protocol::account::AccountId; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::OrderedTransactionHeaders; use super::DatabaseError; use crate::db::{NoteRecord, StateSyncUpdate}; diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index 191c5c7114..a2ab7b1bb0 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -3,7 +3,7 @@ reason = "We will not approach the item count where i64 and usize cause issues" )] -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::ops::RangeInclusive; use diesel::prelude::{ @@ -25,21 +25,20 @@ use diesel::{ SelectableHelper, SqliteConnection, }; -use miden_lib::utils::{Deserializable, Serializable}; use miden_node_utils::limiter::{ QueryParamAccountIdLimit, QueryParamLimiter, QueryParamNoteCommitmentLimit, QueryParamNoteTagLimit, }; -use miden_objects::account::AccountId; -use miden_objects::block::{BlockNoteIndex, BlockNumber}; -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::note::{ +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::{BlockNoteIndex, BlockNumber}; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::note::{ NoteAssets, + NoteAttachment, NoteDetails, - NoteExecutionHint, - NoteExecutionMode, NoteId, NoteInclusionProof, NoteInputs, @@ -50,13 +49,12 @@ use miden_objects::note::{ NoteType, Nullifier, }; -use miden_objects::{Felt, Word}; +use 
miden_protocol::utils::{Deserializable, Serializable}; +use miden_standards::note::NetworkAccountTarget; use crate::COMPONENT; use crate::db::models::conv::{ SqlTypeConvert, - aux_to_raw_sql, - execution_hint_to_raw_sql, idx_to_raw_sql, note_type_to_raw_sql, raw_sql_to_idx, @@ -66,6 +64,25 @@ use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{DatabaseError, NoteRecord, NoteSyncRecord, NoteSyncUpdate, Page, schema}; use crate::errors::NoteSyncError; +// NETWORK NOTE TYPE +// ================================================================================================ + +/// Classifies network notes for database storage. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(i32)] +pub(crate) enum NetworkNoteType { + /// Not a network note. + None = 0, + /// Single account target network note (has `NetworkAccountTarget` attachment). + SingleTarget = 1, +} + +impl From for i32 { + fn from(value: NetworkNoteType) -> Self { + value as i32 + } +} + /// Select notes matching the tags and account IDs search criteria within a block range. 
/// /// # Parameters @@ -97,8 +114,7 @@ use crate::errors::NoteSyncError; /// note_type, /// sender, /// tag, -/// aux, -/// execution_hint, +/// attachment, /// inclusion_path /// FROM /// notes @@ -185,8 +201,7 @@ pub(crate) fn select_notes_since_block_by_tag_and_sender( /// notes.note_type, /// notes.sender, /// notes.tag, -/// notes.aux, -/// notes.execution_hint, +/// notes.attachment, /// notes.assets, /// notes.inputs, /// notes.serial_num, @@ -218,26 +233,34 @@ pub(crate) fn select_notes_by_id( Ok(records) } -pub(crate) fn select_notes_by_commitment( +/// Select the subset of note commitments that already exist in the notes table +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// notes.note_commitment +/// FROM notes +/// WHERE note_commitment IN (?1) +/// ``` +pub(crate) fn select_existing_note_commitments( conn: &mut SqliteConnection, note_commitments: &[Word], -) -> Result, DatabaseError> { +) -> Result, DatabaseError> { + QueryParamNoteCommitmentLimit::check(note_commitments.len())?; + let note_commitments = serialize_vec(note_commitments.iter()); - let q = schema::notes::table - .left_join( - schema::note_scripts::table - .on(schema::notes::script_root.eq(schema::note_scripts::script_root.nullable())), - ) - .filter(schema::notes::note_commitment.eq_any(¬e_commitments)); - let raw: Vec<_> = SelectDsl::select( - q, - (NoteRecordRawRow::as_select(), schema::note_scripts::script.nullable()), - ) - .load::<(NoteRecordRawRow, Option>)>(conn)?; - let records = vec_raw_try_into::( - raw.into_iter().map(NoteRecordWithScriptRawJoined::from), - )?; - Ok(records) + + let raw_commitments = SelectDsl::select(schema::notes::table, schema::notes::note_commitment) + .filter(schema::notes::note_commitment.eq_any(¬e_commitments)) + .load::>(conn)?; + + let commitments = raw_commitments + .into_iter() + .map(|commitment| Word::read_from_bytes(&commitment[..])) + .collect::, _>>()?; + + Ok(commitments) } /// Select all notes from the DB using the given 
[`SqliteConnection`]. @@ -258,8 +281,7 @@ pub(crate) fn select_notes_by_commitment( /// notes.note_type, /// notes.sender, /// notes.tag, -/// notes.aux, -/// notes.execution_hint, +/// notes.attachment, /// notes.assets, /// notes.inputs, /// notes.serial_num, @@ -379,115 +401,6 @@ pub(crate) fn select_note_script_by_root( .map_err(Into::into) } -/// Returns a paginated batch of network notes that have not yet been consumed. -/// -/// # Returns -/// -/// A set of unconsumed network notes with maximum length of `size` and the page to get -/// the next set. -/// -/// Attention: uses the _implicit_ column `rowid`, which requires to use a few raw SQL nugget -/// statements -/// -/// # Raw SQL -/// -/// ```sql -/// SELECT -/// notes.committed_at, -/// notes.batch_index, -/// notes.note_index, -/// notes.note_id, -/// notes.note_type, -/// notes.sender, -/// notes.tag, -/// notes.aux, -/// notes.execution_hint, -/// notes.assets, -/// notes.inputs, -/// notes.serial_num, -/// notes.inclusion_path, -/// note_scripts.script, -/// notes.rowid -/// FROM notes -/// LEFT JOIN note_scripts ON notes.script_root = note_scripts.script_root -/// WHERE -/// execution_mode = 0 AND consumed_at IS NULL AND notes.rowid >= ?1 -/// ORDER BY notes.rowid ASC -/// LIMIT ?2 -/// ``` -#[allow( - clippy::cast_sign_loss, - reason = "We need custom SQL statements which has given types that we need to convert" -)] -pub(crate) fn unconsumed_network_notes( - conn: &mut SqliteConnection, - mut page: Page, -) -> Result<(Vec, Page), DatabaseError> { - assert_eq!( - NoteExecutionMode::Network as u8, - 0, - "Hardcoded execution value must match query" - ); - - let rowid_sel = diesel::dsl::sql::("notes.rowid"); - let rowid_sel_ge = - diesel::dsl::sql::("notes.rowid >= ") - .bind::(page.token.unwrap_or_default() as i64); - - #[allow( - clippy::items_after_statements, - reason = "It's only relevant for a single call function" - )] - type RawLoadedTuple = ( - NoteRecordRawRow, - Option>, // script - i64, // 
rowid (from sql::("notes.rowid")) - ); - - #[allow( - clippy::items_after_statements, - reason = "It's only relevant for a single call function" - )] - fn split_into_raw_note_record_and_implicit_row_id( - tuple: RawLoadedTuple, - ) -> (NoteRecordWithScriptRawJoined, i64) { - let (note, script, row) = tuple; - let combined = NoteRecordWithScriptRawJoined::from((note, script)); - (combined, row) - } - - let raw = SelectDsl::select( - schema::notes::table.left_join( - schema::note_scripts::table - .on(schema::notes::script_root.eq(schema::note_scripts::script_root.nullable())), - ), - ( - NoteRecordRawRow::as_select(), - schema::note_scripts::script.nullable(), - rowid_sel.clone(), - ), - ) - .filter(schema::notes::execution_mode.eq(NoteExecutionMode::Network.to_raw_sql())) - .filter(schema::notes::consumed_at.is_null()) - .filter(rowid_sel_ge) - .order(rowid_sel.asc()) - .limit(page.size.get() as i64 + 1) - .load::(conn)?; - - let mut notes = Vec::with_capacity(page.size.into()); - for raw_item in raw { - let (raw_item, row_id) = split_into_raw_note_record_and_implicit_row_id(raw_item); - page.token = None; - if notes.len() == page.size.get() { - page.token = Some(row_id as u64); - break; - } - notes.push(TryInto::::try_into(raw_item)?); - } - - Ok((notes, page)) -} - /// Returns a paginated batch of network notes for an account that are unconsumed by a specified /// block number. 
/// @@ -512,8 +425,7 @@ pub(crate) fn unconsumed_network_notes( /// notes.note_type, /// notes.sender, /// notes.tag, -/// notes.aux, -/// notes.execution_hint, +/// notes.attachment, /// notes.assets, /// notes.inputs, /// notes.serial_num, @@ -523,7 +435,7 @@ pub(crate) fn unconsumed_network_notes( /// FROM notes /// LEFT JOIN note_scripts ON notes.script_root = note_scripts.script_root /// WHERE -/// execution_mode = 0 AND tag = ?1 AND +/// network_note_type = 1 AND target_account_id = ?1 AND /// committed_at <= ?2 AND /// (consumed_at IS NULL OR consumed_at > ?2) AND notes.rowid >= ?3 /// ORDER BY notes.rowid ASC @@ -537,18 +449,12 @@ pub(crate) fn unconsumed_network_notes( clippy::too_many_lines, reason = "Lines will be reduced when schema is updated to simplify logic" )] -pub(crate) fn select_unconsumed_network_notes_by_tag( +pub(crate) fn select_unconsumed_network_notes_by_account_id( conn: &mut SqliteConnection, - tag: u32, + account_id: AccountId, block_num: BlockNumber, mut page: Page, ) -> Result<(Vec, Page), DatabaseError> { - assert_eq!( - NoteExecutionMode::Network as u8, - 0, - "Hardcoded execution value must match query" - ); - let rowid_sel = diesel::dsl::sql::("notes.rowid"); let rowid_sel_ge = diesel::dsl::sql::("notes.rowid >= ") @@ -587,8 +493,8 @@ pub(crate) fn select_unconsumed_network_notes_by_tag( rowid_sel.clone(), ), ) - .filter(schema::notes::execution_mode.eq(NoteExecutionMode::Network.to_raw_sql())) - .filter(schema::notes::tag.eq(tag as i32)) + .filter(schema::notes::network_note_type.eq(i32::from(NetworkNoteType::SingleTarget))) + .filter(schema::notes::target_account_id.eq(Some(account_id.to_bytes()))) .filter(schema::notes::committed_at.le(block_num.to_raw_sql())) .filter( schema::notes::consumed_at @@ -691,8 +597,7 @@ pub struct NoteRecordWithScriptRawJoined { pub note_type: i32, pub sender: Vec, // AccountId pub tag: i32, - pub aux: i64, - pub execution_hint: i64, + pub attachment: Vec, // #[diesel(embed)] // pub metadata: 
NoteMetadataRaw, pub assets: Option>, @@ -716,8 +621,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined note_type, sender, tag, - aux, - execution_hint, + attachment, assets, inputs, serial_num, @@ -732,8 +636,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined note_type, sender, tag, - aux, - execution_hint, + attachment, assets, inputs, serial_num, @@ -760,8 +663,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { note_type, sender, tag, - execution_hint, - aux, + attachment, // metadata ^^^, assets, inputs, @@ -772,13 +674,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { .. } = raw; let index = BlockNoteIndexRawRow { batch_index, note_index }; - let metadata = NoteMetadataRawRow { - note_type, - sender, - tag, - aux, - execution_hint, - }; + let metadata = NoteMetadataRawRow { note_type, sender, tag, attachment }; let details = NoteDetailsRawRow { assets, inputs, serial_num }; let metadata = metadata.try_into()?; @@ -831,8 +727,7 @@ pub struct NoteRecordRawRow { pub note_type: i32, pub sender: Vec, // AccountId pub tag: i32, - pub aux: i64, - pub execution_hint: i64, + pub attachment: Vec, pub assets: Option>, pub inputs: Option>, @@ -848,8 +743,7 @@ pub struct NoteMetadataRawRow { note_type: i32, sender: Vec, // AccountId tag: i32, - aux: i64, - execution_hint: i64, + attachment: Vec, } #[allow(clippy::cast_sign_loss)] @@ -859,11 +753,9 @@ impl TryInto for NoteMetadataRawRow { let sender = AccountId::read_from_bytes(&self.sender[..])?; let note_type = NoteType::try_from(self.note_type as u32) .map_err(DatabaseError::conversiont_from_sql::)?; - let tag = NoteTag::from(self.tag as u32); - let execution_hint = NoteExecutionHint::try_from(self.execution_hint as u64) - .map_err(DatabaseError::conversiont_from_sql::)?; - let aux = Felt::new(self.aux as u64); - Ok(NoteMetadata::new(sender, note_type, tag, execution_hint, aux)?) 
+ let tag = NoteTag::new(self.tag as u32); + let attachment = NoteAttachment::read_from_bytes(&self.attachment)?; + Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) } } @@ -913,7 +805,7 @@ pub(crate) fn insert_notes( .values(Vec::from_iter( notes .iter() - .map(|(note, nullifier)| NoteInsertRowInsert::from((note.clone(), *nullifier))), + .map(|(note, nullifier)| NoteInsertRow::from((note.clone(), *nullifier))), )) .execute(conn)?; Ok(count) @@ -956,7 +848,7 @@ pub(crate) fn insert_scripts<'a>( #[derive(Debug, Clone, PartialEq, Insertable)] #[diesel(table_name = schema::notes)] -pub struct NoteInsertRowInsert { +pub struct NoteInsertRow { pub committed_at: i64, pub batch_index: i32, @@ -968,21 +860,32 @@ pub struct NoteInsertRowInsert { pub note_type: i32, pub sender: Vec, // AccountId pub tag: i32, - pub aux: i64, - pub execution_hint: i64, + pub network_note_type: i32, + pub target_account_id: Option>, + pub attachment: Vec, + pub inclusion_path: Vec, pub consumed_at: Option, + pub nullifier: Option>, pub assets: Option>, pub inputs: Option>, - pub serial_num: Option>, - pub nullifier: Option>, pub script_root: Option>, - pub execution_mode: i32, - pub inclusion_path: Vec, + pub serial_num: Option>, } -impl From<(NoteRecord, Option)> for NoteInsertRowInsert { +impl From<(NoteRecord, Option)> for NoteInsertRow { fn from((note, nullifier): (NoteRecord, Option)) -> Self { + let attachment = note.metadata.attachment(); + + let target_account_id = NetworkAccountTarget::try_from(attachment).ok(); + let network_note_type = if target_account_id.is_some() { + NetworkNoteType::SingleTarget + } else { + NetworkNoteType::None + }; + + let attachment_bytes = attachment.to_bytes(); + Self { committed_at: note.block_num.to_raw_sql(), batch_index: idx_to_raw_sql(note.note_index.batch_idx()), @@ -992,12 +895,12 @@ impl From<(NoteRecord, Option)> for NoteInsertRowInsert { note_type: note_type_to_raw_sql(note.metadata.note_type() as u8), sender: 
note.metadata.sender().to_bytes(), tag: note.metadata.tag().to_raw_sql(), - execution_mode: note.metadata.tag().execution_mode().to_raw_sql(), - aux: aux_to_raw_sql(note.metadata.aux()), - execution_hint: execution_hint_to_raw_sql(note.metadata.execution_hint().into()), + network_note_type: network_note_type.into(), + target_account_id: target_account_id.map(|t| t.target_id().to_bytes()), + attachment: attachment_bytes, inclusion_path: note.inclusion_path.to_bytes(), consumed_at: None::, // New notes are always unconsumed. - nullifier: nullifier.as_ref().map(Nullifier::to_bytes), /* Beware: `Option` also implements `to_bytes`, but this is not what you want. */ + nullifier: nullifier.as_ref().map(Nullifier::to_bytes), assets: note.details.as_ref().map(|d| d.assets().to_bytes()), inputs: note.details.as_ref().map(|d| d.inputs().to_bytes()), script_root: note.details.as_ref().map(|d| d.script().root().to_bytes()), diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index f3be2a8a82..5ab5785374 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -11,14 +11,15 @@ use diesel::{ SelectableHelper, SqliteConnection, }; -use miden_lib::utils::{Deserializable, Serializable}; use miden_node_utils::limiter::{ + MAX_RESPONSE_PAYLOAD_BYTES, QueryParamLimiter, QueryParamNullifierLimit, QueryParamNullifierPrefixLimit, }; -use miden_objects::block::BlockNumber; -use miden_objects::note::Nullifier; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Nullifier; +use miden_protocol::utils::{Deserializable, Serializable}; use super::DatabaseError; use crate::COMPONENT; @@ -66,12 +67,10 @@ pub(crate) fn select_nullifiers_by_prefix( block_range: RangeInclusive, ) -> Result<(Vec, BlockNumber), DatabaseError> { // Size calculation: max 2^16 nullifiers per block × 36 bytes per nullifier = ~2.25MB - // We use 2.5MB to provide a safety margin for the 
unlikely case of hitting the maximum - pub const MAX_PAYLOAD_BYTES: usize = 2_500_000; // 2.5 MB - allows for max block size of ~2.25MB pub const NULLIFIER_BYTES: usize = 32; // digest size (nullifier) pub const BLOCK_NUM_BYTES: usize = 4; // 32 bits per block number pub const ROW_OVERHEAD_BYTES: usize = NULLIFIER_BYTES + BLOCK_NUM_BYTES; // 36 bytes - pub const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; assert_eq!(prefix_len, 16, "Only 16-bit prefixes are supported"); diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index 88fc8e2869..be132e1a56 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -12,12 +12,16 @@ use diesel::{ SelectableHelper, SqliteConnection, }; -use miden_lib::utils::Deserializable; -use miden_node_utils::limiter::{QueryParamAccountIdLimit, QueryParamLimiter}; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::note::{NoteId, Nullifier}; -use miden_objects::transaction::{OrderedTransactionHeaders, TransactionId}; +use miden_node_utils::limiter::{ + MAX_RESPONSE_PAYLOAD_BYTES, + QueryParamAccountIdLimit, + QueryParamLimiter, +}; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{NoteId, Nullifier}; +use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionId}; +use miden_protocol::utils::{Deserializable, Serializable}; use super::DatabaseError; use crate::COMPONENT; @@ -94,7 +98,7 @@ pub struct TransactionRecordRaw { transaction_id: Vec, initial_state_commitment: Vec, final_state_commitment: Vec, - input_notes: Vec, + nullifiers: Vec, output_notes: Vec, size_in_bytes: i64, } @@ -113,16 +117,15 @@ impl TryInto for TransactionSummaryRaw { impl TryInto for TransactionRecordRaw { type Error = 
DatabaseError; fn try_into(self) -> Result { - use miden_lib::utils::Deserializable; - use miden_objects::Word; + use miden_protocol::Word; let initial_state_commitment = self.initial_state_commitment; let final_state_commitment = self.final_state_commitment; - let input_notes_binary = self.input_notes; + let nullifiers_binary = self.nullifiers; let output_notes_binary = self.output_notes; // Deserialize input notes as nullifiers and output notes as note IDs - let input_notes: Vec = Deserializable::read_from_bytes(&input_notes_binary)?; + let nullifiers: Vec = Deserializable::read_from_bytes(&nullifiers_binary)?; let output_notes: Vec = Deserializable::read_from_bytes(&output_notes_binary)?; Ok(crate::db::TransactionRecord { @@ -131,7 +134,7 @@ impl TryInto for TransactionRecordRaw { transaction_id: TransactionId::read_from_bytes(&self.transaction_id[..])?, initial_state_commitment: Word::read_from_bytes(&initial_state_commitment)?, final_state_commitment: Word::read_from_bytes(&final_state_commitment)?, - input_notes, + nullifiers, output_notes, }) } @@ -178,7 +181,7 @@ pub struct TransactionSummaryRowInsert { block_num: i64, initial_state_commitment: Vec, final_state_commitment: Vec, - input_notes: Vec, + nullifiers: Vec, output_notes: Vec, size_in_bytes: i64, } @@ -189,15 +192,13 @@ impl TransactionSummaryRowInsert { reason = "We will not approach the item count where i64 and usize cause issues" )] fn new( - transaction_header: &miden_objects::transaction::TransactionHeader, + transaction_header: &miden_protocol::transaction::TransactionHeader, block_num: BlockNumber, ) -> Self { - use miden_lib::utils::Serializable; - const HEADER_BASE_SIZE: usize = 4 + 32 + 16 + 64; // block_num + tx_id + account_id + commitments // Serialize input notes using binary format (store nullifiers) - let input_notes_binary = transaction_header.input_notes().to_bytes(); + let nullifiers_binary = transaction_header.input_notes().to_bytes(); // Serialize output notes using binary 
format (store note IDs) let output_notes_binary = transaction_header.output_notes().to_bytes(); @@ -213,9 +214,9 @@ impl TransactionSummaryRowInsert { // // Note: 500 bytes per output note is an over-estimate but ensures we don't // exceed memory limits when these transactions are later converted to proto records. - let input_notes_size = (transaction_header.input_notes().num_notes() * 32) as usize; + let nullifiers_size = (transaction_header.input_notes().num_notes() * 32) as usize; let output_notes_size = transaction_header.output_notes().len() * 500; - let size_in_bytes = (HEADER_BASE_SIZE + input_notes_size + output_notes_size) as i64; + let size_in_bytes = (HEADER_BASE_SIZE + nullifiers_size + output_notes_size) as i64; Self { transaction_id: transaction_header.id().to_bytes(), @@ -223,7 +224,7 @@ impl TransactionSummaryRowInsert { block_num: block_num.to_raw_sql(), initial_state_commitment: transaction_header.initial_state_commitment().to_bytes(), final_state_commitment: transaction_header.final_state_commitment().to_bytes(), - input_notes: input_notes_binary, + nullifiers: nullifiers_binary, output_notes: output_notes_binary, size_in_bytes, } @@ -287,11 +288,13 @@ pub fn select_transactions_records( account_ids: &[AccountId], block_range: RangeInclusive, ) -> Result<(BlockNumber, Vec), DatabaseError> { - const MAX_PAYLOAD_BYTES: i64 = 4 * 1024 * 1024; // 4 MB const NUM_TXS_PER_CHUNK: i64 = 1000; // Read 1000 transactions at a time QueryParamAccountIdLimit::check(account_ids.len())?; + let max_payload_bytes = + i64::try_from(MAX_RESPONSE_PAYLOAD_BYTES).expect("payload limit fits within i64"); + if block_range.is_empty() { return Err(DatabaseError::InvalidBlockRange { from: *block_range.start(), @@ -341,7 +344,7 @@ pub fn select_transactions_records( let mut last_added_tx: Option = None; for tx in chunk { - if total_size + tx.size_in_bytes <= MAX_PAYLOAD_BYTES { + if total_size + tx.size_in_bytes <= max_payload_bytes { total_size += tx.size_in_bytes; 
last_added_tx = Some(tx); added_from_chunk += 1; @@ -366,7 +369,7 @@ pub fn select_transactions_records( // Ensure block consistency: remove the last block if it's incomplete // (we may have stopped loading mid-block due to size constraints) - if total_size >= MAX_PAYLOAD_BYTES { + if total_size >= max_payload_bytes { // SAFETY: We're guaranteed to have at least one transaction since total_size > 0 let last_block_num = last_block_num.expect( "guaranteed to have processed at least one transaction when size limit is reached", diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index 5124beabc8..c472940e45 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -1,6 +1,6 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; -use miden_lib::utils::{Deserializable, DeserializationError, Serializable}; -use miden_objects::note::Nullifier; +use miden_protocol::note::Nullifier; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; use crate::errors::DatabaseError; diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 9fadd01751..0132848929 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -1,13 +1,13 @@ // @generated automatically by Diesel CLI. diesel::table! { - account_storage_map_values (account_id, block_num, slot, key) { + account_storage_map_values (account_id, block_num, slot_name, key) { account_id -> Binary, block_num -> BigInt, - slot -> Integer, + slot_name -> Text, key -> Binary, value -> Binary, - is_latest_update -> Bool, + is_latest -> Bool, } } @@ -17,20 +17,22 @@ diesel::table! { block_num -> BigInt, vault_key -> Binary, asset -> Nullable, - is_latest_update -> Bool, + is_latest -> Bool, } } diesel::table! 
{ - accounts (account_id) { + accounts (account_id, block_num) { account_id -> Binary, network_account_id_prefix -> Nullable, account_commitment -> Binary, code_commitment -> Nullable, - storage -> Nullable, - vault -> Nullable, nonce -> Nullable, + storage_header -> Nullable, + vault_root -> Nullable, block_num -> BigInt, + is_latest -> Bool, + created_at_block -> BigInt, } } @@ -65,9 +67,9 @@ diesel::table! { note_type -> Integer, sender -> Binary, tag -> Integer, - execution_mode -> Integer, - aux -> BigInt, - execution_hint -> BigInt, + network_note_type -> Integer, + target_account_id -> Nullable, + attachment -> Binary, inclusion_path -> Binary, consumed_at -> Nullable, nullifier -> Nullable, @@ -93,7 +95,7 @@ diesel::table! { block_num -> BigInt, initial_state_commitment -> Binary, final_state_commitment -> Binary, - input_notes -> Binary, + nullifiers -> Binary, output_notes -> Binary, size_in_bytes -> BigInt, } @@ -101,11 +103,12 @@ diesel::table! { diesel::joinable!(accounts -> account_codes (code_commitment)); diesel::joinable!(accounts -> block_headers (block_num)); -diesel::joinable!(notes -> accounts (sender)); +// Note: Cannot use diesel::joinable! with accounts table due to composite primary key +// diesel::joinable!(notes -> accounts (sender)); +// diesel::joinable!(transactions -> accounts (account_id)); diesel::joinable!(notes -> block_headers (committed_at)); diesel::joinable!(notes -> note_scripts (script_root)); diesel::joinable!(nullifiers -> block_headers (block_num)); -diesel::joinable!(transactions -> accounts (account_id)); diesel::joinable!(transactions -> block_headers (block_num)); diesel::allow_tables_to_appear_in_same_query!( diff --git a/crates/store/src/db/schema_hash.rs b/crates/store/src/db/schema_hash.rs new file mode 100644 index 0000000000..28e480fc0c --- /dev/null +++ b/crates/store/src/db/schema_hash.rs @@ -0,0 +1,186 @@ +//! Schema verification to detect database schema changes. +//! +//! Detects: +//! +//! 
- Direct modifications to the database schema outside of migrations +//! - Running a node against a database created with different set of migrations +//! - Forgetting to reset the database after schema changes i.e. for a specific migration +//! +//! The verification works by creating an in-memory reference database, applying all +//! migrations to it, and comparing its schema against the actual database schema. + +use diesel::{Connection, RunQueryDsl, SqliteConnection}; +use diesel_migrations::MigrationHarness; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::migrations::MIGRATIONS; +use crate::errors::SchemaVerificationError; + +/// Represents a schema object for comparison. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +struct SchemaObject { + object_type: String, + name: String, + sql: String, +} + +/// Represents a row from the `sqlite_schema` table. +#[derive(diesel::QueryableByName, Debug)] +struct SqliteSchemaRow { + #[diesel(sql_type = diesel::sql_types::Text)] + schema_type: String, + #[diesel(sql_type = diesel::sql_types::Text)] + name: String, + #[diesel(sql_type = diesel::sql_types::Nullable)] + sql: Option, +} + +/// Extracts all schema objects from a database connection. +fn extract_schema( + conn: &mut SqliteConnection, +) -> Result, SchemaVerificationError> { + let rows: Vec = diesel::sql_query( + "SELECT type as schema_type, name, sql FROM sqlite_schema \ + WHERE type IN ('table', 'index') \ + AND name NOT LIKE 'sqlite_%' \ + AND name NOT LIKE '__diesel_%' \ + ORDER BY type, name", + ) + .load(conn) + .map_err(SchemaVerificationError::SchemaExtraction)?; + + let mut objects: Vec = rows + .into_iter() + .filter_map(|row| { + row.sql.map(|sql| SchemaObject { + object_type: row.schema_type, + name: row.name, + sql, + }) + }) + .collect(); + + objects.sort(); + Ok(objects) +} + +/// Computes the expected schema by applying migrations to an in-memory database. 
+fn compute_expected_schema() -> Result, SchemaVerificationError> { + let mut conn = SqliteConnection::establish(":memory:") + .map_err(SchemaVerificationError::InMemoryDbCreation)?; + + conn.run_pending_migrations(MIGRATIONS) + .map_err(SchemaVerificationError::MigrationApplication)?; + + extract_schema(&mut conn) +} + +/// Verifies that the database schema matches the expected schema. +/// +/// Creates an in-memory database, applies all migrations, and compares schemas. +/// +/// # Errors +/// +/// Returns `SchemaVerificationError::Mismatch` if schemas differ. +#[instrument(level = "info", target = COMPONENT, skip_all, err)] +pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificationError> { + let expected = compute_expected_schema()?; + let actual = extract_schema(conn)?; + + if actual != expected { + let expected_names: Vec<_> = expected.iter().map(|o| &o.name).collect(); + let actual_names: Vec<_> = actual.iter().map(|o| &o.name).collect(); + + // Find differences for better error messages + let missing: Vec<_> = expected.iter().filter(|e| !actual.contains(e)).collect(); + let extra: Vec<_> = actual.iter().filter(|a| !expected.contains(a)).collect(); + + tracing::error!( + target: COMPONENT, + ?expected_names, + ?actual_names, + missing_count = missing.len(), + extra_count = extra.len(), + "Database schema mismatch detected" + ); + + // Log specific differences at debug level + for obj in &missing { + tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + } + for obj in &extra { + tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + } + + return Err(SchemaVerificationError::Mismatch { + expected_count: expected.len(), + actual_count: actual.len(), + missing_count: missing.len(), + extra_count: extra.len(), + }); + } + + tracing::info!(target: COMPONENT, objects = expected.len(), "Database schema verification passed"); + Ok(()) +} + +#[cfg(test)] +mod tests { + use 
super::*; + use crate::db::migrations::apply_migrations; + use crate::errors::DatabaseError; + + #[test] + fn verify_schema_passes_for_correct_schema() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + verify_schema(&mut conn).expect("Should pass for correct schema"); + } + + #[test] + fn verify_schema_fails_for_added_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE rogue_table (id INTEGER PRIMARY KEY)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn verify_schema_fails_for_removed_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("DROP TABLE transactions").execute(&mut conn).unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. 
}) + )); + } + + #[test] + fn apply_migrations_succeeds_on_fresh_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + apply_migrations(&mut conn).expect("Should succeed on fresh database"); + } + + #[test] + fn apply_migrations_fails_on_tampered_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE tampered (id INTEGER)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + } +} diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index aa8a5617c5..6bd26dda10 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -5,16 +5,14 @@ use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; use diesel::{Connection, SqliteConnection}; -use miden_lib::account::auth::AuthRpoFalcon512; -use miden_lib::note::create_p2id_note; -use miden_lib::transaction::TransactionKernel; use miden_node_proto::domain::account::AccountSummary; -use miden_node_utils::fee::test_fee_params; -use miden_objects::account::auth::PublicKeyCommitment; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_node_utils::fee::{test_fee, test_fee_params}; +use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ Account, AccountBuilder, + AccountCode, AccountComponent, AccountDelta, AccountId, @@ -24,19 +22,24 @@ use miden_objects::account::{ AccountType, AccountVaultDelta, StorageSlot, + StorageSlotContent, + StorageSlotDelta, + StorageSlotName, }; -use miden_objects::asset::{Asset, AssetVaultKey, FungibleAsset}; -use miden_objects::block::{ +use miden_protocol::asset::{Asset, AssetVaultKey, FungibleAsset}; +use miden_protocol::block::{ BlockAccountUpdate, BlockHeader, BlockNoteIndex, BlockNoteTree, BlockNumber, 
}; -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::crypto::rand::RpoRandomCoin; -use miden_objects::note::{ +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::note::{ Note, + NoteAttachment, NoteDetails, NoteExecutionHint, NoteHeader, @@ -46,21 +49,26 @@ use miden_objects::note::{ NoteType, Nullifier, }; -use miden_objects::testing::account_id::{ +use miden_protocol::testing::account_id::{ ACCOUNT_ID_PRIVATE_SENDER, ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; -use miden_objects::transaction::{ +use miden_protocol::testing::random_signer::RandomBlockSigner; +use miden_protocol::transaction::{ InputNoteCommitment, InputNotes, OrderedTransactionHeaders, TransactionHeader, TransactionId, }; -use miden_objects::{EMPTY_WORD, Felt, FieldElement, Word, ZERO}; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; +use miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::code_builder::CodeBuilder; +use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; use pretty_assertions::assert_eq; use rand::Rng; @@ -88,7 +96,7 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) { num_to_word(7), num_to_word(8), num_to_word(9), - num_to_word(10), + SecretKey::new().public_key(), test_fee_params(), 11_u8.into(), ); @@ -218,7 +226,7 @@ pub fn create_note(account_id: AccountId) -> Note { FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 10).unwrap(), )], NoteType::Public, - Felt::default(), + NoteAttachment::default(), &mut *rng, ) .expect("Failed to create note") @@ -250,7 +258,7 @@ fn sql_select_notes() { note_index: BlockNoteIndex::new(0, 
i.try_into().unwrap()).unwrap(), note_id: num_to_word(u64::try_from(i).unwrap()), note_commitment: num_to_word(u64::try_from(i).unwrap()), - metadata: *new_note.metadata(), + metadata: new_note.metadata().clone(), details: Some(NoteDetails::from(&new_note)), inclusion_path: SparseMerklePath::default(), }; @@ -273,105 +281,6 @@ fn sql_select_notes() { } } -#[test] -#[miden_node_test_macro::enable_logging] -fn sql_select_notes_different_execution_hints() { - let mut conn = create_db(); - let conn = &mut conn; - - let block_num = 1.into(); - create_block(conn, block_num); - - // test querying empty table - let notes = queries::select_all_notes(conn).unwrap(); - assert!(notes.is_empty()); - - let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); - - queries::upsert_accounts(conn, &[mock_block_account_update(sender, 0)], block_num).unwrap(); - - // test multiple entries - let mut state = vec![]; - - let new_note = create_note(sender); - - let note_none = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, 0).unwrap(), - note_id: num_to_word(0), - note_commitment: num_to_word(0), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - 0.into(), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), - details: Some(NoteDetails::from(&new_note)), - inclusion_path: SparseMerklePath::default(), - }; - state.push(note_none.clone()); - - queries::insert_scripts(conn, [¬e_none]).unwrap(); // only necessary for the first note - let res = queries::insert_notes(conn, &[(note_none, None)]); - assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - - let note = &queries::select_notes_by_id(conn, &[num_to_word(0).into()]).unwrap()[0]; - - assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::none()); - - let note_always = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, 1).unwrap(), - note_id: num_to_word(1), - note_commitment: num_to_word(1), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - 
0.into(), - NoteExecutionHint::always(), - Felt::default(), - ) - .unwrap(), - details: Some(NoteDetails::from(&new_note)), - inclusion_path: SparseMerklePath::default(), - }; - state.push(note_always.clone()); - - let res = queries::insert_notes(conn, &[(note_always, None)]); - assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - - let note = &queries::select_notes_by_id(conn, &[num_to_word(1).into()]).unwrap()[0]; - assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::always()); - - let note_after_block = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, 2).unwrap(), - note_id: num_to_word(2), - note_commitment: num_to_word(2), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - 2.into(), - NoteExecutionHint::after_block(12.into()).unwrap(), - Felt::default(), - ) - .unwrap(), - details: Some(NoteDetails::from(&new_note)), - inclusion_path: SparseMerklePath::default(), - }; - state.push(note_after_block.clone()); - - let res = queries::insert_notes(conn, &[(note_after_block, None)]); - assert_eq!(res.unwrap(), 1, "One element must have been inserted"); - let note = &queries::select_notes_by_id(conn, &[num_to_word(2).into()]).unwrap()[0]; - assert_eq!( - note.metadata.execution_hint(), - NoteExecutionHint::after_block(12.into()).unwrap() - ); -} - #[test] #[miden_node_test_macro::enable_logging] fn sql_select_note_script_by_root() { @@ -393,7 +302,7 @@ fn sql_select_note_script_by_root() { note_index: BlockNoteIndex::new(0, 0.try_into().unwrap()).unwrap(), note_id: num_to_word(0), note_commitment: num_to_word(0), - metadata: *new_note.metadata(), + metadata: new_note.metadata().clone(), details: Some(NoteDetails::from(&new_note)), inclusion_path: SparseMerklePath::default(), }; @@ -446,139 +355,6 @@ fn make_account_and_note( #[test] #[miden_node_test_macro::enable_logging] fn sql_unconsumed_network_notes() { - // Number of notes to generate. 
- const N: u64 = 32; - - let mut conn = create_db(); - let conn = &mut conn; - - let block_num = BlockNumber::from(1); - // An arbitrary public account (network note tag requires public account). - create_block(conn, block_num); - - let account_notes = [ - make_account_and_note(conn, block_num, [0u8; 32], AccountStorageMode::Public), - make_account_and_note(conn, block_num, [1u8; 32], AccountStorageMode::Network), - ]; - let network_account_id = account_notes[1].0; - - // Create some notes, of which half are network notes. - let notes = (0..N) - .map(|i| { - let index = (i % 2) as usize; - let is_network = account_notes[index].0.storage_mode() == AccountStorageMode::Network; - let account_id = account_notes[index].0; - let new_note = &account_notes[index].1; - let note = NoteRecord { - block_num, - note_index: BlockNoteIndex::new(0, i as usize).unwrap(), - note_id: num_to_word(i), - note_commitment: num_to_word(i), - metadata: NoteMetadata::new( - account_notes[index].0, - NoteType::Public, - NoteTag::from_account_id(account_id), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), - details: is_network.then_some(NoteDetails::from(new_note)), - inclusion_path: SparseMerklePath::default(), - }; - - (note, is_network.then_some(num_to_nullifier(i))) - }) - .collect::>(); - - // Copy out all network notes to assert against. These will be in chronological order already. - let network_notes = notes - .iter() - .filter_map(|(note, nullifier)| nullifier.is_some().then_some(note.clone())) - .collect::>(); - - // Insert the set of notes. - queries::insert_scripts(conn, notes.iter().map(|(note, _)| note)).unwrap(); - queries::insert_notes(conn, ¬es).unwrap(); - - // Fetch all network notes by setting a limit larger than the amount available. 
- let (result, _) = queries::unconsumed_network_notes( - conn, - Page { - token: None, - size: NonZeroUsize::new(N as usize * 10).unwrap(), - }, - ) - .unwrap(); - assert_eq!(result, network_notes); - let (result, _) = queries::select_unconsumed_network_notes_by_tag( - conn, - NoteTag::from_account_id(network_account_id).into(), - block_num, - Page { - token: None, - size: NonZeroUsize::new(N as usize * 10).unwrap(), - }, - ) - .unwrap(); - assert_eq!(result, network_notes); - - // Check pagination works as expected. - let limit = 5; - let mut page = Page { - token: None, - size: NonZeroUsize::new(limit).unwrap(), - }; - network_notes.chunks(limit).for_each(|expected| { - let (result, new_page) = queries::unconsumed_network_notes(conn, page).unwrap(); - page = new_page; - assert_eq!(result, expected); - }); - network_notes.chunks(limit).for_each(|expected| { - let (result, new_page) = queries::select_unconsumed_network_notes_by_tag( - conn, - NoteTag::from_account_id(network_account_id).into(), - block_num, - page, - ) - .unwrap(); - page = new_page; - assert_eq!(result, expected); - }); - assert!(page.token.is_none()); - - // Consume every third network note and ensure these are now excluded from the results. 
- let consumed = notes - .iter() - .filter_map(|(_, nullifier)| *nullifier) - .step_by(3) - .collect::>(); - queries::insert_nullifiers_for_block(conn, &consumed, block_num).unwrap(); - - let expected = network_notes - .iter() - .enumerate() - .filter(|(i, _)| i % 3 != 0) - .map(|(_, note)| note.clone()) - .collect::>(); - let page = Page { - token: None, - size: NonZeroUsize::new(N as usize * 10).unwrap(), - }; - let (result, _) = queries::unconsumed_network_notes(conn, page).unwrap(); - assert_eq!(result, expected); - let (result, _) = queries::select_unconsumed_network_notes_by_tag( - conn, - NoteTag::from_account_id(network_account_id).into(), - block_num, - page, - ) - .unwrap(); - assert_eq!(result, expected); -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn sql_unconsumed_network_notes_for_account() { let mut conn = create_db(); // Create account. @@ -589,36 +365,37 @@ fn sql_unconsumed_network_notes_for_account() { create_block(&mut conn, 0.into()); create_block(&mut conn, 1.into()); + // Create a NetworkAccountTarget attachment for the network account + let target = NetworkAccountTarget::new(account_note.0, NoteExecutionHint::Always) + .expect("NetworkAccountTarget creation should succeed for network account"); + let attachment: NoteAttachment = target.into(); + // Create an unconsumed note in each block. - let notes = (0..2) - .map(|i: u32| { - let note = NoteRecord { - block_num: 0.into(), // Created on same block. 
- note_index: BlockNoteIndex::new(0, i as usize).unwrap(), - note_id: num_to_word(i.into()), - note_commitment: num_to_word(i.into()), - metadata: NoteMetadata::new( - account_note.0, - NoteType::Public, - NoteTag::from_account_id(account_note.0), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), - details: None, - inclusion_path: SparseMerklePath::default(), - }; - (note, Some(num_to_nullifier(i.into()))) - }) - .collect::>(); + let notes = Vec::from_iter((0..2).map(|i: u32| { + let note = NoteRecord { + block_num: 0.into(), // Created on same block. + note_index: BlockNoteIndex::new(0, i as usize).unwrap(), + note_id: num_to_word(i.into()), + note_commitment: num_to_word(i.into()), + metadata: NoteMetadata::new( + account_note.0, + NoteType::Public, + NoteTag::with_account_target(account_note.0), + ) + .with_attachment(attachment.clone()), + details: None, + inclusion_path: SparseMerklePath::default(), + }; + (note, Some(num_to_nullifier(i.into()))) + })); queries::insert_scripts(&mut conn, notes.iter().map(|(note, _)| note)).unwrap(); queries::insert_notes(&mut conn, ¬es).unwrap(); // Both notes are unconsumed, query should return both notes on both blocks. (0..2).for_each(|i: u32| { - let (result, _) = queries::select_unconsumed_network_notes_by_tag( + let (result, _) = queries::select_unconsumed_network_notes_by_account_id( &mut conn, - NoteTag::from_account_id(account_note.0).into(), + account_note.0, i.into(), Page { token: None, @@ -633,9 +410,9 @@ fn sql_unconsumed_network_notes_for_account() { queries::insert_nullifiers_for_block(&mut conn, &[notes[1].1.unwrap()], 1.into()).unwrap(); // Query against first block should return both notes. 
- let (result, _) = queries::select_unconsumed_network_notes_by_tag( + let (result, _) = queries::select_unconsumed_network_notes_by_account_id( &mut conn, - NoteTag::from_account_id(account_note.0).into(), + account_note.0, 0.into(), Page { token: None, @@ -646,9 +423,9 @@ fn sql_unconsumed_network_notes_for_account() { assert_eq!(result.len(), 2); // Query against second block should return only first note. - let (result, _) = queries::select_unconsumed_network_notes_by_tag( + let (result, _) = queries::select_unconsumed_network_notes_by_account_id( &mut conn, - NoteTag::from_account_id(account_note.0).into(), + account_note.0, 1.into(), Page { token: None, @@ -984,7 +761,7 @@ fn db_block_header() { num_to_word(7), num_to_word(8), num_to_word(9), - num_to_word(10), + SecretKey::new().public_key(), test_fee_params(), 11_u8.into(), ); @@ -1016,7 +793,7 @@ fn db_block_header() { num_to_word(17), num_to_word(18), num_to_word(19), - num_to_word(20), + SecretKey::new().public_key(), test_fee_params(), 21_u8.into(), ); @@ -1141,27 +918,18 @@ fn notes() { let new_note = create_note(sender); let note_index = BlockNoteIndex::new(0, 2).unwrap(); let tag = 5u32; - let note_metadata = - NoteMetadata::new(sender, NoteType::Public, tag.into(), NoteExecutionHint::none(), ZERO) - .unwrap(); + let note_metadata = NoteMetadata::new(sender, NoteType::Public, tag.into()); - let values = [(note_index, new_note.id(), note_metadata)]; - let notes_db = BlockNoteTree::with_entries(values.iter().copied()).unwrap(); + let values = [(note_index, new_note.id(), ¬e_metadata)]; + let notes_db = BlockNoteTree::with_entries(values).unwrap(); let inclusion_path = notes_db.open(note_index); let note = NoteRecord { block_num: block_num_1, note_index, - note_id: new_note.id().into(), + note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), - metadata: NoteMetadata::new( - sender, - NoteType::Public, - tag.into(), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), + 
metadata: NoteMetadata::new(sender, NoteType::Public, tag.into()), details: Some(NoteDetails::from(&new_note)), inclusion_path: inclusion_path.clone(), }; @@ -1199,9 +967,9 @@ fn notes() { let note2 = NoteRecord { block_num: block_num_2, note_index: note.note_index, - note_id: new_note.id().into(), + note_id: new_note.id().as_word(), note_commitment: new_note.commitment(), - metadata: note.metadata, + metadata: note.metadata.clone(), details: None, inclusion_path: inclusion_path.clone(), }; @@ -1229,7 +997,7 @@ fn notes() { // test query notes by id let notes = vec![note.clone(), note2]; - let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::from(note.note_id))); + let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::from_raw(note.note_id))); let res = queries::select_notes_by_id(conn, ¬e_ids).unwrap(); assert_eq!(res, notes); @@ -1247,10 +1015,17 @@ fn insert_account_delta( block_number: BlockNumber, delta: &AccountDelta, ) { - for (slot, slot_delta) in delta.storage().maps() { + for (slot_name, slot_delta) in delta.storage().maps() { for (k, v) in slot_delta.entries() { - insert_account_storage_map_value(conn, account_id, block_number, *slot, *k.inner(), *v) - .unwrap(); + insert_account_storage_map_value( + conn, + account_id, + block_number, + slot_name.clone(), + *k.inner(), + *v, + ) + .unwrap(); } } } @@ -1260,7 +1035,7 @@ fn insert_account_delta( fn sql_account_storage_map_values_insertion() { use std::collections::BTreeMap; - use miden_objects::account::StorageMapDelta; + use miden_protocol::account::StorageMapDelta; let mut conn = create_db(); let conn = &mut conn; @@ -1273,7 +1048,7 @@ fn sql_account_storage_map_values_insertion() { let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); - let slot = 3u8; + let slot_name = StorageSlotName::mock(3); let key1 = Word::from([1u32, 2, 3, 4]); let key2 = Word::from([5u32, 6, 7, 8]); let value1 = Word::from([10u32, 11, 12, 13]); @@ -1284,8 +1059,8 @@ 
fn sql_account_storage_map_values_insertion() { let mut map1 = StorageMapDelta::default(); map1.insert(key1, value1); map1.insert(key2, value2); - let maps1: BTreeMap<_, _> = [(slot, map1)].into_iter().collect(); - let storage1 = AccountStorageDelta::from_parts(BTreeMap::new(), maps1).unwrap(); + let delta1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map1))]); + let storage1 = AccountStorageDelta::from_raw(delta1); let delta1 = AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap(); insert_account_delta(conn, account_id, block1, &delta1); @@ -1298,8 +1073,8 @@ fn sql_account_storage_map_values_insertion() { // Update key1 at block 2 let mut map2 = StorageMapDelta::default(); map2.insert(key1, value3); - let maps2 = BTreeMap::from_iter([(slot, map2)]); - let storage2 = AccountStorageDelta::from_parts(BTreeMap::new(), maps2).unwrap(); + let delta2 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map2))]); + let storage2 = AccountStorageDelta::from_raw(delta2); let delta2 = AccountDelta::new(account_id, storage2, AccountVaultDelta::default(), Felt::new(2)) .unwrap(); @@ -1315,14 +1090,14 @@ fn sql_account_storage_map_values_insertion() { storage_map_values .values .iter() - .any(|val| val.slot_index == slot && val.key == key1 && val.value == value3), + .any(|val| val.slot_name == slot_name && val.key == key1 && val.value == value3), "key1 should point to new value at block2" ); assert!( storage_map_values .values .iter() - .any(|val| val.slot_index == slot && val.key == key2 && val.value == value2), + .any(|val| val.slot_name == slot_name && val.key == key2 && val.value == value2), "key2 should stay the same (from block1)" ); } @@ -1331,7 +1106,7 @@ fn sql_account_storage_map_values_insertion() { fn select_storage_map_sync_values() { let mut conn = create_db(); let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let slot = 5u8; + let slot_name = 
StorageSlotName::mock(5); let key1 = num_to_word(1); let key2 = num_to_word(2); @@ -1346,20 +1121,55 @@ fn select_storage_map_sync_values() { // Insert data across multiple blocks using individual inserts // Block 1: key1 -> value1, key2 -> value2 - queries::insert_account_storage_map_value(&mut conn, account_id, block1, slot, key1, value1) - .unwrap(); - queries::insert_account_storage_map_value(&mut conn, account_id, block1, slot, key2, value2) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + key1, + value1, + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + key2, + value2, + ) + .unwrap(); // Block 2: key2 -> value3 (update), key3 -> value3 (new) - queries::insert_account_storage_map_value(&mut conn, account_id, block2, slot, key2, value3) - .unwrap(); - queries::insert_account_storage_map_value(&mut conn, account_id, block2, slot, key3, value3) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + key2, + value3, + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + key3, + value3, + ) + .unwrap(); // Block 3: key1 -> value2 (update) - queries::insert_account_storage_map_value(&mut conn, account_id, block3, slot, key1, value2) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block3, + slot_name.clone(), + key1, + value2, + ) + .unwrap(); let page = queries::select_account_storage_map_values( &mut conn, @@ -1373,19 +1183,19 @@ fn select_storage_map_sync_values() { // Compare ordered by key using a tuple view to avoid relying on the concrete struct name let expected = vec![ StorageMapValue { - slot_index: slot, + slot_name: slot_name.clone(), key: key2, value: value3, block_num: block2, }, StorageMapValue { - slot_index: slot, + slot_name: slot_name.clone(), 
key: key3, value: value3, block_num: block2, }, StorageMapValue { - slot_index: slot, + slot_name, key: key1, value: value2, block_num: block3, @@ -1402,13 +1212,37 @@ fn num_to_word(n: u64) -> Word { } fn num_to_nullifier(n: u64) -> Nullifier { - Nullifier::from(num_to_word(n)) + Nullifier::from_raw(num_to_word(n)) } fn mock_block_account_update(account_id: AccountId, num: u64) -> BlockAccountUpdate { BlockAccountUpdate::new(account_id, num_to_word(num), AccountUpdateDetails::Private) } +// Helper function to create account with specific code for tests +fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account { + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(1), num_to_word(1)), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", code_str) + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountUpdatableCode); + + AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap() +} + fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader { let initial_state_commitment = Word::try_from([num, 0, 0, 0]).unwrap(); let final_account_commitment = Word::try_from([0, num, 0, 0]).unwrap(); @@ -1423,14 +1257,7 @@ fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader Word::try_from([num, num, 0, 0]).unwrap(), Word::try_from([0, 0, num, num]).unwrap(), ), - NoteMetadata::new( - account_id, - NoteType::Public, - NoteTag::LocalAny(num as u32), - NoteExecutionHint::None, - Felt::default(), - ) - .unwrap(), + NoteMetadata::new(account_id, NoteType::Public, 
NoteTag::new(num as u32)), )]; TransactionHeader::new_unchecked( @@ -1445,6 +1272,7 @@ fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader final_account_commitment, input_notes, output_notes, + test_fee(), ) } @@ -1478,35 +1306,978 @@ fn mock_account_code_and_storage( init_seed: Option<[u8; 32]>, ) -> Account { let component_code = "\ - export.account_procedure_1 + pub proc account_procedure_1 push.1.2 add end "; let component_storage = vec![ - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(1)), - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(3)), - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(5)), + StorageSlot::with_value(StorageSlotName::mock(0), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(1), num_to_word(1)), + StorageSlot::with_value(StorageSlotName::mock(2), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(3), num_to_word(3)), + StorageSlot::with_value(StorageSlotName::mock(4), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(5), num_to_word(5)), ]; - let component = AccountComponent::compile( - component_code, - TransactionKernel::assembler(), - component_storage, - ) - .unwrap() - .with_supported_type(account_type); + let account_component_code = CodeBuilder::default() + .compile_component_code("counter_contract::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); AccountBuilder::new(init_seed.unwrap_or([0; 32])) .account_type(account_type) .storage_mode(storage_mode) .with_assets(assets) - .with_component(component) - .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .with_component(account_component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap() +} + +// ACCOUNT CODE TESTS +// 
================================================================================================ + +#[test] +fn test_select_account_code_by_commitment() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + + // Create block 1 + create_block(&mut conn, block_num_1); + + // Create an account with code at block 1 using the existing mock function + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + // Get the code commitment and bytes before inserting + let code_commitment = account.code().commitment(); + let expected_code = account.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account.id(), + account.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // Query code by commitment - should return the code + let code = queries::select_account_code_by_commitment(&mut conn, code_commitment) + .unwrap() + .expect("Code should exist"); + assert_eq!(code, expected_code); + + // Query code for non-existent commitment - should return None + let non_existent_commitment = [0u8; 32]; + let non_existent_commitment = Word::read_from_bytes(&non_existent_commitment).unwrap(); + let code_other = + queries::select_account_code_by_commitment(&mut conn, non_existent_commitment).unwrap(); + assert!(code_other.is_none(), "Code should not exist for non-existent commitment"); +} + +#[test] +fn test_select_account_code_by_commitment_multiple_codes() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + let block_num_2 = BlockNumber::from(2); + + // Create blocks + create_block(&mut conn, block_num_1); + create_block(&mut conn, block_num_2); + + // Create account with code v1 at block 1 + let code_v1_str = "\ + pub proc account_procedure_1 + push.1.2 + add + end + "; + let account_v1 = 
create_account_with_code(code_v1_str, [1u8; 32]); + let code_v1_commitment = account_v1.code().commitment(); + let code_v1 = account_v1.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v1.id(), + account_v1.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v1).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // Create account with different code v2 at block 2 + let code_v2_str = "\ + pub proc account_procedure_1 + push.3.4 + mul + end + "; + let account_v2 = create_account_with_code(code_v2_str, [1u8; 32]); // Same seed to keep same account_id + let code_v2_commitment = account_v2.code().commitment(); + let code_v2 = account_v2.code().to_bytes(); + + // Verify that the codes are actually different + assert_ne!( + code_v1, code_v2, + "Test setup error: codes should be different for different code strings" + ); + assert_ne!( + code_v1_commitment, code_v2_commitment, + "Test setup error: code commitments should be different" + ); + + // Insert the updated account at block 2 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v2.id(), + account_v2.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v2).unwrap()), + )], + block_num_2, + ) + .unwrap(); + + // Both codes should be retrievable by their respective commitments + let code_from_v1_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v1_commitment) + .unwrap() + .expect("v1 code should exist"); + assert_eq!(code_from_v1_commitment, code_v1, "v1 commitment should return v1 code"); + + let code_from_v2_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v2_commitment) + .unwrap() + .expect("v2 code should exist"); + assert_eq!(code_from_v2_commitment, code_v2, "v2 commitment should return v2 code"); +} + +// GENESIS REGRESSION TESTS +// 
================================================================================================ + +/// Verifies genesis block with account containing vault assets can be inserted. +#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_assets() { + use crate::genesis::GenesisState; + let component_code = "pub proc foo push.1 end"; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + .with_supports_all_types(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 1000).unwrap(); + + let account = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with account containing storage maps can be inserted. 
+#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_storage_map() { + use miden_protocol::account::StorageMap; + + use crate::genesis::GenesisState; + + let storage_map = StorageMap::with_entries(vec![ + ( + Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), + ), + ( + Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), + ), + ]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_map(StorageSlotName::mock(0), storage_map), + StorageSlot::with_empty_value(StorageSlotName::mock(1)), + ]; + + let component_code = "pub proc foo push.1 end"; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with account containing both vault assets and storage maps. 
+#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_assets_and_storage() { + use miden_protocol::account::StorageMap; + + use crate::genesis::GenesisState; + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + + let storage_map = StorageMap::with_entries(vec![( + Word::from([Felt::new(100), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]), + )]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_empty_value(StorageSlotName::mock(0)), + StorageSlot::with_map(StorageSlotName::mock(2), storage_map), + ]; + + let component_code = "pub proc foo push.1 end"; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([3u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() + .unwrap(); + + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with multiple accounts of different types. +/// Tests realistic genesis scenario with basic accounts, assets, and storage. 
+#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_multiple_accounts() { + use miden_protocol::account::StorageMap; + + use crate::genesis::GenesisState; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", "pub proc foo push.1 end") + .unwrap(); + let account_component1 = AccountComponent::new(account_component_code, Vec::new()) .unwrap() + .with_supports_all_types(); + + let account1 = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component1) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 2000).unwrap(); + + let account_component_code = CodeBuilder::default() + .compile_component_code("bar::interface", "pub proc bar push.2 end") + .unwrap(); + let account_component2 = AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + .with_supports_all_types(); + + let account2 = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component2) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let storage_map = StorageMap::with_entries(vec![( + Word::from([Felt::new(5), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(15), Felt::new(25), Felt::new(35), Felt::new(45)]), + )]) + .unwrap(); + + let component_storage = vec![StorageSlot::with_map(StorageSlotName::mock(0), storage_map)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("baz::interface", "pub proc baz push.3 end") + .unwrap(); + let account_component3 = 
AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account3 = AccountBuilder::new([3u8; 32]) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component3) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = GenesisState::new( + vec![account1, account2, account3], + test_fee_params(), + 1, + 0, + SecretKey::random(), + ); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn regression_1461_full_state_delta_inserts_vault_assets() { + let mut conn = create_db(); + let block_num: BlockNumber = 1.into(); + create_block(&mut conn, block_num); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [fungible_asset.into()], + Some([42u8; 32]), + ); + let account_id = account.id(); + + // Convert to full state delta, same as genesis + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + assert!(account_delta.is_full_state()); + + let block_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(account_delta), + ); + + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + let (_, vault_assets) = queries::select_account_vault_assets( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + // Before the fix, vault_assets was empty + let vault_asset = vault_assets.first().unwrap(); + let expected_asset: Asset = fungible_asset.into(); + assert_eq!(vault_asset.block_num, 
block_num); + assert_eq!(vault_asset.asset, Some(expected_asset)); + assert_eq!(vault_asset.vault_key, expected_asset.vault_key()); +} + +// SERIALIZATION SYMMETRY TESTS +// ================================================================================================ +// +// These tests ensure that `to_bytes` and `from_bytes`/`read_from_bytes` are symmetric for all +// types used in database operations. This guarantees that data inserted into the database can +// always be correctly retrieved. + +#[test] +fn serialization_symmetry_core_types() { + // AccountId + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + let bytes = account_id.to_bytes(); + let restored = AccountId::read_from_bytes(&bytes).unwrap(); + assert_eq!(account_id, restored, "AccountId serialization must be symmetric"); + + // Word + let word = num_to_word(0x1234_5678_9ABC_DEF0); + let bytes = word.to_bytes(); + let restored = Word::read_from_bytes(&bytes).unwrap(); + assert_eq!(word, restored, "Word serialization must be symmetric"); + + // Nullifier + let nullifier = num_to_nullifier(0xDEAD_BEEF); + let bytes = nullifier.to_bytes(); + let restored = Nullifier::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifier, restored, "Nullifier serialization must be symmetric"); + + // TransactionId + let tx_id = TransactionId::new(num_to_word(1), num_to_word(2), num_to_word(3), num_to_word(4)); + let bytes = tx_id.to_bytes(); + let restored = TransactionId::read_from_bytes(&bytes).unwrap(); + assert_eq!(tx_id, restored, "TransactionId serialization must be symmetric"); + + // NoteId + let note_id = NoteId::new(num_to_word(1), num_to_word(2)); + let bytes = note_id.to_bytes(); + let restored = NoteId::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_id, restored, "NoteId serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_block_header() { + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + 3.into(), + num_to_word(4), + 
num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + let bytes = block_header.to_bytes(); + let restored = BlockHeader::read_from_bytes(&bytes).unwrap(); + assert_eq!(block_header, restored, "BlockHeader serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_assets() { + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // FungibleAsset + let fungible = FungibleAsset::new(faucet_id, 1000).unwrap(); + let asset: Asset = fungible.into(); + let bytes = asset.to_bytes(); + let restored = Asset::read_from_bytes(&bytes).unwrap(); + assert_eq!(asset, restored, "Asset (fungible) serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_account_code() { + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + let code = account.code(); + let bytes = code.to_bytes(); + let restored = AccountCode::read_from_bytes(&bytes).unwrap(); + assert_eq!(*code, restored, "AccountCode serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_sparse_merkle_path() { + let path = SparseMerklePath::default(); + let bytes = path.to_bytes(); + let restored = SparseMerklePath::read_from_bytes(&bytes).unwrap(); + assert_eq!(path, restored, "SparseMerklePath serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_metadata() { + let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + // Use a tag that roundtrips properly - NoteTag::LocalAny stores the full u32 including type + // bits + let tag = NoteTag::with_account_target(sender); + let metadata = NoteMetadata::new(sender, NoteType::Public, tag); + + let bytes = metadata.to_bytes(); + let restored = NoteMetadata::read_from_bytes(&bytes).unwrap(); + assert_eq!(metadata, restored, "NoteMetadata serialization must be 
symmetric"); +} + +#[test] +fn serialization_symmetry_nullifier_vec() { + let nullifiers: Vec = (0..5).map(num_to_nullifier).collect(); + let bytes = nullifiers.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifiers, restored, "Vec serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_id_vec() { + let note_ids: Vec = + (0..5).map(|i| NoteId::new(num_to_word(i), num_to_word(i + 100))).collect(); + let bytes = note_ids.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_ids, restored, "Vec serialization must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_block_header() { + let mut conn = create_db(); + + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + BlockNumber::from(42), + num_to_word(4), + num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + // Insert + queries::insert_block_header(&mut conn, &block_header).unwrap(); + + // Retrieve + let retrieved = + queries::select_block_header_by_block_num(&mut conn, Some(block_header.block_num())) + .unwrap() + .expect("Block header should exist"); + + assert_eq!(block_header, retrieved, "BlockHeader DB roundtrip must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_nullifiers() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let nullifiers: Vec = (0..5).map(|i| num_to_nullifier(i << 48)).collect(); + + // Insert + queries::insert_nullifiers_for_block(&mut conn, &nullifiers, block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_nullifiers(&mut conn).unwrap(); + + assert_eq!(nullifiers.len(), retrieved.len(), "Should retrieve same number of nullifiers"); + for (orig, info) in 
nullifiers.iter().zip(retrieved.iter()) { + assert_eq!(*orig, info.nullifier, "Nullifier DB roundtrip must be symmetric"); + assert_eq!(block_num, info.block_num, "Block number must match"); + } +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + Some([99u8; 32]), + ); + let account_id = account.id(); + let account_commitment = account.commitment(); + + // Insert with full delta (like genesis) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = BlockAccountUpdate::new( + account_id, + account_commitment, + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_accounts(&mut conn).unwrap(); + assert_eq!(retrieved.len(), 1, "Should have one account"); + + let retrieved_info = &retrieved[0]; + assert_eq!( + retrieved_info.summary.account_id, account_id, + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!( + retrieved_info.summary.account_commitment, account_commitment, + "Account commitment DB roundtrip must be symmetric" + ); + assert_eq!(retrieved_info.summary.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_notes() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(sender, 0)], block_num) + .unwrap(); + + let new_note = create_note(sender); + let note_index = BlockNoteIndex::new(0, 0).unwrap(); + + let note = NoteRecord { + block_num, + note_index, + 
note_id: new_note.id().as_word(), + note_commitment: new_note.commitment(), + metadata: new_note.metadata().clone(), + details: Some(NoteDetails::from(&new_note)), + inclusion_path: SparseMerklePath::default(), + }; + + // Insert + queries::insert_scripts(&mut conn, [¬e]).unwrap(); + queries::insert_notes(&mut conn, &[(note.clone(), None)]).unwrap(); + + // Retrieve + let note_ids = vec![NoteId::from_raw(note.note_id)]; + let retrieved = queries::select_notes_by_id(&mut conn, ¬e_ids).unwrap(); + + assert_eq!(retrieved.len(), 1, "Should have one note"); + let retrieved_note = &retrieved[0]; + + assert_eq!(note.note_id, retrieved_note.note_id, "NoteId DB roundtrip must be symmetric"); + assert_eq!( + note.note_commitment, retrieved_note.note_commitment, + "Note commitment DB roundtrip must be symmetric" + ); + assert_eq!( + note.metadata, retrieved_note.metadata, + "Metadata DB roundtrip must be symmetric" + ); + assert_eq!( + note.inclusion_path, retrieved_note.inclusion_path, + "Inclusion path DB roundtrip must be symmetric" + ); + assert_eq!( + note.details, retrieved_note.details, + "Note details DB roundtrip must be symmetric" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_transactions() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) + .unwrap(); + + let tx = mock_block_transaction(account_id, 1); + let ordered_tx = OrderedTransactionHeaders::new_unchecked(vec![tx.clone()]); + + // Insert + queries::insert_transactions(&mut conn, block_num, &ordered_tx).unwrap(); + + // Retrieve + let retrieved = queries::select_transactions_by_accounts_and_block_range( + &mut conn, + &[account_id], + BlockNumber::from(0)..=BlockNumber::from(2), + ) + .unwrap(); + + assert_eq!(retrieved.len(), 1, "Should have one 
transaction"); + let retrieved_tx = &retrieved[0]; + + assert_eq!( + tx.account_id(), + retrieved_tx.account_id, + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!( + tx.id(), + retrieved_tx.transaction_id, + "TransactionId DB roundtrip must be symmetric" + ); + assert_eq!(block_num, retrieved_tx.block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_vault_assets() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + // Create account first + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) + .unwrap(); + + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + let asset: Asset = fungible_asset.into(); + let vault_key = asset.vault_key(); + + // Insert vault asset + queries::insert_account_vault_asset(&mut conn, account_id, block_num, vault_key, Some(asset)) + .unwrap(); + + // Retrieve + let (_, vault_assets) = queries::select_account_vault_assets( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(vault_assets.len(), 1, "Should have one vault asset"); + let retrieved = &vault_assets[0]; + + assert_eq!(retrieved.asset, Some(asset), "Asset DB roundtrip must be symmetric"); + assert_eq!(retrieved.vault_key, vault_key, "VaultKey DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_storage_map_values() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = 
StorageSlotName::mock(5); + let key = num_to_word(12345); + let value = num_to_word(67890); + + // Insert + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block_num, + slot_name.clone(), + key, + value, + ) + .unwrap(); + + // Retrieve + let page = queries::select_account_storage_map_values( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(page.values.len(), 1, "Should have one storage map value"); + let retrieved = &page.values[0]; + + assert_eq!(retrieved.slot_name, slot_name, "StorageSlotName DB roundtrip must be symmetric"); + assert_eq!(retrieved.key, key, "Key (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.value, value, "Value (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account_storage_with_maps() { + use miden_protocol::account::StorageMap; + + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + // Create storage with both value slots and map slots + let storage_map = StorageMap::with_entries(vec![ + ( + Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), + ), + ( + Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), + ), + ]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), num_to_word(42)), + StorageSlot::with_map(StorageSlotName::mock(1), storage_map), + StorageSlot::with_empty_value(StorageSlotName::mock(2)), + ]; + + let component_code = "pub proc foo push.1 end"; + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", component_code) + .unwrap(); + let account_component = 
AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([50u8; 32]) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let account_id = account.id(); + let original_storage = account.storage().clone(); + let original_commitment = original_storage.to_commitment(); + + // Insert the account (this should store header + map values separately) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve the storage using select_latest_account_storage (reconstructs from header + map + // values) + let retrieved_storage = queries::select_latest_account_storage(&mut conn, account_id).unwrap(); + let retrieved_commitment = retrieved_storage.to_commitment(); + + // Verify the commitment matches (this proves the reconstruction is correct) + assert_eq!( + original_commitment, retrieved_commitment, + "Storage commitment must match after DB roundtrip" + ); + + // Verify slot count matches + assert_eq!( + original_storage.slots().len(), + retrieved_storage.slots().len(), + "Number of slots must match" + ); + + // Verify each slot + for (original_slot, retrieved_slot) in + original_storage.slots().iter().zip(retrieved_storage.slots().iter()) + { + assert_eq!(original_slot.name(), retrieved_slot.name(), "Slot names must match"); + assert_eq!(original_slot.slot_type(), retrieved_slot.slot_type(), "Slot types must match"); + + match (original_slot.content(), retrieved_slot.content()) { + (StorageSlotContent::Value(orig), StorageSlotContent::Value(retr)) => 
{ + assert_eq!(orig, retr, "Value slot contents must match"); + }, + (StorageSlotContent::Map(orig_map), StorageSlotContent::Map(retr_map)) => { + assert_eq!(orig_map.root(), retr_map.root(), "Map slot roots must match"); + for (key, value) in orig_map.entries() { + let retrieved_value = retr_map.get(key); + assert_eq!(*value, retrieved_value, "Map entry for key {:?} must match", key); + } + }, + // The slot_type assertion above guarantees matching variants, so this is unreachable + _ => unreachable!(), + } + } + + // Also verify full account reconstruction via select_account (which calls select_full_account) + let account_info = queries::select_account(&mut conn, account_id).unwrap(); + assert!(account_info.details.is_some(), "Public account should have details"); + let retrieved_account = account_info.details.unwrap(); + assert_eq!( + account.commitment(), + retrieved_account.commitment(), + "Full account commitment must match after DB roundtrip" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn test_note_metadata_with_attachment_roundtrip() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let (account_id, _) = + make_account_and_note(&mut conn, block_num, [1u8; 32], AccountStorageMode::Network); + + let target = NetworkAccountTarget::new(account_id, NoteExecutionHint::Always) + .expect("NetworkAccountTarget creation should succeed for network account"); + let attachment: NoteAttachment = target.into(); + + // Create NoteMetadata with the attachment + let metadata = + NoteMetadata::new(account_id, NoteType::Public, NoteTag::with_account_target(account_id)) + .with_attachment(attachment.clone()); + + let note = NoteRecord { + block_num, + note_index: BlockNoteIndex::new(0, 0).unwrap(), + note_id: num_to_word(1), + note_commitment: num_to_word(1), + metadata: metadata.clone(), + details: None, + inclusion_path: SparseMerklePath::default(), + }; + + queries::insert_scripts(&mut conn, 
[¬e]).unwrap(); + queries::insert_notes(&mut conn, &[(note.clone(), None)]).unwrap(); + + // Fetch the note back and verify the attachment is preserved + let retrieved = queries::select_notes_by_id(&mut conn, &[NoteId::from_raw(note.note_id)]) + .expect("select_notes_by_id should succeed"); + + assert_eq!(retrieved.len(), 1, "Should retrieve exactly one note"); + + let retrieved_metadata = &retrieved[0].metadata; + assert_eq!( + retrieved_metadata.attachment(), + metadata.attachment(), + "Attachment should be preserved after DB roundtrip" + ); + + let retrieved_target = NetworkAccountTarget::try_from(retrieved_metadata.attachment()) + .expect("Should be able to parse NetworkAccountTarget from retrieved attachment"); + assert_eq!( + retrieved_target.target_id(), + account_id, + "NetworkAccountTarget should have the correct target account ID" + ); } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 9a23e2f67d..df1f0fa653 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -6,13 +6,13 @@ use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_node_utils::limiter::QueryLimitError; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::crypto::merkle::MmrError; -use miden_objects::crypto::utils::DeserializationError; -use miden_objects::note::Nullifier; -use miden_objects::transaction::OutputNote; -use miden_objects::{ +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::MerkleError; +use miden_protocol::crypto::merkle::mmr::MmrError; +use miden_protocol::crypto::utils::DeserializationError; +use miden_protocol::errors::{ AccountDeltaError, AccountError, AccountTreeError, @@ -21,14 +21,17 @@ use miden_objects::{ FeeError, NoteError, NullifierTreeError, - 
Word, + StorageMapError, }; +use miden_protocol::note::{NoteId, Nullifier}; +use miden_protocol::transaction::OutputNote; use thiserror::Error; use tokio::sync::oneshot::error::RecvError; use tonic::Status; use crate::db::manager::ConnectionManagerError; use crate::db::models::conv::DatabaseTypeConversionError; +use crate::inner_forest::{InnerForestError, WitnessError}; // DATABASE ERRORS // ================================================================================================= @@ -56,11 +59,13 @@ pub enum DatabaseError { #[error("I/O error")] IoError(#[from] io::Error), #[error("merkle error")] - MerkleError(#[from] miden_objects::crypto::merkle::MerkleError), + MerkleError(#[from] MerkleError), #[error("network account error")] NetworkAccountError(#[from] NetworkAccountError), #[error("note error")] NoteError(#[from] NoteError), + #[error("storage map error")] + StorageMapError(#[from] StorageMapError), #[error("setup deadpool connection pool failed")] Deadpool(#[from] deadpool::managed::PoolError), #[error("setup deadpool connection pool failed")] @@ -98,12 +103,18 @@ pub enum DatabaseError { AccountNotFoundInDb(AccountId), #[error("account {0} state at block height {1} not found")] AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber), + #[error("block {0} not found in database")] + BlockNotFound(BlockNumber), + #[error("historical block {block_num} not available: {reason}")] + HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String }, #[error("accounts {0:?} not found")] AccountsNotFoundInDb(Vec), #[error("account {0} is not on the chain")] AccountNotPublic(AccountId), #[error("invalid block parameters: block_from ({from}) > block_to ({to})")] InvalidBlockRange { from: BlockNumber, to: BlockNumber }, + #[error("invalid storage slot type: {0}")] + InvalidStorageSlotType(i32), #[error("data corrupted: {0}")] DataCorrupted(String), #[error("SQLite pool interaction failed: {0}")] @@ -115,12 +126,20 @@ pub enum DatabaseError { Remove all 
database files and try again." )] UnsupportedDatabaseVersion, + #[error("schema verification failed")] + SchemaVerification(#[from] SchemaVerificationError), #[error(transparent)] ConnectionManager(#[from] ConnectionManagerError), #[error(transparent)] SqlValueConversion(#[from] DatabaseTypeConversionError), #[error("Not implemented: {0}")] NotImplemented(String), + #[error("storage root not found for account {account_id}, slot {slot_name}, block {block_num}")] + StorageRootNotFound { + account_id: AccountId, + slot_name: String, + block_num: BlockNumber, + }, } impl DatabaseError { @@ -169,6 +188,10 @@ impl From for Status { #[derive(Error, Debug)] pub enum StateInitializationError { + #[error("account tree IO error: {0}")] + AccountTreeIoError(String), + #[error("nullifier tree IO error: {0}")] + NullifierTreeIoError(String), #[error("database error")] DatabaseError(#[from] DatabaseError), #[error("failed to create nullifier tree")] @@ -181,6 +204,23 @@ pub enum StateInitializationError { BlockStoreLoadError(#[source] std::io::Error), #[error("failed to load database")] DatabaseLoadError(#[from] DatabaseSetupError), + #[error("inner forest error")] + InnerForestError(#[from] InnerForestError), + #[error( + "{tree_name} SMT root ({tree_root:?}) does not match expected root from block {block_num} \ + ({block_root:?}). Delete the tree storage directories and restart the node to rebuild \ + from the database." 
+ )] + TreeStorageDiverged { + tree_name: &'static str, + block_num: BlockNumber, + tree_root: Word, + block_root: Word, + }, + #[error("public account {0} is missing details in database")] + PublicAccountMissingDetails(AccountId), + #[error("failed to convert account to delta: {0}")] + AccountToDeltaConversionFailed(String), } #[derive(Debug, Error)] @@ -242,6 +282,8 @@ pub enum InvalidBlockError { NewBlockNullifierAlreadySpent(#[source] NullifierTreeError), #[error("duplicate account ID prefix in new block")] NewBlockDuplicateAccountIdPrefix(#[source] AccountTreeError), + #[error("failed to build note tree: {0}")] + FailedToBuildNoteTree(String), } #[derive(Error, Debug)] @@ -256,6 +298,8 @@ pub enum ApplyBlockError { TokioJoinError(#[from] tokio::task::JoinError), #[error("invalid block error")] InvalidBlockError(#[from] InvalidBlockError), + #[error("inner forest error")] + InnerForestError(#[from] InnerForestError), // OTHER ERRORS // --------------------------------------------------------------------------------------------- @@ -323,8 +367,6 @@ pub enum NoteSyncError { MmrError(#[from] MmrError), #[error("invalid block range")] InvalidBlockRange(#[from] InvalidBlockRange), - #[error("too many note tags: received {0}, max {1}")] - TooManyNoteTags(usize, usize), #[error("malformed note tags")] DeserializationFailed(#[from] ConversionError), } @@ -410,6 +452,20 @@ pub enum SyncStorageMapsError { AccountNotPublic(AccountId), } +// GET NETWORK ACCOUNT IDS +// ================================================================================================ + +#[derive(Debug, Error, GrpcError)] +pub enum GetNetworkAccountIdsError { + #[error("database error")] + #[grpc(internal)] + DatabaseError(#[from] DatabaseError), + #[error("invalid block range")] + InvalidBlockRange(#[from] InvalidBlockRange), + #[error("malformed nullifier prefix")] + DeserializationFailed(#[from] ConversionError), +} + // GET BLOCK BY NUMBER ERRORS // 
================================================================================================ @@ -433,11 +489,9 @@ pub enum GetNotesByIdError { #[error("malformed note ID")] DeserializationFailed(#[from] ConversionError), #[error("note {0} not found")] - NoteNotFound(miden_objects::note::NoteId), - #[error("too many note IDs: received {0}, max {1}")] - TooManyNoteIds(usize, usize), + NoteNotFound(NoteId), #[error("note {0} is not public")] - NoteNotPublic(miden_objects::note::NoteId), + NoteNotPublic(NoteId), } // GET NOTE SCRIPT BY ROOT ERRORS @@ -464,8 +518,6 @@ pub enum CheckNullifiersError { DatabaseError(#[from] DatabaseError), #[error("malformed nullifier")] DeserializationFailed(#[from] ConversionError), - #[error("too many nullifiers: received {0}, maximum {1}")] - TooManyNullifiers(usize, usize), } // SYNC TRANSACTIONS ERRORS @@ -482,8 +534,40 @@ pub enum SyncTransactionsError { DeserializationFailed(#[from] ConversionError), #[error("account {0} not found")] AccountNotFound(AccountId), - #[error("too many account IDs: received {0}, max {1}")] - TooManyAccountIds(usize, usize), + #[error("failed to retrieve witness")] + WitnessError(#[from] WitnessError), +} + +#[derive(Debug, Error, GrpcError)] +pub enum GetWitnessesError { + #[error("malformed request")] + DeserializationFailed(#[from] ConversionError), + #[error("failed to retrieve witness")] + WitnessError(#[from] WitnessError), +} + +// SCHEMA VERIFICATION ERRORS +// ================================================================================================= + +/// Errors that can occur during schema verification. 
+#[derive(Debug, Error)] +pub enum SchemaVerificationError { + #[error("failed to create in-memory reference database")] + InMemoryDbCreation(#[source] diesel::ConnectionError), + #[error("failed to apply migrations to reference database")] + MigrationApplication(#[source] Box), + #[error("failed to extract schema from database")] + SchemaExtraction(#[source] diesel::result::Error), + #[error( + "schema mismatch: expected {expected_count} objects, found {actual_count} \ + ({missing_count} missing, {extra_count} unexpected)" + )] + Mismatch { + expected_count: usize, + actual_count: usize, + missing_count: usize, + extra_count: usize, + }, } // Do not scope for `cfg(test)` - if it the traitbounds don't suffice the issue will already appear diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index 313d390cd5..b39495c872 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -1,7 +1,14 @@ -use miden_lib::account::faucets::FungibleFaucetError; -use miden_lib::account::wallets::BasicWalletError; -use miden_objects::account::AccountId; -use miden_objects::{AccountError, AssetError, FeeError, TokenSymbolError}; +use miden_protocol::account::AccountId; +use miden_protocol::errors::{ + AccountDeltaError, + AccountError, + AssetError, + FeeError, + TokenSymbolError, +}; +use miden_protocol::utils::DeserializationError; +use miden_standards::account::faucets::FungibleFaucetError; +use miden_standards::account::wallets::BasicWalletError; use crate::genesis::config::TokenSymbolStr; @@ -15,7 +22,7 @@ pub enum GenesisConfigError { #[error("asset translation from config to state failed")] Asset(#[from] AssetError), #[error("adding assets to account failed")] - AccountDelta(#[from] miden_objects::AccountDeltaError), + AccountDelta(#[from] AccountDeltaError), #[error("the defined asset {symbol:?} has no corresponding faucet")] MissingFaucetDefinition { symbol: TokenSymbolStr }, 
#[error("account with id {account_id} was referenced but is not part of given genesis state")] @@ -54,4 +61,8 @@ pub enum GenesisConfigError { NativeAssetFaucetIsNotPublic(TokenSymbolStr), #[error("faucet account of {0} is not public")] NativeAssetFaucitIsNotAFungibleFaucet(TokenSymbolStr), + #[error("invalid secret key")] + InvalidSecretKey(#[from] DeserializationError), + #[error("provided signer config is not supported")] + UnsupportedSignerConfig, } diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index 193d2f105a..8d1a83437b 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -4,19 +4,15 @@ use std::cmp::Ordering; use std::str::FromStr; use indexmap::IndexMap; -use miden_lib::AuthScheme; -use miden_lib::account::auth::AuthRpoFalcon512; -use miden_lib::account::faucets::BasicFungibleFaucet; -use miden_lib::account::wallets::create_basic_wallet; -use miden_lib::transaction::memory; use miden_node_utils::crypto::get_rpo_random_coin; -use miden_objects::account::auth::AuthSecretKey; -use miden_objects::account::{ +use miden_protocol::account::auth::AuthSecretKey; +use miden_protocol::account::{ Account, AccountBuilder, AccountDelta, AccountFile, AccountId, + AccountStorage, AccountStorageDelta, AccountStorageMode, AccountType, @@ -24,10 +20,15 @@ use miden_objects::account::{ FungibleAssetDelta, NonFungibleAssetDelta, }; -use miden_objects::asset::{FungibleAsset, TokenSymbol}; -use miden_objects::block::FeeParameters; -use miden_objects::crypto::dsa::rpo_falcon512::SecretKey; -use miden_objects::{Felt, FieldElement, ONE, TokenSymbolError, ZERO}; +use miden_protocol::asset::{FungibleAsset, TokenSymbol}; +use miden_protocol::block::FeeParameters; +use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey as RpoSecretKey; +use miden_protocol::errors::TokenSymbolError; +use miden_protocol::{Felt, FieldElement, ONE, ZERO}; +use miden_standards::AuthScheme; +use 
miden_standards::account::auth::AuthFalcon512Rpo; +use miden_standards::account::faucets::BasicFungibleFaucet; +use miden_standards::account::wallets::create_basic_wallet; use rand::distr::weighted::Weight; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -94,7 +95,10 @@ impl GenesisConfig { /// /// Also returns the set of secrets for the generated accounts. #[allow(clippy::too_many_lines)] - pub fn into_state(self) -> Result<(GenesisState, AccountSecrets), GenesisConfigError> { + pub fn into_state( + self, + signer: S, + ) -> Result<(GenesisState, AccountSecrets), GenesisConfigError> { let GenesisConfig { version, timestamp, @@ -102,6 +106,7 @@ impl GenesisConfig { fee_parameters, fungible_faucet: fungible_faucet_configs, wallet: wallet_configs, + .. } = self; let symbol = native_faucet.symbol.clone(); @@ -154,8 +159,8 @@ impl GenesisConfig { tracing::debug!("Adding wallet account {index} with {assets:?}"); let mut rng = ChaCha20Rng::from_seed(rand::random()); - let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() }; + let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); + let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; let init_seed: [u8; 32] = rng.random(); let account_type = if has_updatable_code { @@ -215,9 +220,9 @@ impl GenesisConfig { if total_issuance != 0 { // slot 0 storage_delta.set_item( - memory::FAUCET_STORAGE_DATA_SLOT, + AccountStorage::faucet_sysdata_slot().clone(), [ZERO, ZERO, ZERO, Felt::new(total_issuance)].into(), - ); + )?; tracing::debug!( "Reducing faucet account {faucet} for {symbol} by {amount}", faucet = faucet_id.to_hex(), @@ -263,6 +268,7 @@ impl GenesisConfig { accounts: all_accounts, version, timestamp, + block_signer: signer, }, AccountSecrets { secrets }, )) @@ -332,7 +338,7 @@ pub struct FungibleFaucetConfig { impl FungibleFaucetConfig { /// Create a fungible 
faucet from a config entry - fn build_account(self) -> Result<(Account, SecretKey), GenesisConfigError> { + fn build_account(self) -> Result<(Account, RpoSecretKey), GenesisConfigError> { let FungibleFaucetConfig { symbol, decimals, @@ -340,8 +346,8 @@ impl FungibleFaucetConfig { storage_mode, } = self; let mut rng = ChaCha20Rng::from_seed(rand::random()); - let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); - let auth = AuthRpoFalcon512::new(secret_key.public_key().into()); + let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); + let auth = AuthFalcon512Rpo::new(secret_key.public_key().into()); let init_seed: [u8; 32] = rng.random(); let max_supply = Felt::try_from(max_supply) @@ -426,7 +432,7 @@ pub struct AccountFileWithName { #[derive(Debug, Clone)] pub struct AccountSecrets { // name, account, private key, account seed - pub secrets: Vec<(String, AccountId, SecretKey)>, + pub secrets: Vec<(String, AccountId, RpoSecretKey)>, } impl AccountSecrets { @@ -434,10 +440,10 @@ impl AccountSecrets { /// /// If no name is present, a new one is generated based on the current time /// and the index in - pub fn as_account_files( + pub fn as_account_files( &self, - genesis_state: &GenesisState, - ) -> impl Iterator> + use<'_> { + genesis_state: &GenesisState, + ) -> impl Iterator> + use<'_, S> { let account_lut = IndexMap::::from_iter( genesis_state.accounts.iter().map(|account| (account.id(), account.clone())), ); @@ -446,7 +452,7 @@ impl AccountSecrets { .get(&account_id) .ok_or(GenesisConfigError::MissingGenesisAccount { account_id })?; let account_file = - AccountFile::new(account.clone(), vec![AuthSecretKey::RpoFalcon512(secret_key)]); + AccountFile::new(account.clone(), vec![AuthSecretKey::Falcon512Rpo(secret_key)]); Ok(AccountFileWithName { name, account_file }) }) } diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index 2f7fd15e45..23e2daa43c 100644 --- 
a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; -use miden_lib::transaction::memory; -use miden_objects::ONE; +use miden_protocol::ONE; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use super::*; @@ -11,7 +11,7 @@ type TestResult = Result<(), Box>; fn parsing_yields_expected_default_values() -> TestResult { let s = include_str!("./samples/01-simple.toml"); let gcfg = GenesisConfig::read_toml(s)?; - let (state, _secrets) = gcfg.into_state()?; + let (state, _secrets) = gcfg.into_state(SecretKey::new())?; let _ = state; // faucets always precede wallet accounts let native_faucet = state.accounts[0].clone(); @@ -45,7 +45,7 @@ fn parsing_yields_expected_default_values() -> TestResult { // check total issuance of the faucet assert_eq!( - native_faucet.storage().get_item(memory::FAUCET_STORAGE_DATA_SLOT).unwrap()[3], + native_faucet.storage().get_item(AccountStorage::faucet_sysdata_slot()).unwrap()[3], Felt::new(999_777), "Issuance mismatch" ); @@ -57,7 +57,7 @@ fn parsing_yields_expected_default_values() -> TestResult { #[miden_node_test_macro::enable_logging] fn genesis_accounts_have_nonce_one() -> TestResult { let gcfg = GenesisConfig::default(); - let (state, secrets) = gcfg.into_state().unwrap(); + let (state, secrets) = gcfg.into_state(SecretKey::new()).unwrap(); let mut iter = secrets.as_account_files(&state); let AccountFileWithName { account_file: status_quo, .. 
} = iter.next().unwrap().unwrap(); assert!(iter.next().is_none()); diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs index cad4d51c9d..5df1825d66 100644 --- a/crates/store/src/genesis/mod.rs +++ b/crates/store/src/genesis/mod.rs @@ -1,20 +1,22 @@ -use miden_lib::transaction::TransactionKernel; -use miden_objects::Word; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{Account, AccountDelta}; -use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; -use miden_objects::block::{ +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{Account, AccountDelta}; +use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; +use miden_protocol::block::{ BlockAccountUpdate, + BlockBody, BlockHeader, BlockNoteTree, BlockNumber, + BlockProof, + BlockSigner, FeeParameters, ProvenBlock, }; -use miden_objects::crypto::merkle::{Forest, LargeSmt, MemoryStorage, MmrPeaks, Smt}; -use miden_objects::note::Nullifier; -use miden_objects::transaction::OrderedTransactionHeaders; -use miden_objects::utils::serde::{ByteReader, Deserializable, DeserializationError}; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks}; +use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage, Smt}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionKernel}; use crate::errors::GenesisError; @@ -25,11 +27,12 @@ pub mod config; /// Represents the state at genesis, which will be used to derive the genesis block. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub struct GenesisState { +pub struct GenesisState { pub accounts: Vec, pub fee_parameters: FeeParameters, pub version: u32, pub timestamp: u32, + pub block_signer: S, } /// A type-safety wrapper ensuring that genesis block data can only be created from @@ -46,21 +49,25 @@ impl GenesisBlock { } } -impl GenesisState { +impl GenesisState { pub fn new( accounts: Vec, fee_parameters: FeeParameters, version: u32, timestamp: u32, + signer: S, ) -> Self { Self { accounts, fee_parameters, version, timestamp, + block_signer: signer, } } +} +impl GenesisState { /// Returns the block header and the account SMT pub fn into_block(self) -> Result { let accounts: Vec = self @@ -113,36 +120,24 @@ impl GenesisState { empty_block_note_tree.root(), Word::empty(), TransactionKernel.to_commitment(), - Word::empty(), + self.block_signer.public_key(), self.fee_parameters, self.timestamp, ); - // SAFETY: Header and accounts should be valid by construction. - // No notes or nullifiers are created at genesis, which is consistent with the above empty - // block note tree root and empty nullifier tree root. - Ok(GenesisBlock(ProvenBlock::new_unchecked( - header, + let body = BlockBody::new_unchecked( accounts, empty_output_notes, empty_nullifiers, empty_transactions, - ))) - } -} - -// SERIALIZATION -// ================================================================================================ - -impl Deserializable for GenesisState { - fn read_from(source: &mut R) -> Result { - let num_accounts = source.read_usize()?; - let accounts = source.read_many::(num_accounts)?; + ); - let version = source.read_u32()?; - let timestamp = source.read_u32()?; - let fee_parameters = source.read::()?; + let block_proof = BlockProof::new_dummy(); - Ok(Self::new(accounts, fee_parameters, version, timestamp)) + let signature = self.block_signer.sign(&header); + // SAFETY: Header and accounts should be valid by construction. 
+ // No notes or nullifiers are created at genesis, which is consistent with the above empty + // block note tree root and empty nullifier tree root. + Ok(GenesisBlock(ProvenBlock::new_unchecked(header, body, signature, block_proof))) } } diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs new file mode 100644 index 0000000000..0a4bd00d62 --- /dev/null +++ b/crates/store/src/inner_forest/mod.rs @@ -0,0 +1,502 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; +use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; +use miden_protocol::account::{ + AccountId, + NonFungibleDeltaAction, + StorageMap, + StorageMapWitness, + StorageSlotName, +}; +use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; +use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; +use miden_protocol::errors::{AssetError, StorageMapError}; +use miden_protocol::{EMPTY_WORD, Word}; +use thiserror::Error; + +#[cfg(test)] +mod tests; + +// ERRORS +// ================================================================================================ + +#[derive(Debug, Error)] +pub enum InnerForestError { + #[error( + "balance underflow: account {account_id}, faucet {faucet_id}, \ + previous balance {prev_balance}, delta {delta}" + )] + BalanceUnderflow { + account_id: AccountId, + faucet_id: AccountId, + prev_balance: u64, + delta: i64, + }, +} + +#[derive(Debug, Error)] +pub enum WitnessError { + #[error("root not found")] + RootNotFound, + #[error("merkle error")] + MerkleError(#[from] MerkleError), + #[error("storage map error")] + StorageMapError(#[from] StorageMapError), + #[error("failed to construct asset")] + AssetError(#[from] AssetError), +} + +// INNER FOREST +// 
================================================================================================ + +/// Container for forest-related state that needs to be updated atomically. +pub(crate) struct InnerForest { + /// `SmtForest` for efficient account storage reconstruction. + /// Populated during block import with storage and vault SMTs. + forest: SmtForest, + + /// Maps (`account_id`, `slot_name`, `block_num`) to SMT root. + /// Populated during block import for all storage map slots. + storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, + + /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. + /// Accumulated from deltas - each block's entries include all entries up to that point. + storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, + + /// Maps (`account_id`, `block_num`) to vault SMT root. + /// Tracks asset vault versions across all blocks with structural sharing. + vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, +} + +impl InnerForest { + pub(crate) fn new() -> Self { + Self { + forest: SmtForest::new(), + storage_map_roots: BTreeMap::new(), + storage_entries: BTreeMap::new(), + vault_roots: BTreeMap::new(), + } + } + + // HELPERS + // -------------------------------------------------------------------------------------------- + + /// Returns the root of an empty SMT. + const fn empty_smt_root() -> Word { + *EmptySubtreeRoots::entry(SMT_DEPTH, 0) + } + + /// Retrieves the most recent vault SMT root for an account. + /// + /// Returns the latest vault root entry regardless of block number. + /// Used when applying incremental deltas where we always want the previous state. + /// + /// If no vault root is found for the account, returns an empty SMT root. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, returns an empty SMT root (for new accounts or DB + /// reconstruction where delta values are absolute). 
If `false`, looks up the previous state + /// (for incremental updates where delta values are relative changes). + fn get_latest_vault_root(&self, account_id: AccountId, is_full_state: bool) -> Word { + if is_full_state { + return Self::empty_smt_root(); + } + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + /// Retrieves the most recent storage map SMT root for an account slot. + /// + /// Returns the latest storage root entry regardless of block number. + /// Used when applying incremental deltas where we always want the previous state. + /// + /// If no storage root is found for the slot, returns an empty SMT root. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, returns an empty SMT root (for new accounts or DB + /// reconstruction where delta values are absolute). If `false`, looks up the previous state + /// (for incremental updates where delta values are relative changes). + fn get_latest_storage_map_root( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + is_full_state: bool, + ) -> Word { + if is_full_state { + return Self::empty_smt_root(); + } + + self.storage_map_roots + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + /// Retrieves a vault root for the specified account at or before the specified block. + pub(crate) fn get_vault_root( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Option { + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) + } + + /// Retrieves the storage map root for an account slot at or before the specified block. 
+ pub(crate) fn get_storage_map_root( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + ) -> Option<Word> { + self.storage_map_roots + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), block_num), + ) + .next_back() + .map(|(_, root)| *root) + } + + /// Retrieves a storage map witness for the specified account and storage slot. + /// + /// Finds the most recent witness at or before the specified block number. + /// + /// Note that the `raw_key` is the raw, user-provided key that needs to be hashed in order to + /// get the actual key into the storage map. + pub(crate) fn get_storage_map_witness( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + raw_key: Word, + ) -> Result<StorageMapWitness, WitnessError> { + let key = StorageMap::hash_key(raw_key); + let root = self + .get_storage_map_root(account_id, slot_name, block_num) + .ok_or(WitnessError::RootNotFound)?; + let proof = self.forest.open(root, key)?; + + Ok(StorageMapWitness::new(proof, vec![raw_key])?) + } + + /// Retrieves vault asset witnesses for the specified account and asset keys at the specified + /// block number. + pub fn get_vault_asset_witnesses( + &self, + account_id: AccountId, + block_num: BlockNumber, + asset_keys: BTreeSet<AssetVaultKey>, + ) -> Result<Vec<AssetWitness>, WitnessError> { + let root = self.get_vault_root(account_id, block_num).ok_or(WitnessError::RootNotFound)?; + let witnesses = asset_keys + .into_iter() + .map(|key| { + let proof = self.forest.open(root, key.into())?; + let asset = AssetWitness::new(proof)?; + Ok(asset) + }) + .collect::<Result<Vec<_>, WitnessError>>()?; + Ok(witnesses) + } + + /// Opens a storage map and returns storage map details with SMT proofs for the given keys. + /// + /// Returns `None` if no storage root is tracked for this account/slot/block combination. + /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. 
+ pub(crate) fn open_storage_map( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + raw_keys: &[Word], + ) -> Option> { + let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; + + // Collect SMT proofs for each key + let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { + let key = StorageMap::hash_key(*raw_key); + self.forest.open(root, key) + })); + + Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) + } + + /// Returns all key-value entries for a specific account storage slot at or before a block. + /// + /// Uses range query semantics: finds the most recent entries at or before `block_num`. + /// Returns `None` if no entries exist for this account/slot up to the given block. + /// Returns `LimitExceeded` if there are too many entries to return. + pub(crate) fn storage_map_entries( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + ) -> Option { + // Find the most recent entries at or before block_num + let entries = self + .storage_entries + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), block_num), + ) + .next_back() + .map(|(_, entries)| entries)?; + + if entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Some(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::LimitExceeded, + }); + } + let entries = Vec::from_iter(entries.iter().map(|(k, v)| (*k, *v))); + + Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) + } + + // PUBLIC INTERFACE + // -------------------------------------------------------------------------------------------- + + /// Applies account updates from a block to the forest. + /// + /// Iterates through account updates and applies each delta to the forest. + /// Private accounts should be filtered out before calling this method. 
+ /// + /// # Arguments + /// + /// * `block_num` - Block number for which these updates apply + /// * `account_updates` - Iterator of `AccountDelta` for public accounts + /// + /// # Errors + /// + /// Returns an error if applying a vault delta results in a negative balance. + pub(crate) fn apply_block_updates( + &mut self, + block_num: BlockNumber, + account_updates: impl IntoIterator, + ) -> Result<(), InnerForestError> { + for delta in account_updates { + self.update_account(block_num, &delta)?; + + tracing::debug!( + target: crate::COMPONENT, + account_id = %delta.id(), + %block_num, + is_full_state = delta.is_full_state(), + "Updated forest with account delta" + ); + } + Ok(()) + } + + /// Updates the forest with account vault and storage changes from a delta. + /// + /// Unified interface for updating all account state in the forest, handling both full-state + /// deltas (new accounts or reconstruction from DB) and partial deltas (incremental updates + /// during block application). + /// + /// Full-state deltas (`delta.is_full_state() == true`) populate the forest from scratch using + /// an empty SMT root. Partial deltas apply changes on top of the previous block's state. + /// + /// # Errors + /// + /// Returns an error if applying a vault delta results in a negative balance. + pub(crate) fn update_account( + &mut self, + block_num: BlockNumber, + delta: &AccountDelta, + ) -> Result<(), InnerForestError> { + let account_id = delta.id(); + let is_full_state = delta.is_full_state(); + + if !delta.vault().is_empty() { + self.update_account_vault(block_num, account_id, delta.vault(), is_full_state)?; + } + + if !delta.storage().is_empty() { + self.update_account_storage(block_num, account_id, delta.storage(), is_full_state); + } + Ok(()) + } + + // PRIVATE METHODS + // -------------------------------------------------------------------------------------------- + + /// Updates the forest with vault changes from a delta. 
+ /// + /// Processes both fungible and non-fungible asset changes, building entries for the vault SMT + /// and tracking the new root. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). + /// If `false`, delta values are relative changes applied to previous state. + /// + /// # Errors + /// + /// Returns an error if applying a delta results in a negative balance. + fn update_account_vault( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + vault_delta: &AccountVaultDelta, + is_full_state: bool, + ) -> Result<(), InnerForestError> { + let prev_root = self.get_latest_vault_root(account_id, is_full_state); + + let mut entries = Vec::new(); + + // Process fungible assets + for (faucet_id, amount_delta) in vault_delta.fungible().iter() { + let key: Word = + FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); + + let new_amount = if is_full_state { + // For full-state deltas, amount is the absolute value + (*amount_delta).try_into().expect("full-state amount should be non-negative") + } else { + // For partial deltas, amount is a change that must be applied to previous balance. + // + // TODO: SmtForest only exposes `fn open()` which computes a full Merkle + // proof. We only need the leaf, so a direct `fn get()` method would be faster. + let prev_amount = self + .forest + .open(prev_root, key) + .ok() + .and_then(|proof| proof.get(&key)) + .and_then(|word| FungibleAsset::try_from(word).ok()) + .map_or(0, |asset| asset.amount()); + + let new_balance = i128::from(prev_amount) + i128::from(*amount_delta); + u64::try_from(new_balance).map_err(|_| InnerForestError::BalanceUnderflow { + account_id, + faucet_id: *faucet_id, + prev_balance: prev_amount, + delta: *amount_delta, + })? 
+ }; + + let value = if new_amount == 0 { + EMPTY_WORD + } else { + let asset: Asset = FungibleAsset::new(*faucet_id, new_amount) + .expect("valid fungible asset") + .into(); + Word::from(asset) + }; + entries.push((key, value)); + } + + // Process non-fungible assets + for (asset, action) in vault_delta.non_fungible().iter() { + let value = match action { + NonFungibleDeltaAction::Add => Word::from(Asset::NonFungible(*asset)), + NonFungibleDeltaAction::Remove => EMPTY_WORD, + }; + entries.push((asset.vault_key().into(), value)); + } + + if entries.is_empty() { + return Ok(()); + } + + let updated_root = self + .forest + .batch_insert(prev_root, entries.iter().copied()) + .expect("forest insertion should succeed"); + + self.vault_roots.insert((account_id, block_num), updated_root); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + vault_entries = entries.len(), + "Updated vault in forest" + ); + Ok(()) + } + + /// Updates the forest with storage map changes from a delta. + /// + /// Processes storage map slot deltas, building SMTs for each modified slot + /// and tracking the new roots and accumulated entries. + /// + /// # Arguments + /// + /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). + /// If `false`, delta values are relative changes applied to previous state. 
+ fn update_account_storage( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + storage_delta: &AccountStorageDelta, + is_full_state: bool, + ) { + for (slot_name, map_delta) in storage_delta.maps() { + let prev_root = self.get_latest_storage_map_root(account_id, slot_name, is_full_state); + + let delta_entries: Vec<_> = + map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); + + if delta_entries.is_empty() { + continue; + } + + let updated_root = self + .forest + .batch_insert(prev_root, delta_entries.iter().copied()) + .expect("forest insertion should succeed"); + + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), updated_root); + + // Accumulate entries: start from previous block's entries or empty for full state + let mut accumulated_entries = if is_full_state { + BTreeMap::new() + } else { + self.storage_entries + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..(account_id, slot_name.clone(), block_num), + ) + .next_back() + .map(|(_, entries)| entries.clone()) + .unwrap_or_default() + }; + + // Apply delta entries (insert or remove if value is EMPTY_WORD) + for (key, value) in &delta_entries { + if *value == EMPTY_WORD { + accumulated_entries.remove(key); + } else { + accumulated_entries.insert(*key, *value); + } + } + + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), accumulated_entries); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + ?slot_name, + delta_entries = delta_entries.len(), + "Updated storage map in forest" + ); + } + } +} diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs new file mode 100644 index 0000000000..216ef42061 --- /dev/null +++ b/crates/store/src/inner_forest/tests.rs @@ -0,0 +1,430 @@ +use miden_protocol::account::AccountCode; +use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; +use miden_protocol::testing::account_id::{ + 
ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, +}; +use miden_protocol::{Felt, FieldElement}; + +use super::*; + +fn dummy_account() -> AccountId { + AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap() +} + +fn dummy_faucet() -> AccountId { + AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap() +} + +fn dummy_fungible_asset(faucet_id: AccountId, amount: u64) -> Asset { + FungibleAsset::new(faucet_id, amount).unwrap().into() +} + +/// Creates a partial `AccountDelta` (without code) for testing incremental updates. +fn dummy_partial_delta( + account_id: AccountId, + vault_delta: AccountVaultDelta, + storage_delta: AccountStorageDelta, +) -> AccountDelta { + // For partial deltas, nonce_delta must be > 0 if there are changes + let nonce_delta = if vault_delta.is_empty() && storage_delta.is_empty() { + Felt::ZERO + } else { + Felt::ONE + }; + AccountDelta::new(account_id, storage_delta, vault_delta, nonce_delta).unwrap() +} + +/// Creates a full-state `AccountDelta` (with code) for testing DB reconstruction. 
+fn dummy_full_state_delta(account_id: AccountId, assets: &[Asset]) -> AccountDelta { + use miden_protocol::account::{Account, AccountStorage}; + + // Create a minimal account with the given assets + let vault = AssetVault::new(assets).unwrap(); + let storage = AccountStorage::new(vec![]).unwrap(); + let code = AccountCode::mock(); + let nonce = Felt::ONE; + + let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); + + // Convert to delta - this will be a full-state delta because it has code + AccountDelta::try_from(account).unwrap() +} + +#[test] +fn test_empty_smt_root_is_recognized() { + use miden_protocol::crypto::merkle::smt::Smt; + + let empty_root = InnerForest::empty_smt_root(); + + // Verify an empty SMT has the expected root + assert_eq!(Smt::default().root(), empty_root); + + // Test that SmtForest accepts this root in batch_insert + let mut forest = SmtForest::new(); + let entries = vec![(Word::from([1u32, 2, 3, 4]), Word::from([5u32, 6, 7, 8]))]; + + assert!(forest.batch_insert(empty_root, entries).is_ok()); +} + +#[test] +fn test_inner_forest_basic_initialization() { + let forest = InnerForest::new(); + assert!(forest.storage_map_roots.is_empty()); + assert!(forest.vault_roots.is_empty()); +} + +#[test] +fn test_update_account_with_empty_deltas() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let block_num = BlockNumber::GENESIS.child(); + + let delta = dummy_partial_delta( + account_id, + AccountVaultDelta::default(), + AccountStorageDelta::default(), + ); + + forest.update_account(block_num, &delta).unwrap(); + + // Empty deltas should not create entries + assert!(!forest.vault_roots.contains_key(&(account_id, block_num))); + assert!(forest.storage_map_roots.is_empty()); +} + +#[test] +fn test_update_vault_with_fungible_asset() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let block_num = BlockNumber::GENESIS.child(); + + 
let asset = dummy_fungible_asset(faucet_id, 100); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset).unwrap(); + + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + + let vault_root = forest.vault_roots[&(account_id, block_num)]; + assert_ne!(vault_root, EMPTY_WORD); +} + +#[test] +fn test_compare_partial_vs_full_state_delta_vault() { + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let block_num = BlockNumber::GENESIS.child(); + let asset = dummy_fungible_asset(faucet_id, 100); + + // Approach 1: Partial delta (simulates block application) + let mut forest_partial = InnerForest::new(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset).unwrap(); + let partial_delta = + dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest_partial.update_account(block_num, &partial_delta).unwrap(); + + // Approach 2: Full-state delta (simulates DB reconstruction) + let mut forest_full = InnerForest::new(); + let full_delta = dummy_full_state_delta(account_id, &[asset]); + forest_full.update_account(block_num, &full_delta).unwrap(); + + // Both approaches must produce identical vault roots + let root_partial = forest_partial.vault_roots.get(&(account_id, block_num)).unwrap(); + let root_full = forest_full.vault_roots.get(&(account_id, block_num)).unwrap(); + + assert_eq!(root_partial, root_full); + assert_ne!(*root_partial, EMPTY_WORD); +} + +#[test] +fn test_incremental_vault_updates() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: 100 tokens + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, 
AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_1 = forest.vault_roots[&(account_id, block_1)]; + + // Block 2: 150 tokens (update) + let block_2 = block_1.child(); + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); + let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2).unwrap(); + let root_2 = forest.vault_roots[&(account_id, block_2)]; + + assert_ne!(root_1, root_2); +} + +#[test] +fn test_full_state_delta_starts_from_empty_root() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let block_num = BlockNumber::GENESIS.child(); + + // Simulate a pre-existing vault state that should be ignored for full-state deltas + let mut vault_delta_pre = AccountVaultDelta::default(); + vault_delta_pre.add_asset(dummy_fungible_asset(faucet_id, 999)).unwrap(); + let delta_pre = + dummy_partial_delta(account_id, vault_delta_pre, AccountStorageDelta::default()); + forest.update_account(block_num, &delta_pre).unwrap(); + assert!(forest.vault_roots.contains_key(&(account_id, block_num))); + + // Now create a full-state delta at the same block + // A full-state delta should start from an empty root, not from the previous state + let asset = dummy_fungible_asset(faucet_id, 100); + let full_delta = dummy_full_state_delta(account_id, &[asset]); + + // Create a fresh forest to compare + let mut fresh_forest = InnerForest::new(); + fresh_forest.update_account(block_num, &full_delta).unwrap(); + let fresh_root = fresh_forest.vault_roots[&(account_id, block_num)]; + + // Update the original forest with the full-state delta + forest.update_account(block_num, &full_delta).unwrap(); + let updated_root = forest.vault_roots[&(account_id, block_num)]; + + // The full-state delta should produce the same root regardless of prior 
state + assert_eq!(updated_root, fresh_root); +} + +#[test] +fn test_vault_state_persists_across_blocks_without_changes() { + // Regression test for issue #7: vault state should persist across blocks + // where no changes occur, not reset to empty. + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Helper to query vault root at or before a block (range query) + let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { + forest + .vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) + }; + + // Block 1: Add 100 tokens + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; + + // Blocks 2-5: No changes to this account (simulated by not calling update_account) + // This means no entries are added to vault_roots for these blocks. + + // Block 6: Add 50 more tokens + // The previous root lookup should find block_1's root, not return empty. 
+ let block_6 = BlockNumber::from(6); + let mut vault_delta_6 = AccountVaultDelta::default(); + vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); // 100 + 50 = 150 + let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); + forest.update_account(block_6, &delta_6).unwrap(); + + // The root at block 6 should be different from block 1 (we added more tokens) + let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; + assert_ne!(root_after_block_1, root_after_block_6); + + // Verify range query finds the correct previous root for intermediate blocks + // Block 3 should return block 1's root (most recent before block 3) + let root_at_block_3 = get_vault_root(&forest, account_id, BlockNumber::from(3)); + assert_eq!(root_at_block_3, Some(root_after_block_1)); + + // Block 5 should also return block 1's root + let root_at_block_5 = get_vault_root(&forest, account_id, BlockNumber::from(5)); + assert_eq!(root_at_block_5, Some(root_after_block_1)); + + // Block 6 should return block 6's root + let root_at_block_6 = get_vault_root(&forest, account_id, block_6); + assert_eq!(root_at_block_6, Some(root_after_block_6)); +} + +#[test] +fn test_partial_delta_applies_fungible_changes_correctly() { + // Regression test for issue #8: partial deltas should apply changes to previous balance, + // not treat amounts as absolute values. 
+ let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: Add 100 tokens (partial delta with +100) + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_after_100 = forest.vault_roots[&(account_id, block_1)]; + + // Block 2: Add 50 more tokens (partial delta with +50) + // Result should be 150 tokens, not 50 tokens + let block_2 = block_1.child(); + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); + let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2).unwrap(); + let root_after_150 = forest.vault_roots[&(account_id, block_2)]; + + // Roots should be different (100 tokens vs 150 tokens) + assert_ne!(root_after_100, root_after_150); + + // Block 3: Remove 30 tokens (partial delta with -30) + // Result should be 120 tokens + let block_3 = block_2.child(); + let mut vault_delta_3 = AccountVaultDelta::default(); + vault_delta_3.remove_asset(dummy_fungible_asset(faucet_id, 30)).unwrap(); + let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); + forest.update_account(block_3, &delta_3).unwrap(); + let root_after_120 = forest.vault_roots[&(account_id, block_3)]; + + // Root should change again + assert_ne!(root_after_150, root_after_120); + + // Verify by creating a fresh forest with a full-state delta of 120 tokens + // The roots should match + let mut fresh_forest = InnerForest::new(); + let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 120)]); + fresh_forest.update_account(block_3, 
&full_delta).unwrap(); + let root_full_state_120 = fresh_forest.vault_roots[&(account_id, block_3)]; + + assert_eq!(root_after_120, root_full_state_120); +} + +#[test] +fn test_partial_delta_across_long_block_range() { + // Validation test: partial deltas should work across 101+ blocks. + // + // This test passes now because InnerForest keeps all history. Once pruning is implemented + // (estimated ~50 blocks), this test will fail unless DB fallback is also implemented. + // When that happens, the test should be updated to use DB fallback or converted to an + // integration test that has DB access. + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: Add 1000 tokens + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_after_1000 = forest.vault_roots[&(account_id, block_1)]; + + // Blocks 2-100: No changes to this account (simulating long gap) + + // Block 101: Add 500 more tokens (partial delta with +500) + // This requires looking up block 1's state across a 100-block gap. 
+ let block_101 = BlockNumber::from(101); + let mut vault_delta_101 = AccountVaultDelta::default(); + vault_delta_101.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); + let delta_101 = + dummy_partial_delta(account_id, vault_delta_101, AccountStorageDelta::default()); + forest.update_account(block_101, &delta_101).unwrap(); + let root_after_1500 = forest.vault_roots[&(account_id, block_101)]; + + // Roots should be different (1000 tokens vs 1500 tokens) + assert_ne!(root_after_1000, root_after_1500); + + // Verify the final state matches a fresh forest with 1500 tokens + let mut fresh_forest = InnerForest::new(); + let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 1500)]); + fresh_forest.update_account(block_101, &full_delta).unwrap(); + let root_full_state_1500 = fresh_forest.vault_roots[&(account_id, block_101)]; + + assert_eq!(root_after_1500, root_full_state_1500); +} + +#[test] +fn test_update_storage_map() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let block_num = BlockNumber::GENESIS.child(); + + let slot_name = StorageSlotName::mock(3); + let key = Word::from([1u32, 2, 3, 4]); + let value = Word::from([5u32, 6, 7, 8]); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + // Verify storage root was created + assert!( + forest + .storage_map_roots + .contains_key(&(account_id, slot_name.clone(), block_num)) + ); + let storage_root = forest.storage_map_roots[&(account_id, slot_name, block_num)]; + assert_ne!(storage_root, 
InnerForest::empty_smt_root()); +} + +#[test] +fn test_storage_map_incremental_updates() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + + let slot_name = StorageSlotName::mock(3); + let key1 = Word::from([1u32, 0, 0, 0]); + let key2 = Word::from([2u32, 0, 0, 0]); + let value1 = Word::from([10u32, 0, 0, 0]); + let value2 = Word::from([20u32, 0, 0, 0]); + let value3 = Word::from([30u32, 0, 0, 0]); + + // Block 1: Insert key1 -> value1 + let block_1 = BlockNumber::GENESIS.child(); + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(key1, value1); + let raw_1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + let root_1 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_1)]; + + // Block 2: Insert key2 -> value2 (key1 should persist) + let block_2 = block_1.child(); + let mut map_delta_2 = StorageMapDelta::default(); + map_delta_2.insert(key2, value2); + let raw_2 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_2))]); + let storage_delta_2 = AccountStorageDelta::from_raw(raw_2); + let delta_2 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_2); + forest.update_account(block_2, &delta_2).unwrap(); + let root_2 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_2)]; + + // Block 3: Update key1 -> value3 + let block_3 = block_2.child(); + let mut map_delta_3 = StorageMapDelta::default(); + map_delta_3.insert(key1, value3); + let raw_3 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_3))]); + let storage_delta_3 = AccountStorageDelta::from_raw(raw_3); + let 
delta_3 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_3); + forest.update_account(block_3, &delta_3).unwrap(); + let root_3 = forest.storage_map_roots[&(account_id, slot_name, block_3)]; + + // All roots should be different + assert_ne!(root_1, root_2); + assert_ne!(root_2, root_3); + assert_ne!(root_1, root_3); +} diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index d50f124f73..1d345dcf01 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -1,25 +1,18 @@ -use std::time::Duration; - mod accounts; mod blocks; mod db; mod errors; pub mod genesis; +mod inner_forest; mod server; pub mod state; -pub use accounts::{ - AccountTreeStorage, - AccountTreeWithHistory, - HistoricalError, - InMemoryAccountTree, -}; +#[cfg(feature = "rocksdb")] +pub use accounts::PersistentAccountTree; +pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; pub use genesis::GenesisState; pub use server::{DataDirectory, Store}; // CONSTANTS // ================================================================================================= const COMPONENT: &str = "miden-store"; - -/// How often to run the database maintenance routine. 
-const DATABASE_MAINTENANCE_INTERVAL: Duration = Duration::from_secs(24 * 60 * 60); diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 69c29e6120..292842e778 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated as proto; use miden_node_utils::ErrorReport; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::note::Nullifier; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Nullifier; use tonic::{Request, Response, Status}; -use tracing::instrument; +use tracing::{info, instrument}; use crate::COMPONENT; use crate::state::State; @@ -26,8 +26,9 @@ impl StoreApi { /// Shared implementation for all `get_block_header_by_number` endpoints. pub async fn get_block_header_by_number_inner( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { + info!(target: COMPONENT, ?request); let request = request.into_inner(); let block_num = request.block_num.map(BlockNumber::from); @@ -36,7 +37,7 @@ impl StoreApi { .get_block_header(block_num, request.include_mmr_proof.unwrap_or(false)) .await?; - Ok(Response::new(proto::shared::BlockHeaderByNumberResponse { + Ok(Response::new(proto::rpc::BlockHeaderByNumberResponse { block_header: block_header.map(Into::into), chain_length: mmr_proof.as_ref().map(|p| p.forest.num_leaves() as u32), mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)), @@ -64,9 +65,9 @@ pub fn conversion_error_to_status(value: &ConversionError) -> Status { /// Reads a block range from a request, returning a specific error type if the field is missing pub fn read_block_range( - block_range: Option, + block_range: Option, entity: &'static str, -) -> Result +) -> Result where E: From, { @@ -128,7 
+129,7 @@ where id.ok_or_else(|| { ConversionError::deserialization_error( "AccountId", - miden_objects::crypto::utils::DeserializationError::InvalidValue( + miden_protocol::crypto::utils::DeserializationError::InvalidValue( "Missing account ID".to_string(), ), ) @@ -163,5 +164,5 @@ pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result< #[instrument(level = "debug",target = COMPONENT, skip_all)] pub fn read_block_numbers(block_numbers: &[u32]) -> BTreeSet { - block_numbers.iter().map(|raw_number| BlockNumber::from(*raw_number)).collect() + BTreeSet::from_iter(block_numbers.iter().map(|raw_number| BlockNumber::from(*raw_number))) } diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 9006106eae..9dd2b39c4d 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -1,17 +1,16 @@ use std::convert::Infallible; -use miden_node_proto::generated::block_producer_store::block_producer_server; +use miden_node_proto::generated::store::block_producer_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_node_utils::ErrorReport; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::Word; -use miden_objects::block::{BlockNumber, ProvenBlock}; -use miden_objects::utils::Deserializable; +use miden_protocol::Word; +use miden_protocol::block::{BlockNumber, ProvenBlock}; +use miden_protocol::utils::Deserializable; use tonic::{Request, Response, Status}; -use tracing::{Instrument, instrument}; +use tracing::Instrument; -use crate::COMPONENT; use crate::errors::ApplyBlockError; use crate::server::api::{ StoreApi, @@ -31,28 +30,14 @@ impl block_producer_server::BlockProducer for StoreApi { /// Returns block header for the specified block number. /// /// If the block number is not provided, block header for the latest block is returned. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_block_header_by_number", - skip_all, - err - )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } /// Updates the local DB by inserting a new block header and the related data. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.apply_block", - skip_all, - err - )] async fn apply_block( &self, request: Request, @@ -65,10 +50,10 @@ impl block_producer_server::BlockProducer for StoreApi { let span = tracing::Span::current(); span.set_attribute("block.number", block.header().block_num()); - span.set_attribute("block.commitment", block.commitment()); - span.set_attribute("block.accounts.count", block.updated_accounts().len()); - span.set_attribute("block.output_notes.count", block.output_notes().count()); - span.set_attribute("block.nullifiers.count", block.created_nullifiers().len()); + span.set_attribute("block.commitment", block.header().commitment()); + span.set_attribute("block.accounts.count", block.body().updated_accounts().len()); + span.set_attribute("block.output_notes.count", block.body().output_notes().count()); + span.set_attribute("block.nullifiers.count", block.body().created_nullifiers().len()); // We perform the apply_block work in a separate task. This prevents the caller cancelling // the request and thereby cancelling the task at an arbitrary point of execution. @@ -106,17 +91,10 @@ impl block_producer_server::BlockProducer for StoreApi { } /// Returns data needed by the block producer to construct and prove the next block. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_block_inputs", - skip_all, - err - )] async fn get_block_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_ids = read_account_ids::(&request.account_ids)?; @@ -136,7 +114,7 @@ impl block_producer_server::BlockProducer for StoreApi { reference_blocks, ) .await - .map(proto::block_producer_store::BlockInputs::from) + .map(proto::store::BlockInputs::from) .map(Response::new) .inspect_err(|err| tracing::Span::current().set_error(err)) .map_err(|err| tonic::Status::internal(err.as_report())) @@ -145,17 +123,10 @@ impl block_producer_server::BlockProducer for StoreApi { /// Fetches the inputs for a transaction batch from the database. /// /// See [`State::get_batch_inputs`] for details. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_batch_inputs", - skip_all, - err - )] async fn get_batch_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let note_commitments: Vec = try_convert(request.note_commitments) @@ -177,17 +148,10 @@ impl block_producer_server::BlockProducer for StoreApi { .map_err(|err| tonic::Status::internal(err.as_report())) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.block_producer_server.get_transaction_inputs", - skip_all, - err - )] async fn get_transaction_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_id = read_account_id::(request.account_id)?; @@ -205,17 +169,19 @@ impl block_producer_server::BlockProducer for StoreApi { let block_height = self.state.latest_block_num().await.as_u32(); - Ok(Response::new(proto::block_producer_store::TransactionInputs { - account_state: 
Some(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord { + Ok(Response::new(proto::store::TransactionInputs { + account_state: Some(proto::store::transaction_inputs::AccountTransactionInputRecord { account_id: Some(account_id.into()), account_commitment: Some(tx_inputs.account_commitment.into()), }), nullifiers: tx_inputs .nullifiers .into_iter() - .map(|nullifier| proto::block_producer_store::transaction_inputs::NullifierTransactionInputRecord { - nullifier: Some(nullifier.nullifier.into()), - block_num: nullifier.block_num.as_u32(), + .map(|nullifier| { + proto::store::transaction_inputs::NullifierTransactionInputRecord { + nullifier: Some(nullifier.nullifier.into()), + block_num: nullifier.block_num.as_u32(), + } }) .collect(), found_unauthenticated_notes: tx_inputs diff --git a/crates/store/src/server/db_maintenance.rs b/crates/store/src/server/db_maintenance.rs deleted file mode 100644 index fce2676772..0000000000 --- a/crates/store/src/server/db_maintenance.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use miden_node_utils::tracing::OpenTelemetrySpanExt; -use tracing::{Instrument, Span}; - -use crate::state::State; - -pub struct DbMaintenance { - state: Arc, - optimization_interval: Duration, -} - -impl DbMaintenance { - pub fn new(state: Arc, optimization_interval: Duration) -> Self { - Self { state, optimization_interval } - } - - /// Runs infinite maintenance loop. 
- pub async fn run(self) { - loop { - tokio::time::sleep(self.optimization_interval).await; - - let root_span = tracing::info_span!( - "optimize_database", - interval = self.optimization_interval.as_secs_f32() - ); - self.state - .optimize_db() - .instrument(root_span) - .await - .unwrap_or_else(|err| Span::current().set_error(&err)); - } - } -} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 197d2f3bd9..b4b5798db9 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -4,15 +4,15 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; -use miden_node_proto::generated::{block_producer_store, ntx_builder_store, rpc_store}; +use miden_node_proto::generated::store; use miden_node_proto_build::{ store_block_producer_api_descriptor, store_ntx_builder_api_descriptor, store_rpc_api_descriptor, - store_shared_api_descriptor, }; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; +use miden_protocol::block::BlockSigner; use tokio::net::TcpListener; use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; @@ -21,13 +21,12 @@ use tracing::{info, instrument}; use crate::blocks::BlockStore; use crate::db::Db; -use crate::server::db_maintenance::DbMaintenance; +use crate::errors::ApplyBlockError; use crate::state::State; -use crate::{COMPONENT, DATABASE_MAINTENANCE_INTERVAL, GenesisState}; +use crate::{COMPONENT, GenesisState}; mod api; mod block_producer; -mod db_maintenance; mod ntx_builder; mod rpc_api; @@ -51,7 +50,10 @@ impl Store { skip_all, err, )] - pub fn bootstrap(genesis: GenesisState, data_directory: &Path) -> anyhow::Result<()> { + pub fn bootstrap( + genesis: GenesisState, + data_directory: &Path, + ) -> anyhow::Result<()> { let genesis = genesis .into_block() .context("failed to convert genesis configuration into the genesis block")?; @@ -87,29 +89,30 @@ impl Store { let ntx_builder_address = 
self.ntx_builder_listener.local_addr()?; let block_producer_address = self.block_producer_listener.local_addr()?; info!(target: COMPONENT, rpc_endpoint=?rpc_address, ntx_builder_endpoint=?ntx_builder_address, - block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_timeout, "Loading database"); - - let state = - Arc::new(State::load(&self.data_directory).await.context("failed to load state")?); - - let db_maintenance_service = - DbMaintenance::new(Arc::clone(&state), DATABASE_MAINTENANCE_INTERVAL); + block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_timeout, + "Loading database"); + + let (termination_ask, mut termination_signal) = + tokio::sync::mpsc::channel::(1); + let state = Arc::new( + State::load(&self.data_directory, termination_ask) + .await + .context("failed to load state")?, + ); let rpc_service = - rpc_store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); - let ntx_builder_service = - ntx_builder_store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { - state: Arc::clone(&state), - }); + store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { + state: Arc::clone(&state), + }); let block_producer_service = - block_producer_store::block_producer_server::BlockProducerServer::new(api::StoreApi { + store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) .register_file_descriptor_set(store_ntx_builder_api_descriptor()) .register_file_descriptor_set(store_block_producer_api_descriptor()) - .register_file_descriptor_set(store_shared_api_descriptor()) .build_v1() .context("failed to build reflection service")?; @@ -121,7 +124,6 @@ impl Store { 
.register_file_descriptor_set(store_rpc_api_descriptor()) .register_file_descriptor_set(store_ntx_builder_api_descriptor()) .register_file_descriptor_set(store_block_producer_api_descriptor()) - .register_file_descriptor_set(store_shared_api_descriptor()) .build_v1alpha() .context("failed to build reflection service")?; @@ -129,11 +131,6 @@ impl Store { let mut join_set = JoinSet::new(); - join_set.spawn(async move { - db_maintenance_service.run().await; - Ok(()) - }); - join_set.spawn(async move { // Manual tests on testnet indicate each iteration takes ~2s once things are OS cached. // @@ -182,7 +179,13 @@ impl Store { ); // SAFETY: The joinset is definitely not empty. - join_set.join_next().await.unwrap()?.map_err(Into::into) + let service = async move { join_set.join_next().await.unwrap()?.map_err(Into::into) }; + tokio::select! { + result = service => result, + Some(err) = termination_signal.recv() => { + Err(anyhow::anyhow!("received termination signal").context(err)) + } + } } } @@ -196,7 +199,7 @@ impl DataDirectory { /// Creates a new [`DataDirectory`], ensuring that the directory exists and is accessible /// insofar as is possible. 
pub fn load(path: PathBuf) -> std::io::Result { - let meta = std::fs::metadata(&path)?; + let meta = fs_err::metadata(&path)?; if meta.is_dir().not() { return Err(std::io::ErrorKind::NotConnected.into()); } diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 1f2dd15958..5f0fd764de 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -1,18 +1,30 @@ +use std::collections::BTreeSet; use std::num::{NonZero, TryFromIntError}; -use miden_node_proto::domain::account::{AccountInfo, NetworkAccountPrefix}; -use miden_node_proto::generated::ntx_builder_store::ntx_builder_server; -use miden_node_proto::generated::{self as proto}; +use miden_crypto::merkle::smt::SmtProof; +use miden_node_proto::domain::account::{AccountInfo, validate_network_account_prefix}; +use miden_node_proto::generated as proto; +use miden_node_proto::generated::rpc::BlockRange; +use miden_node_proto::generated::store::ntx_builder_server; use miden_node_utils::ErrorReport; -use miden_objects::block::BlockNumber; -use miden_objects::note::Note; +use miden_protocol::account::StorageSlotName; +use miden_protocol::asset::AssetVaultKey; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Note; use tonic::{Request, Response, Status}; -use tracing::{debug, instrument}; +use tracing::debug; use crate::COMPONENT; use crate::db::models::Page; -use crate::errors::GetNoteScriptByRootError; -use crate::server::api::{StoreApi, internal_error, invalid_argument, read_root}; +use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError, GetWitnessesError}; +use crate::server::api::{ + StoreApi, + internal_error, + invalid_argument, + read_account_id, + read_block_range, + read_root, +}; // NTX BUILDER ENDPOINTS // ================================================================================================ @@ -22,18 +34,10 @@ impl ntx_builder_server::NtxBuilder for StoreApi { /// Returns block header 
for the specified block number. /// /// If the block number is not provided, block header for the latest block is returned. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_block_header_by_number", - skip_all, - ret(level = "debug"), - err - )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } @@ -42,18 +46,10 @@ impl ntx_builder_server::NtxBuilder for StoreApi { /// /// This returns all the blockchain-related information needed for executing transactions /// without authenticating notes. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_current_blockchain_data", - skip_all, - ret(level = "debug"), - err - )] async fn get_current_blockchain_data( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { let block_num = request.into_inner().block_num.map(BlockNumber::from); let response = match self @@ -62,11 +58,11 @@ impl ntx_builder_server::NtxBuilder for StoreApi { .await .map_err(internal_error)? 
{ - Some((header, peaks)) => proto::ntx_builder_store::CurrentBlockchainData { + Some((header, peaks)) => proto::store::CurrentBlockchainData { current_peaks: peaks.peaks().iter().map(Into::into).collect(), current_block_header: Some(header.into()), }, - None => proto::ntx_builder_store::CurrentBlockchainData { + None => proto::store::CurrentBlockchainData { current_peaks: vec![], current_block_header: None, }, @@ -75,46 +71,33 @@ impl ntx_builder_server::NtxBuilder for StoreApi { Ok(Response::new(response)) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_network_account_details_by_prefix", - skip_all, - ret(level = "debug"), - err - )] async fn get_network_account_details_by_prefix( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); // Validate that the call is for a valid network account prefix - let prefix = NetworkAccountPrefix::try_from(request.account_id_prefix).map_err(|err| { + let prefix = validate_network_account_prefix(request.account_id_prefix).map_err(|err| { Status::invalid_argument( err.as_report_context("request does not contain a valid network account prefix"), ) })?; let account_info: Option = - self.state.get_network_account_details_by_prefix(prefix.inner()).await?; + self.state.get_network_account_details_by_prefix(prefix).await?; - Ok(Response::new(proto::ntx_builder_store::MaybeAccountDetails { + Ok(Response::new(proto::store::MaybeAccountDetails { details: account_info.map(|acc| (&acc).into()), })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_unconsumed_network_notes", - skip_all, - err - )] async fn get_unconsumed_network_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); + let block_num = BlockNumber::from(request.block_num); + let account_id = 
read_account_id::(request.account_id)?; let state = self.state.clone(); @@ -125,8 +108,10 @@ impl ntx_builder_server::NtxBuilder for StoreApi { let page = Page { token: request.page_token, size }; // TODO: no need to get the whole NoteRecord here, a NetworkNote wrapper should be created // instead - let (notes, next_page) = - state.get_unconsumed_network_notes(page).await.map_err(internal_error)?; + let (notes, next_page) = state + .get_unconsumed_network_notes_for_account(account_id, block_num, page) + .await + .map_err(internal_error)?; let mut network_notes = Vec::with_capacity(notes.len()); for note in notes { @@ -137,71 +122,68 @@ impl ntx_builder_server::NtxBuilder for StoreApi { network_notes.push(note.into()); } - Ok(Response::new(proto::ntx_builder_store::UnconsumedNetworkNotes { + Ok(Response::new(proto::store::UnconsumedNetworkNotes { notes: network_notes, next_token: next_page.token, })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_unconsumed_network_notes_for_account", - skip_all, - err - )] - async fn get_unconsumed_network_notes_for_account( + /// Returns network account IDs within the specified block range (based on account creation + /// block). + /// + /// The function may return fewer accounts than exist in the range if the result would exceed + /// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is + /// truncated at a block boundary to ensure all accounts from included blocks are returned. + /// + /// The response includes pagination info with the last block number that was fully included. 
+ async fn get_network_account_ids( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); - let block_num = BlockNumber::from(request.block_num); - let network_account_id_prefix = - NetworkAccountPrefix::try_from(request.network_account_id_prefix).map_err(|err| { - invalid_argument(err.as_report_context("invalid network_account_id_prefix")) - })?; - let state = self.state.clone(); + let mut chain_tip = self.state.latest_block_num().await; + let block_range = + read_block_range::(Some(request), "GetNetworkAccountIds")? + .into_inclusive_range::(&chain_tip)?; - let size = - NonZero::try_from(request.page_size as usize).map_err(|err: TryFromIntError| { - invalid_argument(err.as_report_context("invalid page_size")) - })?; - let page = Page { token: request.page_token, size }; - // TODO: no need to get the whole NoteRecord here, a NetworkNote wrapper should be created - // instead - let (notes, next_page) = state - .get_unconsumed_network_notes_for_account(network_account_id_prefix, block_num, page) - .await - .map_err(internal_error)?; + let (account_ids, mut last_block_included) = + self.state.get_all_network_accounts(block_range).await.map_err(internal_error)?; - let mut network_notes = Vec::with_capacity(notes.len()); - for note in notes { - // SAFETY: Network notes are filtered in the database, so they should have details; - // otherwise the state would be corrupted - let (assets, recipient) = note.details.unwrap().into_parts(); - let note = Note::new(assets, note.metadata, recipient); - network_notes.push(note.into()); + let account_ids = Vec::from_iter(account_ids.into_iter().map(Into::into)); + + if last_block_included > chain_tip { + last_block_included = chain_tip; } - Ok(Response::new(proto::ntx_builder_store::UnconsumedNetworkNotes { - notes: network_notes, - next_token: next_page.token, + chain_tip = self.state.latest_block_num().await; + + 
Ok(Response::new(proto::store::NetworkAccountIdList { + account_ids, + pagination_info: Some(proto::rpc::PaginationInfo { + chain_tip: chain_tip.as_u32(), + block_num: last_block_included.as_u32(), + }), })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_note_script_by_root", - skip_all, - ret(level = "debug"), - err - )] + async fn get_account( + &self, + request: Request, + ) -> Result, Status> { + debug!(target: COMPONENT, ?request); + let request = request.into_inner(); + let account_request = request.try_into()?; + + let proof = self.state.get_account(account_request).await?; + + Ok(Response::new(proof.into())) + } + async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let root = read_root::(request.into_inner().root, "NoteRoot")?; @@ -212,8 +194,104 @@ impl ntx_builder_server::NtxBuilder for StoreApi { .await .map_err(GetNoteScriptByRootError::from)?; - Ok(Response::new(proto::shared::MaybeNoteScript { + Ok(Response::new(proto::rpc::MaybeNoteScript { script: note_script.map(Into::into), })) } + + async fn get_vault_asset_witnesses( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + // Read account ID. + let account_id = + read_account_id::(request.account_id).map_err(invalid_argument)?; + + // Read vault keys. + let vault_keys = request + .vault_keys + .into_iter() + .map(|key_digest| { + let word = read_root::(Some(key_digest), "VaultKey") + .map_err(invalid_argument)?; + Ok(AssetVaultKey::new_unchecked(word)) + }) + .collect::, Status>>()?; + + // Read block number from request, use latest if not provided. + let block_num = if let Some(num) = request.block_num { + num.into() + } else { + self.state.latest_block_num().await + }; + + // Retrieve the asset witnesses. 
+ let asset_witnesses = self + .state + .get_vault_asset_witnesses(account_id, block_num, vault_keys) + .await + .map_err(internal_error)?; + + // Convert AssetWitness to protobuf format by extracting witness data. + let proto_witnesses = asset_witnesses + .into_iter() + .map(|witness| { + let proof: SmtProof = witness.into(); + proto::store::vault_asset_witnesses_response::VaultAssetWitness { + proof: Some(proof.into()), + } + }) + .collect(); + + Ok(Response::new(proto::store::VaultAssetWitnessesResponse { + block_num: block_num.as_u32(), + asset_witnesses: proto_witnesses, + })) + } + + async fn get_storage_map_witness( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + // Read the account ID. + let account_id = + read_account_id::(request.account_id).map_err(invalid_argument)?; + + // Read the map key. + let map_key = + read_root::(request.map_key, "MapKey").map_err(invalid_argument)?; + + // Read the slot name. + let slot_name = StorageSlotName::new(request.slot_name).map_err(|err| { + tonic::Status::invalid_argument(format!("Invalid storage slot name: {err}")) + })?; + + // Read the block number, use latest if not provided. + let block_num = if let Some(num) = request.block_num { + num.into() + } else { + self.state.latest_block_num().await + }; + + // Retrieve the storage map witness. + let storage_witness = self + .state + .get_storage_map_witness(account_id, &slot_name, block_num, map_key) + .await + .map_err(internal_error)?; + + // Convert StorageMapWitness to protobuf format by extracting witness data. 
+ let proof: SmtProof = storage_witness.into(); + Ok(Response::new(proto::store::StorageMapWitnessResponse { + witness: Some(proto::store::storage_map_witness_response::StorageWitness { + key: Some(map_key.into()), + proof: Some(proof.into()), + }), + block_num: self.state.latest_block_num().await.as_u32(), + })) + } } diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index e1c923e27f..845855aa7d 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -1,12 +1,18 @@ use miden_node_proto::convert; -use miden_node_proto::domain::account::AccountInfo; -use miden_node_proto::generated::rpc_store::rpc_server; +use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::note::NoteId; +use miden_node_utils::limiter::{ + QueryParamAccountIdLimit, + QueryParamLimiter, + QueryParamNoteIdLimit, + QueryParamNoteTagLimit, + QueryParamNullifierLimit, +}; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::note::NoteId; use tonic::{Request, Response, Status}; -use tracing::{debug, info, instrument}; +use tracing::{debug, info}; use crate::COMPONENT; use crate::errors::{ @@ -31,14 +37,6 @@ use crate::server::api::{ validate_nullifiers, }; -// CONSTANTS -// ================================================================================================ - -const MAX_ACCOUNT_IDS: usize = 100; -const MAX_NULLIFIERS: usize = 100; -const MAX_NOTE_TAGS: usize = 100; -const MAX_NOTE_IDS: usize = 100; - // CLIENT ENDPOINTS // ================================================================================================ @@ -47,19 +45,10 @@ impl rpc_server::Rpc for StoreApi { /// Returns block header for the specified block number. /// /// If the block number is not provided, block header for the latest block is returned. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_block_header_by_number", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } @@ -67,37 +56,22 @@ impl rpc_server::Rpc for StoreApi { /// /// This endpoint also returns Merkle authentication path for each requested nullifier which can /// be verified against the latest root of the nullifier database. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.check_nullifiers", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn check_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { // Validate the nullifiers and convert them to Word values. Stop on first error. let request = request.into_inner(); // Validate nullifiers count - if request.nullifiers.len() > MAX_NULLIFIERS { - return Err(CheckNullifiersError::TooManyNullifiers( - request.nullifiers.len(), - MAX_NULLIFIERS, - ) - .into()); - } + check::(request.nullifiers.len())?; let nullifiers = validate_nullifiers::(&request.nullifiers)?; // Query the state for the request's nullifiers let proofs = self.state.check_nullifiers(&nullifiers).await; - Ok(Response::new(proto::rpc_store::CheckNullifiersResponse { + Ok(Response::new(proto::rpc::CheckNullifiersResponse { proofs: convert(proofs).collect(), })) } @@ -105,19 +79,10 @@ impl rpc_server::Rpc for StoreApi { /// Returns nullifiers that match the specified prefixes and have been consumed. /// /// Currently the only supported prefix length is 16 bits. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_nullifiers", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); if request.prefix_len != 16 { @@ -137,14 +102,14 @@ impl rpc_server::Rpc for StoreApi { let nullifiers = nullifiers .into_iter() - .map(|nullifier_info| proto::rpc_store::sync_nullifiers_response::NullifierUpdate { + .map(|nullifier_info| proto::rpc::sync_nullifiers_response::NullifierUpdate { nullifier: Some(nullifier_info.nullifier.into()), block_num: nullifier_info.block_num.as_u32(), }) .collect(); - Ok(Response::new(proto::rpc_store::SyncNullifiersResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncNullifiersResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: block_num.as_u32(), }), @@ -154,19 +119,10 @@ impl rpc_server::Rpc for StoreApi { /// Returns info which can be used by the client to sync up to the latest state of the chain /// for the objects the client is interested in. 
- #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_state", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_state( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_ids: Vec = read_account_ids::(&request.account_ids)?; @@ -199,7 +155,7 @@ impl rpc_server::Rpc for StoreApi { let notes = state.notes.into_iter().map(Into::into).collect(); - Ok(Response::new(proto::rpc_store::SyncStateResponse { + Ok(Response::new(proto::rpc::SyncStateResponse { chain_tip: self.state.latest_block_num().await.as_u32(), block_header: Some(state.block_header.into()), mmr_delta: Some(delta.into()), @@ -210,19 +166,10 @@ impl rpc_server::Rpc for StoreApi { } /// Returns info which can be used by the client to sync note state. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_notes", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let chain_tip = self.state.latest_block_num().await; @@ -231,19 +178,15 @@ impl rpc_server::Rpc for StoreApi { .into_inclusive_range::(&chain_tip)?; // Validate note tags count - if request.note_tags.len() > MAX_NOTE_TAGS { - return Err( - NoteSyncError::TooManyNoteTags(request.note_tags.len(), MAX_NOTE_TAGS).into() - ); - } + check::(request.note_tags.len())?; let (state, mmr_proof, last_block_included) = self.state.sync_notes(request.note_tags, block_range).await?; let notes = state.notes.into_iter().map(Into::into).collect(); - Ok(Response::new(proto::rpc_store::SyncNotesResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncNotesResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: 
last_block_included.as_u32(), }), @@ -257,15 +200,6 @@ impl rpc_server::Rpc for StoreApi { /// /// If the list is empty or no [`Note`] matched the requested [`NoteId`] and empty list is /// returned. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_notes_by_id", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn get_notes_by_id( &self, request: Request, @@ -275,13 +209,11 @@ impl rpc_server::Rpc for StoreApi { let note_ids = request.into_inner().ids; // Validate note IDs count - if note_ids.len() > MAX_NOTE_IDS { - return Err(GetNotesByIdError::TooManyNoteIds(note_ids.len(), MAX_NOTE_IDS).into()); - } + check::(note_ids.len())?; let note_ids: Vec = convert_digests_to_words::(note_ids)?; - let note_ids: Vec = note_ids.into_iter().map(From::from).collect(); + let note_ids: Vec = note_ids.into_iter().map(NoteId::from_raw).collect(); let notes = self .state @@ -295,38 +227,6 @@ impl rpc_server::Rpc for StoreApi { Ok(Response::new(proto::note::CommittedNoteList { notes })) } - /// Returns details for public (public) account by id. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_account_details", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] - async fn get_account_details( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - let account_id = read_account_id::(Some(request))?; - let account_info: AccountInfo = self.state.get_account_details(account_id).await?; - - // TODO: revisit this, previous implementation was just returning only the summary, but it - // is weird since the details are not empty. 
- Ok(Response::new((&account_info).into())) - } - - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_block_by_number", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn get_block_by_number( &self, request: Request, @@ -344,41 +244,23 @@ impl rpc_server::Rpc for StoreApi { Ok(Response::new(proto::blockchain::MaybeBlock { block })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_account_proof", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] - async fn get_account_proof( + async fn get_account( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_proof_request = request.try_into()?; + let account_request = request.try_into()?; - let proof = self.state.get_account_proof(account_proof_request).await?; + let account_data = self.state.get_account(account_request).await?; - Ok(Response::new(proof.into())) + Ok(Response::new(account_data.into())) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_account_vault", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_account_vault( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let chain_tip = self.state.latest_block_num().await; @@ -404,7 +286,7 @@ impl rpc_server::Rpc for StoreApi { .into_iter() .map(|update| { let vault_key: Word = update.vault_key.into(); - proto::rpc_store::AccountVaultUpdate { + proto::rpc::AccountVaultUpdate { vault_key: Some(vault_key.into()), asset: update.asset.map(Into::into), block_num: update.block_num.as_u32(), @@ -412,8 +294,8 @@ impl rpc_server::Rpc for StoreApi { }) .collect(); - Ok(Response::new(proto::rpc_store::SyncAccountVaultResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + 
Ok(Response::new(proto::rpc::SyncAccountVaultResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_included_block.as_u32(), }), @@ -424,19 +306,10 @@ impl rpc_server::Rpc for StoreApi { /// Returns storage map updates for the specified account within a block range. /// /// Supports cursor-based pagination for large storage maps. - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_storage_maps", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn sync_storage_maps( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_id = read_account_id::(request.account_id)?; @@ -461,16 +334,16 @@ impl rpc_server::Rpc for StoreApi { let updates = storage_maps_page .values .into_iter() - .map(|map_value| proto::rpc_store::StorageMapUpdate { - slot_index: u32::from(map_value.slot_index), + .map(|map_value| proto::rpc::StorageMapUpdate { + slot_name: map_value.slot_name.to_string(), key: Some(map_value.key.into()), value: Some(map_value.value.into()), block_num: map_value.block_num.as_u32(), }) .collect(); - Ok(Response::new(proto::rpc_store::SyncStorageMapsResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncStorageMapsResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: storage_maps_page.last_block_included.as_u32(), }), @@ -478,38 +351,21 @@ impl rpc_server::Rpc for StoreApi { })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.status", - skip_all, - level = "debug", - ret(level = "debug"), - err - )] async fn status( &self, _request: Request<()>, - ) -> Result, Status> { - Ok(Response::new(proto::rpc_store::StoreStatus { + ) -> Result, Status> { + Ok(Response::new(proto::rpc::StoreStatus { version: env!("CARGO_PKG_VERSION").to_string(), 
status: "connected".to_string(), chain_tip: self.state.latest_block_num().await.as_u32(), })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.get_note_script_by_root", - skip_all, - ret(level = "debug"), - err - )] async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let root = read_root::(request.into_inner().root, "NoteRoot")?; @@ -520,23 +376,15 @@ impl rpc_server::Rpc for StoreApi { .await .map_err(GetNoteScriptByRootError::from)?; - Ok(Response::new(proto::shared::MaybeNoteScript { + Ok(Response::new(proto::rpc::MaybeNoteScript { script: note_script.map(Into::into), })) } - #[instrument( - parent = None, - target = COMPONENT, - name = "store.rpc_server.sync_transactions", - skip_all, - ret(level = "debug"), - err - )] async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let request = request.into_inner(); @@ -552,13 +400,7 @@ impl rpc_server::Rpc for StoreApi { read_account_ids::(&request.account_ids)?; // Validate account IDs count - if account_ids.len() > MAX_ACCOUNT_IDS { - return Err(SyncTransactionsError::TooManyAccountIds( - account_ids.len(), - MAX_ACCOUNT_IDS, - ) - .into()); - } + check::(account_ids.len())?; let (last_block_included, transaction_records_db) = self .state @@ -566,28 +408,61 @@ impl rpc_server::Rpc for StoreApi { .await .map_err(SyncTransactionsError::from)?; + // Collect all note IDs from all transactions to make a single query + let all_notes_ids = transaction_records_db + .iter() + .flat_map(|tx| tx.output_notes.iter()) + .copied() + .collect::>(); + + // Retrieve all note data in a single query + let all_note_records = self + .state + .get_notes_by_id(all_notes_ids) + .await + .map_err(SyncTransactionsError::from)?; + + // Create a map from note ID to note record for efficient 
lookup + let note_map: std::collections::HashMap<_, _> = all_note_records + .into_iter() + .map(|note_record| (note_record.note_id, note_record)) + .collect(); + // Convert database TransactionRecord to proto TransactionRecord - let mut transaction_records = Vec::with_capacity(transaction_records_db.len()); + let mut transactions = Vec::with_capacity(transaction_records_db.len()); for tx_header in transaction_records_db { - // Retrieve full note data for output notes from the database - let note_records = self - .state - .get_notes_by_id(tx_header.output_notes.clone()) - .await - .map_err(SyncTransactionsError::from)?; + // Get note records for this transaction's output notes + let note_records: Vec<_> = tx_header + .output_notes + .iter() + .filter_map(|note_id| note_map.get(¬e_id.as_word()).cloned()) + .collect(); // Convert to proto using the helper method let proto_record = tx_header.into_proto_with_note_records(note_records); - transaction_records.push(proto_record); + transactions.push(proto_record); } - Ok(Response::new(proto::rpc_store::SyncTransactionsResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncTransactionsResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_block_included.as_u32(), }), - transaction_records, + transactions, })) } } + +// LIMIT HELPERS +// ================================================================================================ + +/// Formats an "Out of range" error +fn out_of_range_error(err: E) -> Status { + Status::out_of_range(err.to_string()) +} + +/// Check, but don't repeat ourselves mapping the error +fn check(n: usize) -> Result<(), Status> { + ::check(n).map_err(out_of_range_error) +} diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs new file mode 100644 index 0000000000..504ea06313 --- /dev/null +++ b/crates/store/src/state/loader.rs @@ -0,0 +1,298 @@ +//! 
Tree loading logic for the store state. +//! +//! This module handles loading and initializing the Merkle trees (account tree, nullifier tree, +//! and SMT forest) from storage backends. It supports different loading modes: +//! +//! - **Memory mode** (`rocksdb` feature disabled): Trees are rebuilt from the database on each +//! startup. +//! - **Persistent mode** (`rocksdb` feature enabled): Trees are loaded from persistent storage if +//! data exists, otherwise rebuilt from the database and persisted. + +use std::future::Future; +use std::path::Path; + +use miden_protocol::Word; +use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; +use miden_protocol::block::nullifier_tree::NullifierTree; +use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; +#[cfg(not(feature = "rocksdb"))] +use miden_protocol::crypto::merkle::smt::MemoryStorage; +use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; +#[cfg(feature = "rocksdb")] +use tracing::info; +use tracing::instrument; +#[cfg(feature = "rocksdb")] +use { + miden_crypto::merkle::smt::RocksDbStorage, + miden_protocol::crypto::merkle::smt::RocksDbConfig, +}; + +use crate::COMPONENT; +use crate::db::Db; +use crate::errors::{DatabaseError, StateInitializationError}; +use crate::inner_forest::InnerForest; + +// CONSTANTS +// ================================================================================================ + +/// Directory name for the account tree storage within the data directory. +pub const ACCOUNT_TREE_STORAGE_DIR: &str = "accounttree"; + +/// Directory name for the nullifier tree storage within the data directory. +pub const NULLIFIER_TREE_STORAGE_DIR: &str = "nullifiertree"; + +// STORAGE TYPE ALIAS +// ================================================================================================ + +/// The storage backend for trees. 
+#[cfg(feature = "rocksdb")] +pub type TreeStorage = RocksDbStorage; +#[cfg(not(feature = "rocksdb"))] +pub type TreeStorage = MemoryStorage; + +// ERROR CONVERSION +// ================================================================================================ + +/// Converts a `LargeSmtError` into a `StateInitializationError`. +pub fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInitializationError { + use miden_node_utils::ErrorReport; + match e { + LargeSmtError::Merkle(merkle_error) => { + StateInitializationError::DatabaseError(DatabaseError::MerkleError(merkle_error)) + }, + LargeSmtError::Storage(err) => { + StateInitializationError::AccountTreeIoError(err.as_report()) + }, + } +} + +// STORAGE LOADER TRAIT +// ================================================================================================ + +/// Trait for loading trees from storage. +/// +/// For `MemoryStorage`, the tree is rebuilt from database entries on each startup. +/// For `RocksDbStorage`, the tree is loaded directly from disk (much faster for large trees). +/// +/// Missing or corrupted storage is handled by the `verify_tree_consistency` check after loading, +/// which detects divergence between persistent storage and the database. If divergence is detected, +/// the user should manually delete the tree storage directories and restart the node. +pub trait StorageLoader: SmtStorage + Sized { + /// Creates a storage backend for the given domain. + fn create(data_dir: &Path, domain: &'static str) -> Result; + + /// Loads an account tree, either from persistent storage or by rebuilding from DB. + fn load_account_tree( + self, + db: &mut Db, + ) -> impl Future>, StateInitializationError>> + Send; + + /// Loads a nullifier tree, either from persistent storage or by rebuilding from DB. 
+ fn load_nullifier_tree( + self, + db: &mut Db, + ) -> impl Future>, StateInitializationError>> + Send; +} + +// MEMORY STORAGE IMPLEMENTATION +// ================================================================================================ + +#[cfg(not(feature = "rocksdb"))] +impl StorageLoader for MemoryStorage { + fn create(_data_dir: &Path, _domain: &'static str) -> Result { + Ok(MemoryStorage::default()) + } + + async fn load_account_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + let account_data = db.select_all_account_commitments().await?; + let smt_entries = account_data + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + let smt = LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) + } + + async fn load_nullifier_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + let nullifiers = db.select_all_nullifiers().await?; + let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); + NullifierTree::with_storage_from_entries(self, entries) + .map_err(StateInitializationError::FailedToCreateNullifierTree) + } +} + +// ROCKSDB STORAGE IMPLEMENTATION +// ================================================================================================ + +#[cfg(feature = "rocksdb")] +impl StorageLoader for RocksDbStorage { + fn create(data_dir: &Path, domain: &'static str) -> Result { + let storage_path = data_dir.join(domain); + + fs_err::create_dir_all(&storage_path) + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; + RocksDbStorage::open(RocksDbConfig::new(storage_path)) + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) + } + + async fn load_account_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + // If RocksDB storage has data, load from it 
directly + let has_data = self + .has_leaves() + .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string()))?; + if has_data { + let smt = load_smt(self)?; + return AccountTree::new(smt) + .map_err(StateInitializationError::FailedToCreateAccountsTree); + } + + info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); + let account_data = db.select_all_account_commitments().await?; + let smt_entries = account_data + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + let smt = LargeSmt::with_entries(self, smt_entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) + } + + async fn load_nullifier_tree( + self, + db: &mut Db, + ) -> Result>, StateInitializationError> { + // If RocksDB storage has data, load from it directly + let has_data = self + .has_leaves() + .map_err(|e| StateInitializationError::NullifierTreeIoError(e.to_string()))?; + if has_data { + let smt = load_smt(self)?; + return Ok(NullifierTree::new_unchecked(smt)); + } + + info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); + let nullifiers = db.select_all_nullifiers().await?; + let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); + NullifierTree::with_storage_from_entries(self, entries) + .map_err(StateInitializationError::FailedToCreateNullifierTree) + } +} + +// HELPER FUNCTIONS +// ================================================================================================ + +/// Loads an SMT from persistent storage. 
+#[cfg(feature = "rocksdb")] +pub fn load_smt(storage: S) -> Result, StateInitializationError> { + LargeSmt::new(storage).map_err(account_tree_large_smt_error_to_init_error) +} + +// TREE LOADING FUNCTIONS +// ================================================================================================ + +/// Loads the blockchain MMR from all block headers in the database. +#[instrument(target = COMPONENT, skip_all)] +pub async fn load_mmr(db: &mut Db) -> Result { + let block_commitments: Vec = db + .select_all_block_headers() + .await? + .iter() + .map(BlockHeader::commitment) + .collect(); + + // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX + // entries. + let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + + Ok(chain_mmr) +} + +/// Loads SMT forest with storage map and vault Merkle paths for all public accounts. +#[instrument(target = COMPONENT, skip_all, fields(block_num = %block_num))] +pub async fn load_smt_forest( + db: &mut Db, + block_num: BlockNumber, +) -> Result { + use miden_protocol::account::delta::AccountDelta; + + let public_account_ids = db.select_all_public_account_ids().await?; + + // Acquire write lock once for the entire initialization + let mut forest = InnerForest::new(); + + // Process each account + for account_id in public_account_ids { + // Get the full account from the database + let account_info = db.select_account(account_id).await?; + let account = account_info.details.expect("public accounts always have details in DB"); + + // Convert the full account to a full-state delta + let delta = + AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); + + // Use the unified update method (will recognize it's a full-state delta) + forest.update_account(block_num, &delta)?; + } + + Ok(forest) +} + +// CONSISTENCY VERIFICATION +// ================================================================================================ + +/// Verifies that tree roots 
match the expected roots from the latest block header. +/// +/// This check ensures the database and tree storage (memory or persistent) haven't diverged due to +/// corruption or incomplete shutdown. When trees are rebuilt from the database, they will naturally +/// match; when loaded from persistent storage, this catches any inconsistencies. +/// +/// # Arguments +/// * `account_tree_root` - Root of the loaded account tree +/// * `nullifier_tree_root` - Root of the loaded nullifier tree +/// * `db` - Database connection to fetch the latest block header +/// +/// # Errors +/// Returns `StateInitializationError::TreeStorageDiverged` if any root doesn't match. +#[instrument(target = COMPONENT, skip_all)] +pub async fn verify_tree_consistency( + account_tree_root: Word, + nullifier_tree_root: Word, + db: &mut Db, +) -> Result<(), StateInitializationError> { + // Fetch the latest block header to get the expected roots + let latest_header = db.select_block_header_by_block_num(None).await?; + + let (block_num, expected_account_root, expected_nullifier_root) = latest_header + .map(|header| (header.block_num(), header.account_root(), header.nullifier_root())) + .unwrap_or_default(); + + // Verify account tree root + if account_tree_root != expected_account_root { + return Err(StateInitializationError::TreeStorageDiverged { + tree_name: "Account", + block_num, + tree_root: account_tree_root, + block_root: expected_account_root, + }); + } + + // Verify nullifier tree root + if nullifier_tree_root != expected_nullifier_root { + return Err(StateInitializationError::TreeStorageDiverged { + tree_name: "Nullifier", + block_num, + tree_root: nullifier_tree_root, + block_root: expected_nullifier_root, + }); + } + + Ok(()) +} diff --git a/crates/store/src/state.rs b/crates/store/src/state/mod.rs similarity index 72% rename from crates/store/src/state.rs rename to crates/store/src/state/mod.rs index 2b05092404..b275f400a2 100644 --- a/crates/store/src/state.rs +++ 
b/crates/store/src/state/mod.rs @@ -12,48 +12,33 @@ use miden_node_proto::domain::account::{ AccountDetailRequest, AccountDetails, AccountInfo, - AccountProofRequest, - AccountProofResponse, + AccountRequest, + AccountResponse, AccountStorageDetails, AccountStorageMapDetails, AccountVaultDetails, - NetworkAccountPrefix, + SlotData, StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; -use miden_objects::account::{AccountHeader, AccountId, StorageSlot}; -use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; -use miden_objects::block::{ - AccountWitness, - BlockHeader, - BlockInputs, - BlockNumber, - Blockchain, - NullifierTree, - NullifierWitness, - ProvenBlock, -}; -use miden_objects::crypto::merkle::{ - Forest, - LargeSmt, - MemoryStorage, - Mmr, - MmrDelta, - MmrPeaks, - MmrProof, - PartialMmr, - SmtProof, - SmtStorage, -}; -use miden_objects::note::{NoteDetails, NoteId, NoteScript, Nullifier}; -use miden_objects::transaction::{OutputNote, PartialBlockchain}; -use miden_objects::utils::Serializable; -use miden_objects::{AccountError, Word}; +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness}; +use miden_protocol::block::account_tree::AccountWitness; +use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::crypto::merkle::smt::{LargeSmt, SmtProof, SmtStorage}; +use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; +use miden_protocol::transaction::{OutputNote, PartialBlockchain}; +use miden_protocol::utils::Serializable; use 
tokio::sync::{Mutex, RwLock, oneshot}; use tracing::{Instrument, info, info_span, instrument}; +use crate::accounts::{AccountTreeWithHistory, HistoricalError}; use crate::blocks::BlockStore; use crate::db::models::Page; use crate::db::models::queries::StorageMapValuesPage; @@ -77,7 +62,18 @@ use crate::errors::{ StateInitializationError, StateSyncError, }; -use crate::{AccountTreeWithHistory, COMPONENT, DataDirectory, InMemoryAccountTree}; +use crate::inner_forest::{InnerForest, WitnessError}; +use crate::{COMPONENT, DataDirectory}; + +mod loader; + +pub use loader::{ + ACCOUNT_TREE_STORAGE_DIR, + NULLIFIER_TREE_STORAGE_DIR, + StorageLoader, + TreeStorage, +}; +use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; // STRUCTURES // ================================================================================================ @@ -91,19 +87,16 @@ pub struct TransactionInputs { } /// Container for state that needs to be updated atomically. -struct InnerState +struct InnerState where S: SmtStorage, { - nullifier_tree: NullifierTree, + nullifier_tree: NullifierTree>, blockchain: Blockchain, - account_tree: AccountTreeWithHistory>>, + account_tree: AccountTreeWithHistory, } -impl InnerState -where - S: SmtStorage, -{ +impl InnerState { /// Returns the latest block number. fn latest_block_num(&self) -> BlockNumber { self.blockchain @@ -112,7 +105,10 @@ where } } -/// The rollup state +// CHAIN STATE +// ================================================================================================ + +/// The rollup state. pub struct State { /// The database which stores block headers, nullifiers, notes, and the latest states of /// accounts. @@ -124,17 +120,29 @@ pub struct State { /// Read-write lock used to prevent writing to a structure while it is being used. /// /// The lock is writer-preferring, meaning the writer won't be starved. 
- inner: RwLock, + inner: RwLock>, + + /// Forest-related state `(SmtForest, storage_map_roots, vault_roots)` with its own lock. + forest: RwLock, /// To allow readers to access the tree data while an update in being performed, and prevent /// TOCTOU issues, there must be no concurrent writers. This locks to serialize the writers. writer: Mutex<()>, + + /// Request termination of the process due to a fatal internal state error. + termination_ask: tokio::sync::mpsc::Sender, } impl State { - /// Loads the state from the `db`. + // CONSTRUCTOR + // -------------------------------------------------------------------------------------------- + + /// Loads the state from the data directory. #[instrument(target = COMPONENT, skip_all)] - pub async fn load(data_path: &Path) -> Result { + pub async fn load( + data_path: &Path, + termination_ask: tokio::sync::mpsc::Sender, + ) -> Result { let data_directory = DataDirectory::load(data_path.to_path_buf()) .map_err(StateInitializationError::DataDirectoryLoadError)?; @@ -148,33 +156,43 @@ impl State { .await .map_err(StateInitializationError::DatabaseLoadError)?; - let chain_mmr = load_mmr(&mut db).await?; - let block_headers = db.select_all_block_headers().await?; - // TODO: Account tree loading synchronization - // Currently `load_account_tree` loads all account commitments from the DB. This could - // potentially lead to inconsistency if the DB contains account states from blocks beyond - // `latest_block_num`, though in practice the DB writes are transactional and this - // should not occur. - let latest_block_num = block_headers - .last() - .map_or(BlockNumber::GENESIS, miden_objects::block::BlockHeader::block_num); - let account_tree = load_account_tree(&mut db, latest_block_num).await?; - let nullifier_tree = load_nullifier_tree(&mut db).await?; - - let inner = RwLock::new(InnerState { - nullifier_tree, - // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX - // entries. 
- blockchain: Blockchain::from_mmr_unchecked(chain_mmr), - account_tree, - }); + let blockchain = load_mmr(&mut db).await?; + let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); + + let account_storage = TreeStorage::create(data_path, ACCOUNT_TREE_STORAGE_DIR)?; + let account_tree = account_storage.load_account_tree(&mut db).await?; + + let nullifier_storage = TreeStorage::create(data_path, NULLIFIER_TREE_STORAGE_DIR)?; + let nullifier_tree = nullifier_storage.load_nullifier_tree(&mut db).await?; + + // Verify that tree roots match the expected roots from the database. + // This catches any divergence between persistent storage and the database caused by + // corruption or incomplete shutdown. + verify_tree_consistency(account_tree.root(), nullifier_tree.root(), &mut db).await?; + let account_tree = AccountTreeWithHistory::new(account_tree, latest_block_num); + + let forest = load_smt_forest(&mut db, latest_block_num).await?; + + let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); + + let forest = RwLock::new(forest); let writer = Mutex::new(()); let db = Arc::new(db); - Ok(Self { db, block_store, inner, writer }) + Ok(Self { + db, + block_store, + inner, + forest, + writer, + termination_ask, + }) } + // STATE MUTATOR + // -------------------------------------------------------------------------------------------- + /// Apply changes of a new block to the DB and in-memory data structures. 
/// /// ## Note on state consistency @@ -206,7 +224,7 @@ impl State { let header = block.header(); - let tx_commitment = block.transactions().commitment(); + let tx_commitment = block.body().transactions().commitment(); if header.tx_commitment() != tx_commitment { return Err(InvalidBlockError::InvalidBlockTxCommitment { @@ -217,7 +235,7 @@ impl State { } let block_num = header.block_num(); - let block_commitment = block.commitment(); + let block_commitment = header.commitment(); // ensures the right block header is being processed let prev_block = self @@ -264,9 +282,10 @@ impl State { // nullifiers can be produced only once let duplicate_nullifiers: Vec<_> = block + .body() .created_nullifiers() .iter() - .filter(|&n| inner.nullifier_tree.get_block_num(n).is_some()) + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) .copied() .collect(); if !duplicate_nullifiers.is_empty() { @@ -285,11 +304,20 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + block + .body() + .created_nullifiers() + .iter() + .map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { + // We do our best here to notify the serve routine, if it doesn't care (dropped the + // receiver) we can't do much. 
+ let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidNullifierRoot, + )); return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); } @@ -298,20 +326,24 @@ impl State { .account_tree .compute_mutations( block + .body() .updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), ) .map_err(|e| match e { - crate::HistoricalError::AccountTreeError(err) => { + HistoricalError::AccountTreeError(err) => { InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) }, - crate::HistoricalError::MerkleError(_) => { + HistoricalError::MerkleError(_) => { panic!("Unexpected MerkleError during account tree mutation computation") }, })?; if account_tree_update.as_mutation_set().root() != header.account_root() { + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidAccountRoot, + )); return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); } @@ -324,12 +356,13 @@ impl State { }; // build note tree - let note_tree = block.build_output_note_tree(); + let note_tree = block.body().compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } let notes = block + .body() .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -349,9 +382,9 @@ impl State { let note_record = NoteRecord { block_num, note_index, - note_id: note.id().into(), + note_id: note.id().as_word(), note_commitment: note.commitment(), - metadata: *note.metadata(), + metadata: note.metadata().clone(), details, inclusion_path, }; @@ -365,6 +398,16 @@ impl State { // Signals the write lock has been acquired, and the transaction can be committed let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); + // Extract public account updates with deltas before block is moved into async task. 
+ // Private accounts are filtered out since they don't expose their state changes. + let account_deltas = + Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { + match update.details() { + AccountUpdateDetails::Delta(delta) => Some(delta.clone()), + AccountUpdateDetails::Private => None, + } + })); + // The DB and in-memory state updates need to be synchronized and are partially // overlapping. Namely, the DB transaction only proceeds after this task acquires the // in-memory write lock. This requires the DB update to run concurrently, so a new task is @@ -425,10 +468,19 @@ impl State { Ok(()) } - .instrument(info_span!("update trees")) - .await + .in_current_span() + .await?; + + self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + + info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); + + Ok(()) } + // STATE ACCESSORS + // -------------------------------------------------------------------------------------------- + /// Queries a [BlockHeader] from the database, and returns it alongside its inclusion proof. /// /// If [None] is given as the value of `block_num`, the data for the latest [BlockHeader] is @@ -639,8 +691,8 @@ impl State { /// Loads data to synchronize a client. /// - /// The client's request contains a list of tag prefixes, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filter based on this + /// The client's request contains a list of note tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filtered based on this /// block range. /// /// # Arguments @@ -893,11 +945,8 @@ impl State { let found_unauthenticated_notes = self .db - .select_notes_by_commitment(unauthenticated_note_commitments) - .await? 
- .into_iter() - .map(|note| note.note_commitment) - .collect(); + .select_existing_note_commitments(unauthenticated_note_commitments) + .await?; Ok(TransactionInputs { account_commitment, @@ -920,109 +969,178 @@ impl State { self.db.select_network_account_by_prefix(id_prefix).await } - /// Returns the respective account proof with optional details, such as asset and storage - /// entries. + /// Returns network account IDs within the specified block range (based on account creation + /// block). /// - /// Note: The `block_num` parameter in the request is currently ignored and will always - /// return the current state. Historical block support will be implemented in a future update. - #[allow(clippy::too_many_lines)] - pub async fn get_account_proof( + /// The function may return fewer accounts than exist in the range if the result would exceed + /// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is + /// truncated at a block boundary to ensure all accounts from included blocks are returned. + /// + /// The response includes the last block number that was fully included in the result. + pub async fn get_all_network_accounts( &self, - account_request: AccountProofRequest, - ) -> Result { - let AccountProofRequest { block_num, account_id, details } = account_request; - let _ = block_num.ok_or_else(|| { - DatabaseError::NotImplemented( - "Handling of historical/past block numbers is not implemented yet".to_owned(), - ) - }); + block_range: RangeInclusive, + ) -> Result<(Vec, BlockNumber), DatabaseError> { + self.db.select_all_network_account_ids(block_range).await + } - // Lock inner state for the whole operation. We need to hold this lock to prevent the - // database, account tree and latest block number from changing during the operation, - // because changing one of them would lead to inconsistent state. 
- let inner_state = self.inner.read().await; + /// Returns an account witness and optionally account details at a specific block. + /// + /// The witness is a Merkle proof of inclusion in the account tree, proving the account's + /// state commitment. If `details` is requested, the method also returns the account's code, + /// vault assets, and storage data. Account details are only available for public accounts. + /// + /// If `block_num` is provided, returns the state at that historical block; otherwise, returns + /// the latest state. Note that historical states are only available for recent blocks close + /// to the chain tip. + pub async fn get_account( + &self, + account_request: AccountRequest, + ) -> Result { + let AccountRequest { block_num, account_id, details } = account_request; - let block_num = inner_state.account_tree.block_number_latest(); - let witness = inner_state.account_tree.open_latest(account_id); + if details.is_some() && !account_id.has_public_state() { + return Err(DatabaseError::AccountNotPublic(account_id)); + } - let account_details = if let Some(AccountDetailRequest { - code_commitment, - asset_vault_commitment, - storage_requests, - }) = details - { - let account_info = self.db.select_account(account_id).await?; + let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; - // if we get a query for a _private_ account _with_ details requested, we'll error out - let Some(account) = account_info.details else { - return Err(DatabaseError::AccountNotPublic(account_id)); - }; + let details = if let Some(request) = details { + Some(self.fetch_public_account_details(account_id, block_num, request).await?) + } else { + None + }; - let storage_header = account.storage().to_header(); + Ok(AccountResponse { block_num, witness, details }) + } - let mut storage_map_details = - Vec::::with_capacity(storage_requests.len()); + /// Returns an account witness (Merkle proof of inclusion in the account tree). 
+ /// + /// If `block_num` is provided, returns the witness at that historical block; + /// otherwise, returns the witness at the latest block. + async fn get_account_witness( + &self, + block_num: Option, + account_id: AccountId, + ) -> Result<(BlockNumber, AccountWitness), DatabaseError> { + let inner_state = self.inner.read().await; - for StorageMapRequest { slot_index, slot_data } in storage_requests { - let Some(StorageSlot::Map(storage_map)) = - account.storage().slots().get(slot_index as usize) - else { - return Err(AccountError::StorageSlotNotMap(slot_index).into()); - }; - let details = AccountStorageMapDetails::new(slot_index, slot_data, storage_map); - storage_map_details.push(details); - } + // Determine which block to query + let (block_num, witness) = if let Some(requested_block) = block_num { + // Historical query: use the account tree with history + let witness = inner_state + .account_tree + .open_at(account_id, requested_block) + .ok_or_else(|| DatabaseError::HistoricalBlockNotAvailable { + block_num: requested_block, + reason: "Block is either in the future or has been pruned from history" + .to_string(), + })?; + (requested_block, witness) + } else { + // Latest query: use the latest state + let block_num = inner_state.account_tree.block_number_latest(); + let witness = inner_state.account_tree.open_latest(account_id); + (block_num, witness) + }; - // Only include unknown account code blobs, which is equal to a account code digest - // mismatch. If `None` was requested, don't return any. - let account_code = code_commitment - .is_some_and(|code_commitment| code_commitment != account.code().commitment()) - .then(|| account.code().to_bytes()); + Ok((block_num, witness)) + } - // storage details - let storage_details = AccountStorageDetails { - header: storage_header, - map_details: storage_map_details, - }; + /// Fetches the account details (code, vault, storage) for a public account at the specified + /// block. 
+ /// + /// This method queries the database to fetch the account state and processes the detail + /// request to return only the requested information. + /// + /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. + /// Returns an error if the forest doesn't have data for the requested slot. + /// All-entries queries (`SlotData::All`) use the forest to return all entries. + async fn fetch_public_account_details( + &self, + account_id: AccountId, + block_num: BlockNumber, + detail_request: AccountDetailRequest, + ) -> Result { + let AccountDetailRequest { + code_commitment, + asset_vault_commitment, + storage_requests, + } = detail_request; - // Handle vault details based on the `asset_vault_commitment`. - // Similar to `code_commitment`, if the provided commitment matches, we don't return - // vault data. If no commitment is provided or it doesn't match, we return - // the vault data. If the number of vault contained assets are exceeding a - // limit, we signal this back in the response and the user must handle that - // in follow-up request. 
- let vault_details = match asset_vault_commitment { - Some(commitment) if commitment == account.vault().root() => { - // The client already has the correct vault data - AccountVaultDetails::empty() - }, - Some(_) => { - // The commitment doesn't match, so return vault data - AccountVaultDetails::new(account.vault()) - }, - None => { - // No commitment provided, so don't return vault data - AccountVaultDetails::empty() - }, - }; + if !account_id.has_public_state() { + return Err(DatabaseError::AccountNotPublic(account_id)); + } - Some(AccountDetails { - account_header: AccountHeader::from(account), - account_code, - vault_details, - storage_details, - }) - } else { - None + // Validate block exists in the blockchain before querying the database + self.validate_block_exists(block_num).await?; + + // Query account header and storage header together in a single DB call + let (account_header, storage_header) = self + .db + .select_account_header_with_storage_header_at_block(account_id, block_num) + .await? + .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; + + let account_code = match code_commitment { + Some(commitment) if commitment == account_header.code_commitment() => None, + Some(_) => { + self.db + .select_account_code_by_commitment(account_header.code_commitment()) + .await? 
+ }, + None => None, }; - let response = AccountProofResponse { - block_num, - witness, - details: account_details, + let vault_details = match asset_vault_commitment { + Some(commitment) if commitment == account_header.vault_root() => { + AccountVaultDetails::empty() + }, + Some(_) => { + let vault_assets = + self.db.select_account_vault_at_block(account_id, block_num).await?; + AccountVaultDetails::from_assets(vault_assets) + }, + None => AccountVaultDetails::empty(), }; - Ok(response) + let mut storage_map_details = + Vec::::with_capacity(storage_requests.len()); + + // Use forest for storage map queries + let forest_guard = self.forest.read().await; + + for StorageMapRequest { slot_name, slot_data } in storage_requests { + let details = match &slot_data { + SlotData::MapKeys(keys) => forest_guard + .open_storage_map(account_id, slot_name.clone(), block_num, keys) + .ok_or_else(|| DatabaseError::StorageRootNotFound { + account_id, + slot_name: slot_name.to_string(), + block_num, + })? + .map_err(DatabaseError::MerkleError)?, + SlotData::All => forest_guard + .storage_map_entries(account_id, slot_name.clone(), block_num) + .ok_or_else(|| DatabaseError::StorageRootNotFound { + account_id, + slot_name: slot_name.to_string(), + block_num, + })?, + }; + + storage_map_details.push(details); + } + + Ok(AccountDetails { + account_header, + account_code, + vault_details, + storage_details: AccountStorageDetails { + header: storage_header, + map_details: storage_map_details, + }, + }) } /// Returns storage map values for syncing within a block range. @@ -1050,9 +1168,24 @@ impl State { self.inner.read().await.latest_block_num() } - /// Runs database optimization. - pub async fn optimize_db(&self) -> Result<(), DatabaseError> { - self.db.optimize().await + /// Validates that a block exists in the blockchain + /// + /// # Attention + /// + /// Acquires a *read lock** on `self.inner`. 
+ /// + /// # Errors + /// + /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain. + async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> { + let inner = self.inner.read().await; + let latest_block_num = inner.latest_block_num(); + + if block_num > latest_block_num { + return Err(DatabaseError::BlockNotFound(block_num)); + } + + Ok(()) } /// Emits metrics for each database table's size. @@ -1068,26 +1201,15 @@ impl State { ) -> Result<(BlockNumber, Vec), DatabaseError> { self.db.get_account_vault_sync(account_id, block_range).await } - - /// Returns the unprocessed network notes, along with the next pagination token. - pub async fn get_unconsumed_network_notes( - &self, - page: Page, - ) -> Result<(Vec, Page), DatabaseError> { - self.db.select_unconsumed_network_notes(page).await - } - /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( &self, - network_account_id_prefix: NetworkAccountPrefix, + account_id: AccountId, block_num: BlockNumber, page: Page, ) -> Result<(Vec, Page), DatabaseError> { - self.db - .select_unconsumed_network_notes_for_account(network_account_id_prefix, block_num, page) - .await + self.db.select_unconsumed_network_notes(account_id, block_num, page).await } /// Returns the script for a note by its root. 
@@ -1107,46 +1229,39 @@ impl State { ) -> Result<(BlockNumber, Vec), DatabaseError> { self.db.select_transactions_records(account_ids, block_range).await } -} - -// UTILITIES -// ================================================================================================ - -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_nullifier_tree(db: &mut Db) -> Result { - let nullifiers = db.select_all_nullifiers().await?; - - NullifierTree::with_entries(nullifiers.into_iter().map(|info| (info.nullifier, info.block_num))) - .map_err(StateInitializationError::FailedToCreateNullifierTree) -} - -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_mmr(db: &mut Db) -> Result { - let block_commitments: Vec = db - .select_all_block_headers() - .await? - .iter() - .map(BlockHeader::commitment) - .collect(); - Ok(block_commitments.into()) -} - -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_account_tree( - db: &mut Db, - block_number: BlockNumber, -) -> Result, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?.into_iter().collect::>(); - - // Convert account_data to use account_id_to_smt_key - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - - let smt = LargeSmt::with_entries(MemoryStorage::default(), smt_entries) - .expect("Failed to create LargeSmt from database account data"); + /// Returns vault asset witnesses for the specified account and block number. 
+ pub async fn get_vault_asset_witnesses( + &self, + account_id: AccountId, + block_num: BlockNumber, + vault_keys: BTreeSet, + ) -> Result, WitnessError> { + let witnesses = self + .forest + .read() + .await + .get_vault_asset_witnesses(account_id, block_num, vault_keys)?; + Ok(witnesses) + } - let account_tree = AccountTree::new(smt).expect("Failed to create AccountTree"); - Ok(AccountTreeWithHistory::new(account_tree, block_number)) + /// Returns a storage map witness for the specified account and storage entry at the block + /// number. + /// + /// Note that the `raw_key` is the raw, user-provided key that needs to be hashed in order to + /// get the actual key into the storage map. + pub async fn get_storage_map_witness( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + raw_key: Word, + ) -> Result { + let witness = self + .forest + .read() + .await + .get_storage_map_witness(account_id, slot_name, block_num, raw_key)?; + Ok(witness) + } } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 1c28ce8fb5..e61930937e 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -15,10 +15,8 @@ version.workspace = true workspace = true [features] -# Enables depedencies intended for build script generation of version metadata. -vergen = ["dep:vergen", "dep:vergen-gitcl"] # Enables utility functions for testing traces created by some other crate's stack. 
-testing = ["dep:tokio", "miden-objects/testing"] +testing = ["miden-protocol/testing"] [dependencies] anyhow = { workspace = true } @@ -27,14 +25,16 @@ figment = { features = ["env", "toml"], version = "0.10" } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } -miden-objects = { workspace = true } +lru = { workspace = true } +miden-protocol = { workspace = true } opentelemetry = { version = "0.31" } opentelemetry-otlp = { default-features = false, features = ["grpc-tonic", "tls-roots", "trace"], version = "0.31" } opentelemetry_sdk = { features = ["rt-tokio", "testing"], version = "0.31" } rand = { workspace = true } serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } -tonic = { workspace = true } +tokio = { workspace = true } +tonic = { default-features = true, workspace = true } tower-http = { features = ["catch-panic"], workspace = true } tracing = { workspace = true } tracing-forest = { features = ["chrono"], optional = true, version = "0.2" } @@ -42,12 +42,5 @@ tracing-opentelemetry = { version = "0.32" } tracing-subscriber = { workspace = true } url = { workspace = true } -# Optional dependencies enabled by `vergen` feature. -# This must match the version expected by `vergen-gitcl`. -vergen = { "version" = "9.0", optional = true } -vergen-gitcl = { features = ["cargo", "rustc"], optional = true, version = "1.0" } -# Optional dependencies enabled by `testing` feature. 
-tokio = { optional = true, workspace = true } - [dev-dependencies] thiserror = { workspace = true } diff --git a/crates/utils/src/crypto.rs b/crates/utils/src/crypto.rs index f17b885807..44eac3f87b 100644 --- a/crates/utils/src/crypto.rs +++ b/crates/utils/src/crypto.rs @@ -1,5 +1,5 @@ -use miden_objects::crypto::rand::RpoRandomCoin; -use miden_objects::{Felt, Word}; +use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::{Felt, Word}; use rand::Rng; /// Creates a new RPO Random Coin with random seed diff --git a/crates/utils/src/fee.rs b/crates/utils/src/fee.rs index d7d167f242..5bde432842 100644 --- a/crates/utils/src/fee.rs +++ b/crates/utils/src/fee.rs @@ -1,9 +1,9 @@ -use miden_objects::asset::FungibleAsset; -use miden_objects::block::FeeParameters; -use miden_objects::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET; +use miden_protocol::asset::FungibleAsset; +use miden_protocol::block::FeeParameters; +use miden_protocol::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET; /// Derive a default, zero valued fee, payable to -/// [`miden_objects::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET`]. +/// [`miden_protocol::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET`]. 
pub fn test_fee() -> FungibleAsset { let faucet = ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().unwrap(); FungibleAsset::new(faucet, 0).unwrap() diff --git a/crates/utils/src/formatting.rs b/crates/utils/src/formatting.rs index 1c132b6d58..5845e09b18 100644 --- a/crates/utils/src/formatting.rs +++ b/crates/utils/src/formatting.rs @@ -1,7 +1,7 @@ use std::fmt::Display; use itertools::Itertools; -use miden_objects::transaction::{InputNoteCommitment, InputNotes, OutputNotes}; +use miden_protocol::transaction::{InputNoteCommitment, InputNotes, OutputNotes}; pub fn format_opt(opt: Option<&T>) -> String { opt.map_or("None".to_owned(), ToString::to_string) diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 4ff02f9397..530e971e49 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -7,9 +7,9 @@ pub mod formatting; pub mod grpc; pub mod limiter; pub mod logging; +pub mod lru_cache; pub mod panic; pub mod tracing; -pub mod version; pub trait ErrorReport: std::error::Error { /// Returns a string representation of the error and its source chain. diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index d024998417..1adf5be411 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -1,12 +1,17 @@ -//! Limit the size of a parameter list for a specific parameter +//! Limits for RPC and store parameters and payload sizes. //! -//! Used for: -//! 1. the external facing RPC -//! 2. limiting SQL statements not exceeding parameter limits +//! # Rationale
+//! - Parameter limits are kept across all multi-value RPC parameters. This caps worst-case SQL `IN` +//! clauses and keeps responses comfortably under the 4 MiB payload budget enforced in the store. +//! - Limits are enforced both at the RPC boundary and inside the store to prevent bypasses and to +//! avoid expensive queries even if validation is skipped earlier in the stack. +//! - `MAX_RESPONSE_PAYLOAD_BYTES` is set to 4 MiB (e.g. 
1000 nullifier rows at ~36 B each, 1000 +//! transactions summaries streamed in chunks). //! -//! The 1st is good to terminate invalid requests as early as possible, -//! where the second is both a fallback and a safeguard not benching -//! pointless parameter combinations. +//! Add new limits here so callers share the same values and rationale. + +/// Basic request limit. +pub const GENERAL_REQUEST_LIMIT: usize = 1000; #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] @@ -37,58 +42,92 @@ pub trait QueryParamLimiter { } } +/// Maximum payload size (in bytes) for paginated responses returned by the +/// store. +pub const MAX_RESPONSE_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; + /// Used for the following RPC endpoints /// * `state_sync` +/// +/// Capped at 1000 account IDs to keep SQL `IN` clauses bounded and response payloads under the +/// 4 MB budget. pub struct QueryParamAccountIdLimit; impl QueryParamLimiter for QueryParamAccountIdLimit { const PARAM_NAME: &str = "account_id"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// * `select_nullifiers_by_prefix` +/// +/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MB payload +/// budget and to avoid unbounded prefix scans. pub struct QueryParamNullifierPrefixLimit; impl QueryParamLimiter for QueryParamNullifierPrefixLimit { const PARAM_NAME: &str = "nullifier_prefix"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// * `select_nullifiers_by_prefix` /// * `sync_nullifiers` /// * `sync_state` +/// +/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MB budget. 
pub struct QueryParamNullifierLimit; impl QueryParamLimiter for QueryParamNullifierLimit { const PARAM_NAME: &str = "nullifier"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// * `get_note_sync` +/// +/// Capped at 1000 tags so note sync responses remain within the 4 MB payload budget. pub struct QueryParamNoteTagLimit; impl QueryParamLimiter for QueryParamNoteTagLimit { const PARAM_NAME: &str = "note_tag"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// `select_notes_by_id` +/// +/// The limit is set to 100 notes to keep responses within the 4 MiB payload cap because individual +/// notes are bounded to roughly 32 KiB. pub struct QueryParamNoteIdLimit; impl QueryParamLimiter for QueryParamNoteIdLimit { const PARAM_NAME: &str = "note_id"; - const LIMIT: usize = 1000; + const LIMIT: usize = 100; } /// Used for internal queries retrieving note inclusion proofs by commitment. +/// +/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MB +/// payload cap. pub struct QueryParamNoteCommitmentLimit; impl QueryParamLimiter for QueryParamNoteCommitmentLimit { const PARAM_NAME: &str = "note_commitment"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Only used internally, not exposed via public RPC. +/// +/// Capped at 1000 block headers to bound internal batch operations and keep payloads below the +/// 4 MB limit. pub struct QueryParamBlockLimit; impl QueryParamLimiter for QueryParamBlockLimit { const PARAM_NAME: &str = "block_header"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; +} + +/// Used for the following RPC endpoints +/// * `get_account` +/// +/// Capped at 64 total storage map keys across all slots to limit the number of SMT proofs +/// returned. 
+pub struct QueryParamStorageMapKeyTotalLimit; +impl QueryParamLimiter for QueryParamStorageMapKeyTotalLimit { + const PARAM_NAME: &str = "storage_map_key"; + const LIMIT: usize = 64; } diff --git a/crates/utils/src/lru_cache.rs b/crates/utils/src/lru_cache.rs new file mode 100644 index 0000000000..7e67515296 --- /dev/null +++ b/crates/utils/src/lru_cache.rs @@ -0,0 +1,32 @@ +use std::hash::Hash; +use std::num::NonZeroUsize; +use std::sync::Arc; + +use lru::LruCache as InnerCache; +use tokio::sync::Mutex; + +/// A newtype wrapper around an LRU cache. Ensures that the cache lock is not held across +/// await points. +#[derive(Clone)] +pub struct LruCache(Arc>>); + +impl LruCache +where + K: Hash + Eq, + V: Clone, +{ + /// Creates a new cache with the given capacity. + pub fn new(capacity: NonZeroUsize) -> Self { + Self(Arc::new(Mutex::new(InnerCache::new(capacity)))) + } + + /// Retrieves a value from the cache. + pub async fn get(&self, key: &K) -> Option { + self.0.lock().await.get(key).cloned() + } + + /// Puts a value into the cache. 
+ pub async fn put(&self, key: K, value: V) { + self.0.lock().await.put(key, value); + } +} diff --git a/crates/utils/src/tracing/span_ext.rs b/crates/utils/src/tracing/span_ext.rs index c887346b21..07ac008fe3 100644 --- a/crates/utils/src/tracing/span_ext.rs +++ b/crates/utils/src/tracing/span_ext.rs @@ -1,11 +1,11 @@ use core::time::Duration; use std::net::IpAddr; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::batch::BatchId; -use miden_objects::block::BlockNumber; -use miden_objects::transaction::TransactionId; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::batch::BatchId; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::TransactionId; use opentelemetry::trace::Status; use opentelemetry::{Key, StringValue, Value}; diff --git a/crates/utils/src/version/mod.rs b/crates/utils/src/version/mod.rs deleted file mode 100644 index 03ff66249f..0000000000 --- a/crates/utils/src/version/mod.rs +++ /dev/null @@ -1,184 +0,0 @@ -#[cfg(feature = "vergen")] -pub use vergen::vergen; - -/// Contains build metadata which can be formatted into a pretty --version -/// output using its Display implementation. -/// -/// The build metadata can be embedded at compile time using the `vergen` function -/// available from the `vergen` feature. See that functions description for a list -/// of the environment variables emitted which map nicely to [`LongVersion`]. -/// -/// Unfortunately these values must be transferred manually by the end user since the -/// env variables are only available once the caller's build script has run - which is -/// after this crate is compiled. 
-pub struct LongVersion { - pub version: &'static str, - pub sha: &'static str, - pub branch: &'static str, - pub dirty: &'static str, - pub features: &'static str, - pub rust_version: &'static str, - pub host: &'static str, - pub target: &'static str, - pub opt_level: &'static str, - pub debug: &'static str, -} - -impl std::fmt::Display for LongVersion { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let &Self { - version, - mut sha, - mut branch, - dirty, - features, - rust_version, - host, - target, - opt_level, - debug, - } = self; - - let dirty = match dirty { - "true" => "-dirty", - _ => "", - }; - - // This is the default value set by `vergen` when these values are missing. - // The git values can be missing for a published crate, and while we do attempt - // to set default values in the build.rs, its still possible for these to be skipped - // e.g. when cargo publish --allow-dirty is used. - if branch == "VERGEN_IDEMPOTENT_OUTPUT" { - branch = ""; - } - if sha == "VERGEN_IDEMPOTENT_OUTPUT" { - sha = ""; - } - - f.write_fmt(format_args!( - "{version} - -SHA: {sha}{dirty} -branch: {branch} -features: {features} -rust version: {rust_version} -target arch: {target} -host arch: {host} -opt-level: {opt_level} -debug: {debug} -" - )) - } -} - -#[cfg(feature = "vergen")] -mod vergen { - use std::path::PathBuf; - - use anyhow::{Context, Result}; - - /// Emits environment variables for build metadata intended for extended version information. - /// - /// The following environment variables are emitted: - /// - /// - `VERGEN_GIT_BRANCH` - /// - `VERGEN_GIT_SHA` - /// - `VERGEN_GIT_DIRTY` - /// - `VERGEN_RUSTC_SEMVER` - /// - `VERGEN_RUSTC_HOST_TRIPLE` - /// - `VERGEN_CARGO_TARGET_TRIPLE` - /// - `VERGEN_CARGO_FEATURES` - /// - `VERGEN_CARGO_OPT_LEVEL` - /// - `VERGEN_CARGO_DEBUG` - pub fn vergen() -> Result<()> { - if let Some(sha) = published_git_sha().context("Checking for published vcs info")? 
{ - // git data is not available if in a published state, so we set them manually. - println!("cargo::rustc-env=VERGEN_GIT_SHA={sha}"); - println!("cargo::rustc-env=VERGEN_GIT_BRANCH=NA (published)"); - println!("cargo::rustc-env=VERGEN_GIT_DIRTY="); - - vergen_gitcl::Emitter::new() - } else { - // In a non-published state so we can expect git instructions to work. - let mut emitter = vergen_gitcl::Emitter::new(); - emitter - .add_instructions(&git_instructions()?) - .context("Adding git instructions")?; - - emitter - } - .add_instructions(&cargo_instructions()?) - .context("Adding cargo instructions")? - .add_instructions(&rustc_instructions()?) - .context("Adding rustc instructions")? - .emit() - } - - /// Normal git info is lost on `cargo publish`, which instead adds a file containing the SHA1 - /// hash. - /// - /// This function returns the short SHA value. If present, this indicates this we're in a - /// published state. - fn published_git_sha() -> Result> { - let cargo_vcs_info = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(".cargo_vcs_info.json"); - if cargo_vcs_info.exists() { - // The file is small so reading to string is acceptable. - let contents = std::fs::read_to_string(cargo_vcs_info).context("Reading vcs info")?; - - // File format: - // { - // "git": { - // "sha1": "9d48046e9654d93a86212e77d6c92f14c95de44b" - // }, - // "path_in_vcs": "bin/node" - // } - let offset = contents.find(r#""sha1""#).context("Searching for sha1 property")? - + r#""sha1""#.len(); - - let sha1 = contents[offset + 1..] - .chars() - // Find and skip opening quote. - .skip_while(|&c| c != '"') - .skip(1) - // Take until closing quote. - .take_while(|&c| c != '"') - // Short SHA format is 7 digits. 
- .take(7) - .collect(); - - Ok(Some(sha1)) - } else { - Ok(None) - } - } - - fn git_instructions() -> Result { - const INCLUDE_UNTRACKED: bool = true; - const SHORT_SHA: bool = true; - - vergen_gitcl::GitclBuilder::default() - .branch(true) - .dirty(INCLUDE_UNTRACKED) - .sha(SHORT_SHA) - .build() - .context("Building git instructions") - } - - fn cargo_instructions() -> Result { - vergen_gitcl::CargoBuilder::default() - .debug(true) - .features(true) - .target_triple(true) - .opt_level(true) - .build() - .context("Building git instructions") - } - - fn rustc_instructions() -> Result { - vergen_gitcl::RustcBuilder::default() - .semver(true) - .host_triple(true) - .build() - .context("Building rustc instructions") - } -} diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index ebb6145b77..6115e7cff3 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -21,9 +21,12 @@ anyhow = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } +miden-protocol = { workspace = true } +miden-tx = { workspace = true } +thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["transport"], workspace = true } +tonic = { default-features = true, features = ["transport"], workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs new file mode 100644 index 0000000000..416b2beb92 --- /dev/null +++ b/crates/validator/src/block_validation/mod.rs @@ -0,0 +1,51 @@ +use std::sync::Arc; + +use miden_protocol::block::{BlockNumber, BlockSigner, ProposedBlock}; +use 
miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::errors::ProposedBlockError; +use miden_protocol::transaction::TransactionId; + +use crate::server::ValidatedTransactions; + +// BLOCK VALIDATION ERROR +// ================================================================================================ + +#[derive(thiserror::Error, Debug)] +pub enum BlockValidationError { + #[error("transaction {0} in block {1} has not been validated")] + TransactionNotValidated(TransactionId, BlockNumber), + #[error("failed to build block")] + BlockBuildingFailed(#[from] ProposedBlockError), +} + +// BLOCK VALIDATION +// ================================================================================================ + +/// Validates a block by checking that all transactions in the proposed block have been processed by +/// the validator in the past. +/// +/// Note: transactions are only looked up in the cache here; they are not removed from it. +pub async fn validate_block( + proposed_block: ProposedBlock, + signer: &S, + validated_transactions: Arc, +) -> Result { + // Check that all transactions in the proposed block have been validated + for tx_header in proposed_block.transactions() { + let tx_id = tx_header.id(); + if validated_transactions.get(&tx_id).await.is_none() { + return Err(BlockValidationError::TransactionNotValidated( + tx_id, + proposed_block.block_num(), + )); + } + } + + // Build the block header. + let (header, _) = proposed_block.into_header_and_body()?; + + // Sign the header. 
+ let signature = signer.sign(&header); + + Ok(signature) +} diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index d467b33fb5..a45112d275 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -1,4 +1,6 @@ +mod block_validation; mod server; +mod tx_validation; pub use server::Validator; diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 2ce3212906..bab8b5d628 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -1,18 +1,39 @@ use std::net::SocketAddr; +use std::num::NonZeroUsize; +use std::sync::Arc; use std::time::Duration; use anyhow::Context; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; +use miden_node_utils::ErrorReport; +use miden_node_utils::lru_cache::LruCache; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::grpc::grpc_trace_fn; +use miden_protocol::block::{BlockSigner, ProposedBlock}; +use miden_protocol::transaction::{ + ProvenTransaction, + TransactionHeader, + TransactionId, + TransactionInputs, +}; +use miden_tx::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; +use tonic::Status; use tower_http::catch_panic::CatchPanicLayer; use tower_http::trace::TraceLayer; use crate::COMPONENT; +use crate::block_validation::validate_block; +use crate::tx_validation::validate_transaction; + +/// Number of transactions to keep in the validated transactions cache. +const NUM_VALIDATED_TRANSACTIONS: NonZeroUsize = NonZeroUsize::new(10000).unwrap(); + +/// A type alias for a LRU cache that stores validated transactions. 
+pub type ValidatedTransactions = LruCache; // VALIDATOR // ================================================================================ @@ -20,16 +41,19 @@ use crate::COMPONENT; /// The handle into running the gRPC validator server. /// /// Facilitates the running of the gRPC server which implements the validator API. -pub struct Validator { +pub struct Validator { /// The address of the validator component. pub address: SocketAddr, /// Server-side timeout for an individual gRPC request. /// /// If the handler takes longer than this duration, the server cancels the call. pub grpc_timeout: Duration, + + /// The signer used to sign blocks. + pub signer: S, } -impl Validator { +impl Validator { /// Serves the validator RPC API. /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is @@ -60,7 +84,7 @@ impl Validator { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .timeout(self.grpc_timeout) - .add_service(api_server::ApiServer::new(ValidatorServer {})) + .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer))) .add_service(reflection_service) .add_service(reflection_service_alpha) .serve_with_incoming(TcpListenerStream::new(listener)) @@ -75,10 +99,21 @@ impl Validator { /// The underlying implementation of the gRPC validator server. /// /// Implements the gRPC API for the validator. -struct ValidatorServer {} +struct ValidatorServer { + signer: S, + validated_transactions: Arc, +} + +impl ValidatorServer { + fn new(signer: S) -> Self { + let validated_transactions = + Arc::new(ValidatedTransactions::new(NUM_VALIDATED_TRANSACTIONS)); + Self { signer, validated_transactions } + } +} #[tonic::async_trait] -impl api_server::Api for ValidatorServer { +impl api_server::Api for ValidatorServer { /// Returns the status of the validator. 
async fn status( &self, @@ -93,8 +128,61 @@ impl api_server::Api for ValidatorServer { /// Receives a proven transaction, then validates and stores it. async fn submit_proven_transaction( &self, - _request: tonic::Request, + request: tonic::Request, ) -> Result, tonic::Status> { - todo!() + let request = request.into_inner(); + // Deserialize the transaction. + let proven_tx = + ProvenTransaction::read_from_bytes(&request.transaction).map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid proven transaction")) + })?; + + // Deserialize the transaction inputs. + let Some(tx_inputs) = request.transaction_inputs else { + return Err(Status::invalid_argument("Missing transaction inputs")); + }; + let tx_inputs = TransactionInputs::read_from_bytes(&tx_inputs).map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid transaction inputs")) + })?; + + // Validate the transaction. + let validated_tx_header = + validate_transaction(proven_tx, tx_inputs).await.map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid transaction")) + })?; + + // Register the validated transaction. + let tx_id = validated_tx_header.id(); + self.validated_transactions.put(tx_id, validated_tx_header).await; + + Ok(tonic::Response::new(())) + } + + /// Validates a proposed block and returns the block header and body. + async fn sign_block( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + let proposed_block_bytes = request.into_inner().proposed_block; + + // Deserialize the proposed block. + let proposed_block = + ProposedBlock::read_from_bytes(&proposed_block_bytes).map_err(|err| { + tonic::Status::invalid_argument(format!( + "Failed to deserialize proposed block: {err}", + )) + })?; + + // Validate the block. 
+ let signature = + validate_block(proposed_block, &self.signer, self.validated_transactions.clone()) + .await + .map_err(|err| { + tonic::Status::invalid_argument(format!("Failed to validate block: {err}",)) + })?; + + // Send the signature. + let response = proto::blockchain::BlockSignature { signature: signature.to_bytes() }; + Ok(tonic::Response::new(response)) } } diff --git a/crates/validator/src/tx_validation/data_store.rs b/crates/validator/src/tx_validation/data_store.rs new file mode 100644 index 0000000000..ebd382e44a --- /dev/null +++ b/crates/validator/src/tx_validation/data_store.rs @@ -0,0 +1,107 @@ +/// NOTE: This module contains logic that will eventually be moved to the Validator component +/// when it is added to this repository. +use std::collections::BTreeSet; + +use miden_protocol::Word; +use miden_protocol::account::{AccountId, PartialAccount, StorageMapWitness}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::NoteScript; +use miden_protocol::transaction::{AccountInputs, PartialBlockchain, TransactionInputs}; +use miden_protocol::vm::FutureMaybeSend; +use miden_tx::{DataStore, DataStoreError, MastForestStore, TransactionMastStore}; + +// TRANSACTION INPUTS DATA STORE +// ================================================================================================ + +/// A [`DataStore`] implementation that wraps [`TransactionInputs`] +pub struct TransactionInputsDataStore { + tx_inputs: TransactionInputs, + mast_store: TransactionMastStore, +} + +impl TransactionInputsDataStore { + pub fn new(tx_inputs: TransactionInputs) -> Self { + let mast_store = TransactionMastStore::new(); + mast_store.load_account_code(tx_inputs.account().code()); + for code in tx_inputs.foreign_account_code() { + mast_store.load_account_code(code); + } + Self { tx_inputs, mast_store } + } +} + +impl DataStore for TransactionInputsDataStore { + fn get_transaction_inputs( + 
&self, + account_id: AccountId, + _ref_blocks: BTreeSet, + ) -> impl FutureMaybeSend> + { + async move { + if self.tx_inputs.account().id() != account_id { + return Err(DataStoreError::AccountNotFound(account_id)); + } + + Ok(( + self.tx_inputs.account().clone(), + self.tx_inputs.block_header().clone(), + self.tx_inputs.blockchain().clone(), + )) + } + } + + fn get_foreign_account_inputs( + &self, + foreign_account_id: AccountId, + _ref_block: BlockNumber, + ) -> impl FutureMaybeSend> { + async move { + self.tx_inputs.read_foreign_account_inputs(foreign_account_id).map_err(|err| { + DataStoreError::other_with_source("failed to read foreign account inputs", err) + }) + } + } + + fn get_vault_asset_witnesses( + &self, + _account_id: AccountId, + vault_root: Word, + vault_keys: BTreeSet, + ) -> impl FutureMaybeSend, DataStoreError>> { + async move { + // Retrieve native and foreign account asset witnesses from the advice inputs. + self.tx_inputs + .read_vault_asset_witnesses(vault_root, vault_keys) + .map_err(|err| { + DataStoreError::other_with_source("failed to read vault asset witnesses", err) + }) + } + } + + fn get_storage_map_witness( + &self, + _account_id: AccountId, + _map_root: Word, + _map_key: Word, + ) -> impl FutureMaybeSend> { + async move { + unimplemented!( + "get_storage_map_witness is not used during re-execution of transactions" + ) + } + } + + fn get_note_script( + &self, + _script_root: Word, + ) -> impl FutureMaybeSend, DataStoreError>> { + async move { unimplemented!("get_note_script is not used during re-execution of transactions") } + } +} + +impl MastForestStore for TransactionInputsDataStore { + fn get(&self, procedure_hash: &Word) -> Option> { + self.mast_store.get(procedure_hash) + } +} diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs new file mode 100644 index 0000000000..95419c3927 --- /dev/null +++ b/crates/validator/src/tx_validation/mod.rs @@ -0,0 +1,62 @@ +mod data_store; + +pub use 
data_store::TransactionInputsDataStore;
+use miden_protocol::MIN_PROOF_SECURITY_LEVEL;
+use miden_protocol::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs};
+use miden_tx::auth::UnreachableAuth;
+use miden_tx::{TransactionExecutor, TransactionExecutorError, TransactionVerifier};
+
+// TRANSACTION VALIDATION ERROR
+// ================================================================================================
+
+#[derive(thiserror::Error, Debug)]
+pub enum TransactionValidationError {
+    #[error("failed to re-execute the transaction")]
+    ExecutionError(#[from] TransactionExecutorError),
+    #[error("re-executed transaction did not match the provided proven transaction")]
+    Mismatch {
+        proven_tx_header: Box<TransactionHeader>,
+        executed_tx_header: Box<TransactionHeader>,
+    },
+    #[error("transaction proof verification failed")]
+    ProofVerificationFailed(#[from] miden_tx::TransactionVerifierError),
+}
+
+// TRANSACTION VALIDATION
+// ================================================================================================
+
+/// Validates a transaction by verifying its proof, executing it and comparing its header with the
+/// provided proven transaction.
+///
+/// Returns the header of the executed transaction if successful.
+pub async fn validate_transaction(
+    proven_tx: ProvenTransaction,
+    tx_inputs: TransactionInputs,
+) -> Result<TransactionHeader, TransactionValidationError> {
+    // First, verify the transaction proof
+    let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL);
+    tx_verifier.verify(&proven_tx)?;
+
+    // Create a DataStore from the transaction inputs.
+    let data_store = TransactionInputsDataStore::new(tx_inputs.clone());
+
+    // Execute the transaction.
+ let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts(); + let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = + TransactionExecutor::new(&data_store); + let executed_tx = executor + .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args) + .await?; + + // Validate that the executed transaction matches the submitted transaction. + let executed_tx_header: TransactionHeader = (&executed_tx).into(); + let proven_tx_header: TransactionHeader = (&proven_tx).into(); + if executed_tx_header == proven_tx_header { + Ok(executed_tx_header) + } else { + Err(TransactionValidationError::Mismatch { + proven_tx_header: proven_tx_header.into(), + executed_tx_header: executed_tx_header.into(), + }) + } +} diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index 606f5cf446..47706de3b3 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -12,10 +12,10 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] - [CheckNullifiers](#checknullifiers) -- [GetAccountDetails](#getaccountdetails) -- [GetAccountProofs](#getaccountproofs) +- [GetAccount](#getaccount) - [GetBlockByNumber](#getblockbynumber) - [GetBlockHeaderByNumber](#getblockheaderbynumber) +- [GetLimits](#getlimits) - [GetNotesById](#getnotesbyid) - [GetNoteScriptByRoot](#getnotescriptbyroot) - [SubmitProvenTransaction](#submitproventransaction) @@ -33,15 +33,79 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] ### CheckNullifiers -Request proofs for a set of nullifiers. +Request Sparse Merkle Tree opening proofs to verify whether nullifiers have been consumed. -### GetAccountDetails +#### Request -Request the latest state of an account. 
+```protobuf +message NullifierList { + repeated Digest nullifiers = 1; // List of nullifiers to check +} +``` + +#### Response + +```protobuf +message CheckNullifiersResponse { + repeated SmtOpening proofs = 1; // One proof per requested nullifier +} + +message SmtOpening { + SparseMerklePath path = 1; // Merkle authentication path + SmtLeaf leaf = 2; // Leaf at this position +} + +message SmtLeaf { + oneof leaf { + uint64 empty_leaf_index = 1; + SmtLeafEntry single = 2; + SmtLeafEntryList multiple = 3; + } +} +``` + +#### Understanding Proofs + +**Non-Inclusion (Nullifier NOT consumed):** +- `leaf` contains `empty_leaf_index` +- Note can still be consumed + +**Inclusion (Nullifier IS consumed):** +- `leaf` contains `single` or `multiple` with key-value pairs, including the `nullifier` key +- Note has been spent + +#### Verification + +```rust +use miden_crypto::merkle::{SmtProof, SmtProofError}; + +let block_header = get_latest_block_header(); +let nullifier_tree_root = block_header.state_commitment().nullifier_root(); + +let proof: SmtProof = smt_opening.try_into()?; + +match proof.verify_unset(&nullifier, &nullifier_tree_root) { + Ok(()) => { + // Nullifier is NOT in the tree - note can be consumed + } + Err(SmtProofError::ValueMismatch { .. }) => { + // Proof is valid, but nullifier has a value (not empty) - note already consumed + } + Err(_) => { + // Proof is invalid (wrong root, wrong key, etc.) + } +} +``` + +**Limits:** `nullifier` (1000) -### GetAccountProofs +### GetAccount -Request state proofs for accounts, including specific storage slots. +Request an account witness (Merkle proof of inclusion in the account tree) and optionally account details. + +The witness proves the account's state commitment in the account tree. If details are requested, the response also includes the account's header, code, vault assets, and storage data. Account details are only available for public accounts. 
+ +If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. ### GetBlockByNumber @@ -51,10 +115,32 @@ Request the raw data for a specific block. Request a specific block header and its inclusion proof. +### GetLimits + +Returns the query parameter limits configured for RPC endpoints. + +This endpoint allows clients to discover the maximum number of items that can be requested in a single call for various endpoints. The response contains a map of endpoint names to their parameter limits. + +**Example response structure:** + +```json +{ + "endpoints": { + "CheckNullifiers": { "parameters": { "nullifier": 1000 } }, + "SyncNullifiers": { "parameters": { "nullifier": 1000 } }, + "SyncState": { "parameters": { "account_id": 1000, "note_tag": 1000 } }, + "SyncNotes": { "parameters": { "note_tag": 1000 } }, + "GetNotesById": { "parameters": { "note_id": 100 } } + } +} +``` + ### GetNotesById Request a set of notes. +**Limits:** `note_id` (100) + ### GetNoteScriptByRoot Request the script for a note by its root. @@ -88,6 +174,8 @@ Caller specifies the `prefix_len` (currently only 16), the list of prefix values If the response is chunked (i.e., `block_num < block_to`), continue by issuing another request with `block_from = block_num + 1` to retrieve subsequent updates. +**Limits:** `nullifier` (1000) + ### SyncAccountVault Returns information that allows clients to sync asset values for specific public accounts within a block range. @@ -104,6 +192,8 @@ The response includes each note's metadata and inclusion proof. A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the tip of the chain. +**Limits:** `note_tag` (1000) + ### SyncState Iteratively sync data for specific notes and accounts. @@ -114,6 +204,8 @@ Each update response also contains info about new notes, accounts etc. created. The low part of note tags are redacted to preserve some degree of privacy. 
Returned data therefore contains additional notes which should be filtered out by the client. +**Limits:** `account_id` (1000), `note_tag` (1000) + ### SyncStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. diff --git a/docs/internal/src/rpc.md b/docs/internal/src/rpc.md index dcc9c379a0..c477b940d7 100644 --- a/docs/internal/src/rpc.md +++ b/docs/internal/src/rpc.md @@ -19,6 +19,14 @@ If there is a mismatch in version, clients will encounter an error while executi The server will reject any version that does not have the same major and minor version to it. This behaviour will change after v1.0.0., at which point only the major version will be taken into account. +## Query limits (`GetLimits`) + +The RPC service exposes a `GetLimits` endpoint which returns the query parameter limits enforced by the server for +multi-value parameters (e.g. number of nullifiers, note tags, note IDs, account IDs). + +These limits are defined centrally in `miden_node_utils::limiter` and are enforced at the RPC boundary (and also inside +the store) to keep database queries bounded and to keep response payloads within the ~4 MB budget. + ## Error Handling The RPC component uses domain-specific error enums for structured error reporting instead of proto-generated error types. This provides better control over error codes and makes error handling more maintainable. 
diff --git a/docs/internal/src/store.md b/docs/internal/src/store.md index 5f6f5b0361..1929b7c491 100644 --- a/docs/internal/src/store.md +++ b/docs/internal/src/store.md @@ -18,5 +18,3 @@ startup its likely that you created the database _before_ making schema changes The store consists mainly of a gRPC server which answers requests from the RPC and block-producer components, as well as new block submissions from the block-producer. - -A lightweight background process performs database query optimisation by analysing database queries and statistics. diff --git a/proto/build.rs b/proto/build.rs index 87eb57e554..3d4047e24d 100644 --- a/proto/build.rs +++ b/proto/build.rs @@ -6,19 +6,19 @@ use miette::{Context, IntoDiagnostic}; use protox::prost::Message; const RPC_PROTO: &str = "rpc.proto"; -const STORE_RPC_PROTO: &str = "store/rpc.proto"; -const STORE_NTX_BUILDER_PROTO: &str = "store/ntx_builder.proto"; -const STORE_BLOCK_PRODUCER_PROTO: &str = "store/block_producer.proto"; -const STORE_SHARED_PROTO: &str = "store/shared.proto"; -const BLOCK_PRODUCER_PROTO: &str = "block_producer.proto"; +// Unified internal store API (store.Rpc, store.BlockProducer, store.NtxBuilder). +// We compile the same file three times to preserve existing descriptor names. 
+const STORE_RPC_PROTO: &str = "internal/store.proto"; +const STORE_NTX_BUILDER_PROTO: &str = "internal/store.proto"; +const STORE_BLOCK_PRODUCER_PROTO: &str = "internal/store.proto"; +const BLOCK_PRODUCER_PROTO: &str = "internal/block_producer.proto"; const REMOTE_PROVER_PROTO: &str = "remote_prover.proto"; -const VALIDATOR_PROTO: &str = "validator.proto"; +const VALIDATOR_PROTO: &str = "internal/validator.proto"; const RPC_DESCRIPTOR: &str = "rpc_file_descriptor.bin"; const STORE_RPC_DESCRIPTOR: &str = "store_rpc_file_descriptor.bin"; const STORE_NTX_BUILDER_DESCRIPTOR: &str = "store_ntx_builder_file_descriptor.bin"; const STORE_BLOCK_PRODUCER_DESCRIPTOR: &str = "store_block_producer_file_descriptor.bin"; -const STORE_SHARED_DESCRIPTOR: &str = "store_shared_file_descriptor.bin"; const BLOCK_PRODUCER_DESCRIPTOR: &str = "block_producer_file_descriptor.bin"; const REMOTE_PROVER_DESCRIPTOR: &str = "remote_prover_file_descriptor.bin"; const VALIDATOR_DESCRIPTOR: &str = "validator_file_descriptor.bin"; @@ -69,12 +69,6 @@ fn main() -> miette::Result<()> { .into_diagnostic() .wrap_err("writing store block producer file descriptor")?; - let store_shared_file_descriptor = protox::compile([STORE_SHARED_PROTO], includes)?; - let store_shared_path = PathBuf::from(&out).join(STORE_SHARED_DESCRIPTOR); - fs::write(&store_shared_path, store_shared_file_descriptor.encode_to_vec()) - .into_diagnostic() - .wrap_err("writing store shared file descriptor")?; - let block_producer_file_descriptor = protox::compile([BLOCK_PRODUCER_PROTO], includes)?; let block_producer_path = PathBuf::from(&out).join(BLOCK_PRODUCER_DESCRIPTOR); fs::write(&block_producer_path, block_producer_file_descriptor.encode_to_vec()) diff --git a/proto/proto/README.md b/proto/proto/README.md new file mode 100644 index 0000000000..5a3a9e321f --- /dev/null +++ b/proto/proto/README.md @@ -0,0 +1,19 @@ +# Proto Files Organization + +The files are organized by a visibility hierarchy, where the root directory contains 
the public-facing RPC and remote prover protocols, while the `types` directory contains the data types used by these protocols. The `internal` directory contains the internal protocols used by the node, such as the store, non-transactional data, and block producer protocols. + +The organization of the files is as follows: + +``` +rpc.proto +remote_prover.proto +types/ +├── primitives.proto +└── xxx.proto +internal/ +├── store.proto +├── ntx.proto +└── block_producer.proto +``` + +The public-facing files should only allow the usage of the `types` directory, to avoid service reflection to internal protocols. diff --git a/proto/proto/block_producer.proto b/proto/proto/internal/block_producer.proto similarity index 75% rename from proto/proto/block_producer.proto rename to proto/proto/internal/block_producer.proto index dae8293feb..acd97151a9 100644 --- a/proto/proto/block_producer.proto +++ b/proto/proto/internal/block_producer.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package block_producer; +import "rpc.proto"; import "types/note.proto"; import "types/blockchain.proto"; import "types/primitives.proto"; @@ -13,10 +14,10 @@ import "google/protobuf/empty.proto"; service Api { // Returns the status info. - rpc Status(google.protobuf.Empty) returns (BlockProducerStatus) {} + rpc Status(google.protobuf.Empty) returns (rpc.BlockProducerStatus) {} - // Submits proven transaction to the Miden network - rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (SubmitProvenTransactionResponse) {} + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} // Submits a proven batch to the Miden network. // @@ -28,7 +29,9 @@ service Api { // // All transactions in the batch but not in the mempool must build on the current mempool // state following normal transaction submission rules. 
- rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (SubmitProvenBatchResponse) {} + // + // Returns the node's current block height. + rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} // Subscribe to mempool events. // @@ -43,35 +46,6 @@ service Api { rpc MempoolSubscription(MempoolSubscriptionRequest) returns (stream MempoolEvent) {} } -// STATUS -// ================================================================================================ - -// Represents the status of the block producer. -message BlockProducerStatus { - // The block producer's running version. - string version = 1; - - // The block producer's status. - string status = 2; -} - -// SUBMIT PROVEN TRANSACTION -// ================================================================================================ - -// Represents the result of submitting proven transaction. -message SubmitProvenTransactionResponse { - // The node's current block height. - fixed32 block_height = 1; -} - -// SUBMIT PROVEN TRANSACTION -// ================================================================================================ - -message SubmitProvenBatchResponse { - // The node's current block height. - fixed32 block_height = 1; -} - // MEMPOOL SUBSCRIPTION // ================================================================================================ @@ -106,7 +80,7 @@ message MempoolEvent { // Changes to a network account, if any. This includes creation of new network accounts. // // The account delta is encoded using [winter_utils::Serializable] implementation - // for [miden_objects::account::delta::AccountDelta]. + // for [miden_protocol::account::delta::AccountDelta]. 
optional bytes network_account_delta = 4;
 }
diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto
new file mode 100644
index 0000000000..c68e7b30a5
--- /dev/null
+++ b/proto/proto/internal/store.proto
@@ -0,0 +1,436 @@
+// Unified specification of the internal store gRPC APIs.
+syntax = "proto3";
+package store;
+
+import "google/protobuf/empty.proto";
+import "types/account.proto";
+import "types/blockchain.proto";
+import "types/transaction.proto";
+import "types/note.proto";
+import "types/primitives.proto";
+import "rpc.proto";
+
+// RPC STORE API
+// ================================================================================================
+
+// Store API for the RPC component
+service Rpc {
+    // Returns the status info.
+    rpc Status(google.protobuf.Empty) returns (rpc.StoreStatus) {}
+
+    // Returns a Sparse Merkle Tree opening proof for each requested nullifier
+    //
+    // Each proof demonstrates either:
+    // - **Inclusion**: Nullifier exists in the tree (note was consumed)
+    // - **Non-inclusion**: Nullifier does not exist (note was not consumed)
+    //
+    // The `leaf` field indicates the status:
+    // - `empty_leaf_index`: Non-inclusion proof
+    // - `single` or `multiple`: Inclusion proof if the nullifier key is present
+    //
+    // Verify proofs against the nullifier tree root in the latest block header.
+    rpc CheckNullifiers(rpc.NullifierList) returns (rpc.CheckNullifiersResponse) {}
+
+    // Returns the latest details of the specified account.
+    rpc GetAccount(rpc.AccountRequest) returns (rpc.AccountResponse) {}
+
+    // Returns raw block data for the specified block number.
+    rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {}
+
+    // Retrieves block header by given block number. Optionally, it also returns the MMR path
+    // and current chain length to authenticate the block's inclusion.
+ rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {} + + // Returns a list of committed notes matching the provided note IDs. + rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} + + // Returns the script for a note by its root. + rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} + + // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. + // + // Note that only 16-bit prefixes are supported at this time. + rpc SyncNullifiers(rpc.SyncNullifiersRequest) returns (rpc.SyncNullifiersResponse) {} + + // Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. + // + // requester specifies the `note_tags` they are interested in, and the block height from which to search for new for + // matching notes for. The request will then return the next block containing any note matching the provided tags. + // + // The response includes each note's metadata and inclusion proof. + // + // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the + // tip of the chain. + rpc SyncNotes(rpc.SyncNotesRequest) returns (rpc.SyncNotesResponse) {} + + // Returns info which can be used by the requester to sync up to the latest state of the chain + // for the objects (accounts, notes, nullifiers) the requester is interested in. + // + // This request returns the next block containing requested data. It also returns `chain_tip` + // which is the latest block number in the chain. requester is expected to repeat these requests + // in a loop until `response.block_header.block_num == response.chain_tip`, at which point + // the requester is fully synchronized with the chain. + // + // Each request also returns info about new notes, nullifiers etc. created. It also returns + // Chain MMR delta that can be used to update the state of Chain MMR. 
This includes both chain + // MMR peaks and chain MMR nodes. + // + // For preserving some degree of privacy, note tags and nullifiers filters contain only high + // part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make + // additional filtering of that data on its side. + rpc SyncState(rpc.SyncStateRequest) returns (rpc.SyncStateResponse) {} + + // Returns account vault updates for specified account within a block range. + rpc SyncAccountVault(rpc.SyncAccountVaultRequest) returns (rpc.SyncAccountVaultResponse) {} + + // Returns storage map updates for specified account and storage slots within a block range. + rpc SyncStorageMaps(rpc.SyncStorageMapsRequest) returns (rpc.SyncStorageMapsResponse) {} + + // Returns transactions records for specific accounts within a block range. + rpc SyncTransactions(rpc.SyncTransactionsRequest) returns (rpc.SyncTransactionsResponse) {} +} + +// BLOCK PRODUCER STORE API +// ================================================================================================ + +// Store API for the BlockProducer component +service BlockProducer { + // Applies changes of a new block to the DB and in-memory data structures. + rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. + rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {} + + // Returns data required to prove the next block. + rpc GetBlockInputs(BlockInputsRequest) returns (BlockInputs) {} + + // Returns the inputs for a transaction batch. + rpc GetBatchInputs(BatchInputsRequest) returns (BatchInputs) {} + + // Returns data required to validate a new transaction. 
+ rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} +} + +// GET BLOCK INPUTS +// ================================================================================================ + +// Returns data required to prove the next block. +message BlockInputsRequest { + // IDs of all accounts updated in the proposed block for which to retrieve account witnesses. + repeated account.AccountId account_ids = 1; + + // Nullifiers of all notes consumed by the block for which to retrieve witnesses. + // + // Due to note erasure it will generally not be possible to know the exact set of nullifiers + // a block will create, unless we pre-execute note erasure. So in practice, this set of + // nullifiers will be the set of nullifiers of all proven batches in the block, which is a + // superset of the nullifiers the block may create. + // + // However, if it is known that a certain note will be erased, it would not be necessary to + // provide a nullifier witness for it. + repeated primitives.Digest nullifiers = 2; + + // Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. + repeated primitives.Digest unauthenticated_notes = 3; + + // Array of block numbers referenced by all batches in the block. + repeated fixed32 reference_blocks = 4; +} + +// Represents the result of getting block inputs. +message BlockInputs { + // A nullifier returned as a response to the `GetBlockInputs`. + message NullifierWitness { + // The nullifier. + primitives.Digest nullifier = 1; + + // The SMT proof to verify the nullifier's inclusion in the nullifier tree. + primitives.SmtOpening opening = 2; + } + // The latest block header. + blockchain.BlockHeader latest_block_header = 1; + + // Proof of each requested unauthenticated note's inclusion in a block, **if it existed in + // the store**. 
+ repeated note.NoteInclusionInBlockProof unauthenticated_note_proofs = 2; + + // The serialized chain MMR which includes proofs for all blocks referenced by the + // above note inclusion proofs as well as proofs for inclusion of the requested blocks + // referenced by the batches in the block. + bytes partial_block_chain = 3; + + // The state commitments of the requested accounts and their authentication paths. + repeated account.AccountWitness account_witnesses = 4; + + // The requested nullifiers and their authentication paths. + repeated NullifierWitness nullifier_witnesses = 5; +} + +// GET BATCH INPUTS +// ================================================================================================ + +// Returns the inputs for a transaction batch. +message BatchInputsRequest { + // List of unauthenticated note commitments to be queried from the database. + repeated primitives.Digest note_commitments = 1; + // Set of block numbers referenced by transactions. + repeated fixed32 reference_blocks = 2; +} + +// Represents the result of getting batch inputs. +message BatchInputs { + // The block header that the transaction batch should reference. + blockchain.BlockHeader batch_reference_block_header = 1; + + // Proof of each _found_ unauthenticated note's inclusion in a block. + repeated note.NoteInclusionInBlockProof note_proofs = 2; + + // The serialized chain MMR which includes proofs for all blocks referenced by the + // above note inclusion proofs as well as proofs for inclusion of the blocks referenced + // by the transactions in the batch. + bytes partial_block_chain = 3; +} + +// GET TRANSACTION INPUTS +// ================================================================================================ + +// Returns data required to validate a new transaction. +message TransactionInputsRequest { + // ID of the account against which a transaction is executed. + account.AccountId account_id = 1; + // Set of nullifiers consumed by this transaction. 
+ repeated primitives.Digest nullifiers = 2; + // Set of unauthenticated note commitments to check for existence on-chain. + // + // These are notes which were not on-chain at the state the transaction was proven, + // but could by now be present. + repeated primitives.Digest unauthenticated_notes = 3; +} + +// Represents the result of getting transaction inputs. +message TransactionInputs { + // An account returned as a response to the `GetTransactionInputs`. + message AccountTransactionInputRecord { + // The account ID. + account.AccountId account_id = 1; + + // The latest account commitment, zero commitment if the account doesn't exist. + primitives.Digest account_commitment = 2; + } + + // A nullifier returned as a response to the `GetTransactionInputs`. + message NullifierTransactionInputRecord { + // The nullifier ID. + primitives.Digest nullifier = 1; + + // The block at which the nullifier has been consumed, zero if not consumed. + fixed32 block_num = 2; + } + + // Account state proof. + AccountTransactionInputRecord account_state = 1; + + // List of nullifiers that have been consumed. + repeated NullifierTransactionInputRecord nullifiers = 2; + + // List of unauthenticated notes that were not found in the database. + repeated primitives.Digest found_unauthenticated_notes = 3; + + // The node's current block height. + fixed32 block_height = 4; + + // Whether the account ID prefix is unique. Only relevant for account creation requests. + optional bool new_account_id_prefix_is_unique = 5; // TODO: Replace this with an error. When a general error message exists. +} + +// NTX BUILDER STORE API +// ================================================================================================ + +// Store API for the network transaction builder component +service NtxBuilder { + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. 
+ rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {} + + // Returns a paginated list of unconsumed network notes. + rpc GetUnconsumedNetworkNotes(UnconsumedNetworkNotesRequest) returns (UnconsumedNetworkNotes) {} + + // Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + // header for executing network transactions. If the block number is not provided, the latest + // header and peaks will be retrieved. + rpc GetCurrentBlockchainData(blockchain.MaybeBlockNumber) returns (CurrentBlockchainData) {} + + // Returns the latest state of a network account with the specified account prefix. + rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {} + + // Returns a list of all network account ids. + rpc GetNetworkAccountIds(rpc.BlockRange) returns (NetworkAccountIdList) {} + + // Returns the latest details of the specified account. + rpc GetAccount(rpc.AccountRequest) returns (rpc.AccountResponse) {} + + // Returns the script for a note by its root. + rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {} + + // Returns vault asset witnesses for the specified account. + rpc GetVaultAssetWitnesses(VaultAssetWitnessesRequest) returns (VaultAssetWitnessesResponse) {} + + // Returns a storage map witness for the specified account and storage map entry. + rpc GetStorageMapWitness(StorageMapWitnessRequest) returns (StorageMapWitnessResponse) {} +} + +// GET NETWORK ACCOUNT DETAILS BY PREFIX +// ================================================================================================ + +// Account ID prefix. +message AccountIdPrefix { + // Account ID prefix. + fixed32 account_id_prefix = 1; +} + +// Represents the result of getting network account details by prefix. +message MaybeAccountDetails { + // Account details. 
+ optional account.AccountDetails details = 1; +} + +// GET UNCONSUMED NETWORK NOTES +// ================================================================================================ + +// Returns a paginated list of unconsumed network notes for an account. +// +// Notes created or consumed after the specified block are excluded from the result. +message UnconsumedNetworkNotesRequest { + // This should be null on the first call, and set to the response token until the response token + // is null, at which point all data has been fetched. + // + // Note that this token is only valid if used with the same parameters. + optional uint64 page_token = 1; + + // Number of notes to retrieve per page. + uint64 page_size = 2; + + // The full account ID to filter notes by. + account.AccountId account_id = 3; + + // The block number to filter the returned notes by. + // + // Notes that are created or consumed after this block are excluded from the result. + fixed32 block_num = 4; +} + +// Represents the result of getting the unconsumed network notes. +message UnconsumedNetworkNotes { + // An opaque pagination token. + // + // Use this in your next request to get the next + // set of data. + // + // Will be null once there is no more data remaining. + optional uint64 next_token = 1; + + // The list of unconsumed network notes. + repeated note.NetworkNote notes = 2; +} + +// GET NETWORK ACCOUNTS +// ================================================================================================ + +// Represents the result of getting the network account ids. +message NetworkAccountIdList { + // Pagination information. + rpc.PaginationInfo pagination_info = 1; + + // The list of network account ids. + repeated account.AccountId account_ids = 2; +} + +// GET CURRENT BLOCKCHAIN DATA +// ================================================================================================ + +// Current blockchain data based on the requested block number. 
+message CurrentBlockchainData {
+    // Commitments that represent the current state according to the MMR.
+    repeated primitives.Digest current_peaks = 1;
+    // Current block header.
+    optional blockchain.BlockHeader current_block_header = 2;
+}
+
+// GET VAULT ASSET WITNESSES
+// ================================================================================================
+
+// Request for vault asset witnesses for a specific account.
+message VaultAssetWitnessesRequest {
+    // The account ID for which to retrieve vault asset witnesses.
+    account.AccountId account_id = 1;
+
+    // Set of asset vault keys to retrieve witnesses for.
+    repeated primitives.Digest vault_keys = 2;
+
+    // The witnesses returned correspond to the account state at the specified block number.
+    //
+    // Optional block number. If not provided, uses the latest state.
+    //
+    // The specified block number should be relatively near the chain tip else an error will be
+    // returned.
+    optional fixed32 block_num = 3;
+}
+
+// Response containing vault asset witnesses.
+message VaultAssetWitnessesResponse {
+    // A vault asset witness containing the asset and its proof.
+    message VaultAssetWitness {
+        // The SMT opening proof for the asset's inclusion in the vault.
+        primitives.SmtOpening proof = 1;
+    }
+
+    // Block number at which the witnesses were generated.
+    //
+    // The witnesses returned correspond to the account state at the specified block number.
+    fixed32 block_num = 1;
+
+    // List of asset witnesses.
+    repeated VaultAssetWitness asset_witnesses = 2;
+}
+
+// GET STORAGE MAP WITNESS
+// ================================================================================================
+
+// Request for a storage map witness for a specific account and storage slot.
+message StorageMapWitnessRequest {
+    // The account ID for which to retrieve the storage map witness.
+    account.AccountId account_id = 1;
+
+    // The raw, user-provided storage map key for which to retrieve the witness.
+ primitives.Digest map_key = 2; + + // Optional block number. If not provided, uses the latest state. + // + // The witness returned corresponds to the account state at the specified block number. + // + // The specified block number should be relatively near the chain tip else an error will be + // returned. + optional fixed32 block_num = 3; + + // The storage slot name for the map. + string slot_name = 4; +} + +// Response containing a storage map witness. +message StorageMapWitnessResponse { + // Storage map witness data. + message StorageWitness { + // The raw, user-provided storage map key. + primitives.Digest key = 1; + + // The SMT opening proof for the key-value pair. + primitives.SmtOpening proof = 3; + } + + // The storage map witness. + StorageWitness witness = 1; + + // Block number at which the witness was generated. + fixed32 block_num = 2; +} diff --git a/proto/proto/validator.proto b/proto/proto/internal/validator.proto similarity index 81% rename from proto/proto/validator.proto rename to proto/proto/internal/validator.proto index 6d4e801b45..e3bb02a61c 100644 --- a/proto/proto/validator.proto +++ b/proto/proto/internal/validator.proto @@ -4,6 +4,7 @@ package validator; import "types/transaction.proto"; import "types/blockchain.proto"; +import "types/primitives.proto"; import "google/protobuf/empty.proto"; // VALIDATOR API @@ -16,9 +17,12 @@ service Api { // Submits a transaction to the validator. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (google.protobuf.Empty) {} + + // Validates a proposed block and returns the block header and body. + rpc SignBlock(blockchain.ProposedBlock) returns (blockchain.BlockSignature) {} } -// STATUS +// VALIDATOR STATUS // ================================================================================================ // Represents the status of the validator. 
diff --git a/proto/proto/remote_prover.proto b/proto/proto/remote_prover.proto index 49132fd6f1..28a0ad485a 100644 --- a/proto/proto/remote_prover.proto +++ b/proto/proto/remote_prover.proto @@ -33,7 +33,7 @@ message ProofRequest { // type-specific: // - TRANSACTION: TransactionInputs encoded. // - BATCH: ProposedBatch encoded. - // - BLOCK: ProposedBlock encoded. + // - BLOCK: BlockProofRequest encoded. bytes payload = 2; } @@ -42,7 +42,7 @@ message Proof { // Serialized proof bytes. // - TRANSACTION: Returns an encoded ProvenTransaction. // - BATCH: Returns an encoded ProvenBatch. - // - BLOCK: Returns an encoded ProvenBlock. + // - BLOCK: Returns an encoded BlockProof. bytes payload = 1; } @@ -56,8 +56,8 @@ service ProxyStatusApi { // Status of an individual worker in the proxy. message ProxyWorkerStatus { - // The address of the worker. - string address = 1; + // The name of the worker. + string name = 1; // The version of the worker. string version = 2; // The health status of the worker. diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index dd3f1d6d6c..b0f1046f59 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -7,9 +7,6 @@ import "types/blockchain.proto"; import "types/note.proto"; import "types/primitives.proto"; import "types/transaction.proto"; -import "block_producer.proto"; -import "store/rpc.proto"; -import "store/shared.proto"; import "google/protobuf/empty.proto"; // RPC API @@ -20,30 +17,37 @@ service Api { // Returns the status info of the node. rpc Status(google.protobuf.Empty) returns (RpcStatus) {} - // Returns a nullifier proof for each of the requested nullifiers. - rpc CheckNullifiers(rpc_store.NullifierList) returns (rpc_store.CheckNullifiersResponse) {} - - // Returns the latest state of an account with the specified ID. 
- rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} + // Returns a Sparse Merkle Tree opening proof for each requested nullifier + // + // Each proof demonstrates either: + // - **Inclusion**: Nullifier exists in the tree (note was consumed) + // - **Non-inclusion**: Nullifier does not exist (note was not consumed) + // + // The `leaf` field indicates the status: + // * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + // * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. + // + // Verify proofs against the nullifier tree root in the latest block header. + rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - // Returns the latest state proof of the specified account. - rpc GetAccountProof(rpc_store.AccountProofRequest) returns (rpc_store.AccountProofResponse) {} + // Returns the latest details of the specified account. + rpc GetAccount(AccountRequest) returns (AccountResponse) {} // Returns raw block data for the specified block number. rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} + rpc GetBlockHeaderByNumber(BlockHeaderByNumberRequest) returns (BlockHeaderByNumberResponse) {} // Returns a list of notes matching the provided note IDs. rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} - // Submits proven transaction to the Miden network. 
- rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (block_producer.SubmitProvenTransactionResponse) {} + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} // Submits a proven batch of transactions to the Miden network. // @@ -55,15 +59,17 @@ service Api { // // All transactions in the batch but not in the mempool must build on the current mempool // state following normal transaction submission rules. - rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (block_producer.SubmitProvenBatchResponse) {} + // + // Returns the node's current block height. + rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. // // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(rpc_store.SyncNullifiersRequest) returns (rpc_store.SyncNullifiersResponse) {} + rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(rpc_store.SyncAccountVaultRequest) returns (rpc_store.SyncAccountVaultResponse) {} + rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} // Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. // @@ -74,7 +80,7 @@ service Api { // // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the // tip of the chain. 
- rpc SyncNotes(rpc_store.SyncNotesRequest) returns (rpc_store.SyncNotesResponse) {} + rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} // Returns info which can be used by the client to sync up to the latest state of the chain // for the objects (accounts and notes) the client is interested in. @@ -91,13 +97,20 @@ service Api { // For preserving some degree of privacy, note tags contain only high // part of hashes. Thus, returned data contains excessive notes, client can make // additional filtering of that data on its side. - rpc SyncState(rpc_store.SyncStateRequest) returns (rpc_store.SyncStateResponse) {} + rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(rpc_store.SyncStorageMapsRequest) returns (rpc_store.SyncStorageMapsResponse) {} + rpc SyncStorageMaps(SyncStorageMapsRequest) returns (SyncStorageMapsResponse) {} // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(rpc_store.SyncTransactionsRequest) returns (rpc_store.SyncTransactionsResponse) {} + rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} + + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. + rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} } // RPC STATUS @@ -112,8 +125,540 @@ message RpcStatus { primitives.Digest genesis_commitment = 2; // The store status. - rpc_store.StoreStatus store = 3; + StoreStatus store = 3; // The block producer status. 
- block_producer.BlockProducerStatus block_producer = 4; + BlockProducerStatus block_producer = 4; +} + + +// BLOCK PRODUCER STATUS +// ================================================================================================ + + +// Represents the status of the block producer. +message BlockProducerStatus { + // The block producer's running version. + string version = 1; + + // The block producer's status. + string status = 2; + + // The block producer's current view of the chain tip height. + // + // This is the height of the latest block that the block producer considers + // to be part of the canonical chain. + fixed32 chain_tip = 4; + + // Statistics about the mempool. + MempoolStats mempool_stats = 3; +} + +// Statistics about the mempool. +message MempoolStats { + // Number of transactions currently in the mempool waiting to be batched. + uint64 unbatched_transactions = 1; + + // Number of batches currently being proven. + uint64 proposed_batches = 2; + + // Number of proven batches waiting for block inclusion. + uint64 proven_batches = 3; +} + +// STORE STATUS +// ================================================================================================ + +// Represents the status of the store. +message StoreStatus { + // The store's running version. + string version = 1; + + // The store's status. + string status = 2; + + // Number of the latest block in the chain. + fixed32 chain_tip = 3; +} + +// GET BLOCK HEADER BY NUMBER +// ================================================================================================ + +// Returns the block header corresponding to the requested block number, as well as the merkle +// path and current forest which validate the block's inclusion in the chain. +// +// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. +message BlockHeaderByNumberRequest { + // The target block height, defaults to latest if not provided. 
+ optional uint32 block_num = 1; + // Whether or not to return authentication data for the block header. + optional bool include_mmr_proof = 2; +} + +// Represents the result of getting a block header by block number. +message BlockHeaderByNumberResponse { + // The requested block header. + blockchain.BlockHeader block_header = 1; + + // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. + optional primitives.MerklePath mmr_path = 2; + + // Current chain length. + optional fixed32 chain_length = 3; +} + +// GET NOTE SCRIPT BY ROOT +// ================================================================================================ + +// Represents a note script or nothing. +message MaybeNoteScript { + // The script for a note by its root. + optional note.NoteScript script = 1; +} + +// GET ACCOUNT PROOF +// ================================================================================================ + +// Defines the request for account details. +message AccountRequest { + // Request the details for a public account. + message AccountDetailRequest { + // Represents a storage slot index and the associated map keys. + message StorageMapDetailRequest { + // Indirection required for use in `oneof {..}` block. + message MapKeys { + // A list of map keys associated with this storage slot. + repeated primitives.Digest map_keys = 1; + } + // Storage slot name. + string slot_name = 1; + + oneof slot_data { + // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, + // the response will not contain them but must be requested separately. + bool all_entries = 2; + + // A list of map keys associated with the given storage slot identified by `slot_name`. + MapKeys map_keys = 3; + } + } + + // Last known code commitment to the requester. The response will include account code + // only if its commitment is different from this value. 
+        //
+        // If the field is omitted, the response will not include the account code.
+        optional primitives.Digest code_commitment = 1;
+
+        // Last known asset vault commitment to the requester. The response will include asset vault data
+        // only if its commitment is different from this value. If the value is not present in the
+        // request, the response will not contain one either.
+        // If the number of to-be-returned asset entries exceeds a threshold, they have to be requested
+        // separately, which is signaled in the response message with a dedicated flag.
+        optional primitives.Digest asset_vault_commitment = 2;
+
+        // Additional request per storage map.
+        repeated StorageMapDetailRequest storage_maps = 3;
+    }
+
+    // ID of the account for which we want to get data
+    account.AccountId account_id = 1;
+
+    // Optional block height at which to return the proof.
+    //
+    // Defaults to current chain tip if unspecified.
+    optional blockchain.BlockNumber block_num = 2;
+
+    // Request for additional account details; valid only for public accounts.
+    optional AccountDetailRequest details = 3;
+}
+
+// Represents the result of getting account proof.
+message AccountResponse {
+
+    message AccountDetails {
+        // Account header.
+        account.AccountHeader header = 1;
+
+        // Account storage data
+        AccountStorageDetails storage_details = 2;
+
+        // Account code; empty if code commitments matched or none was requested.
+        optional bytes code = 3;
+
+        // Account asset vault data; empty if vault commitments matched or the requester
+        // omitted it in the request.
+        optional AccountVaultDetails vault_details = 4;
+    }
+
+    // The block number at which the account witness was created and the account details were observed.
+    blockchain.BlockNumber block_num = 1;
+
+    // Account ID, current state commitment, and SMT path.
+    account.AccountWitness witness = 2;
+
+    // Additional details for public accounts.
+ optional AccountDetails details = 3; +} + +// Account vault details for AccountResponse +message AccountVaultDetails { + // A flag that is set to true if the account contains too many assets. This indicates + // to the user that `SyncAccountVault` endpoint should be used to retrieve the + // account's assets + bool too_many_assets = 1; + + // When too_many_assets == false, this will contain the list of assets in the + // account's vault + repeated primitives.Asset assets = 2; +} + +// Account storage details for AccountResponse +message AccountStorageDetails { + message AccountStorageMapDetails { + // Wrapper for repeated storage map entries including their proofs. + // Used when specific keys are requested to enable client-side verification. + message MapEntriesWithProofs { + // Definition of individual storage entries including a proof. + message StorageMapEntryWithProof { + primitives.Digest key = 1; + primitives.Digest value = 2; + primitives.SmtOpening proof = 3; + } + + repeated StorageMapEntryWithProof entries = 1; + } + + // Wrapper for repeated storage map entries (without proofs). + // Used when all entries are requested for small maps. + message AllMapEntries { + // Definition of individual storage entries. + message StorageMapEntry { + primitives.Digest key = 1; + primitives.Digest value = 2; + } + + repeated StorageMapEntry entries = 1; + } + + // Storage slot name. + string slot_name = 1; + + // True when the number of entries exceeds the response limit. + // When set, clients should use the `SyncStorageMaps` endpoint. + bool too_many_entries = 2; + + // The map entries (with or without proofs). Empty when too_many_entries is true. + oneof entries { + // All storage entries without proofs (for small maps or full requests). + AllMapEntries all_entries = 3; + + // Specific entries with their SMT proofs (for partial requests). 
+ MapEntriesWithProofs entries_with_proofs = 4; + } + } + + // Account storage header (storage slot info for up to 256 slots) + account.AccountStorageHeader header = 1; + + // Additional data for the requested storage maps + repeated AccountStorageMapDetails map_details = 2; +} + +// CHECK NULLIFIERS +// ================================================================================================ + +// List of nullifiers to return proofs for. +message NullifierList { + // List of nullifiers to return proofs for. + repeated primitives.Digest nullifiers = 1; +} + +// Represents the result of checking nullifiers. +message CheckNullifiersResponse { + // Each requested nullifier has its corresponding nullifier proof at the same position. + repeated primitives.SmtOpening proofs = 1; +} + +// SYNC NULLIFIERS +// ================================================================================================ + +// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. +message SyncNullifiersRequest { + // Block number from which the nullifiers are requested (inclusive). + BlockRange block_range = 1; + + // Number of bits used for nullifier prefix. Currently the only supported value is 16. + uint32 prefix_len = 2; + + // List of nullifiers to check. Each nullifier is specified by its prefix with length equal + // to `prefix_len`. + repeated uint32 nullifiers = 3; +} + +// Represents the result of syncing nullifiers. +message SyncNullifiersResponse { + // Represents a single nullifier update. + message NullifierUpdate { + // Nullifier ID. + primitives.Digest nullifier = 1; + + // Block number. + fixed32 block_num = 2; + } + + // Pagination information. + PaginationInfo pagination_info = 1; + + // List of nullifiers matching the prefixes specified in the request. 
+ repeated NullifierUpdate nullifiers = 2; +} + +// SYNC ACCOUNT VAULT +// ================================================================================================ + +// Account vault synchronization request. +// +// Allows requesters to sync asset values for specific public accounts within a block range. +message SyncAccountVaultRequest { + // Block range from which to start synchronizing. + // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; + + // Account for which we want to sync asset vault. + account.AccountId account_id = 2; +} + +message SyncAccountVaultResponse { + // Pagination information. + PaginationInfo pagination_info = 1; + + // List of asset updates for the account. + // + // Multiple updates can be returned for a single asset, and the one with a higher `block_num` + // is expected to be retained by the caller. + repeated AccountVaultUpdate updates = 2; +} + +message AccountVaultUpdate { + // Vault key associated with the asset. + primitives.Digest vault_key = 1; + + // Asset value related to the vault key. + // If not present, the asset was removed from the vault. + optional primitives.Asset asset = 2; + + // Block number at which the above asset was updated in the account vault. + fixed32 block_num = 3; +} + +// SYNC NOTES +// ================================================================================================ + +// Note synchronization request. +// +// Specifies note tags that requester is interested in. The server will return the first block which +// contains a note matching `note_tags` or the chain tip. +message SyncNotesRequest { + // Block range from which to start synchronizing. + BlockRange block_range = 1; + + // Specifies the tags which the requester is interested in. + repeated fixed32 note_tags = 2; +} + +// Represents the result of syncing notes request. 
+message SyncNotesResponse { + // Pagination information. + PaginationInfo pagination_info = 1; + + // Block header of the block with the first note matching the specified criteria. + blockchain.BlockHeader block_header = 2; + + // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. + // + // An MMR proof can be constructed for the leaf of index `block_header.block_num` of + // an MMR of forest `chain_tip` with this path. + primitives.MerklePath mmr_path = 3; + + // List of all notes together with the Merkle paths from `response.block_header.note_root`. + repeated note.NoteSyncRecord notes = 4; +} + +// SYNC STATE +// ================================================================================================ + +// State synchronization request. +// +// Specifies state updates the requester is interested in. The server will return the first block which +// contains a note matching `note_tags` or the chain tip. And the corresponding updates to +// `account_ids` for that block range. +message SyncStateRequest { + // Last block known by the requester. The response will contain data starting from the next block, + // until the first block which contains a note of matching the requested tag, or the chain tip + // if there are no notes. + fixed32 block_num = 1; + + // Accounts' commitment to include in the response. + // + // An account commitment will be included if-and-only-if it is the latest update. Meaning it is + // possible there was an update to the account for the given range, but if it is not the latest, + // it won't be included in the response. + repeated account.AccountId account_ids = 2; + + // Specifies the tags which the requester is interested in. + repeated fixed32 note_tags = 3; +} + +// Represents the result of syncing state request. +message SyncStateResponse { + // Number of the latest block in the chain. + fixed32 chain_tip = 1; + + // Block header of the block with the first note matching the specified criteria. 
+ blockchain.BlockHeader block_header = 2; + + // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. + primitives.MmrDelta mmr_delta = 3; + + // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. + repeated account.AccountSummary accounts = 5; + + // List of transactions executed against requested accounts between `request.block_num + 1` and + // `response.block_header.block_num`. + repeated transaction.TransactionSummary transactions = 6; + + // List of all notes together with the Merkle paths from `response.block_header.note_root`. + repeated note.NoteSyncRecord notes = 7; +} + +// SYNC STORAGE MAP +// ================================================================================================ + +// Storage map synchronization request. +// +// Allows requesters to sync storage map values for specific public accounts within a block range, +// with support for cursor-based pagination to handle large storage maps. +message SyncStorageMapsRequest { + // Block range from which to start synchronizing. + // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; + + // Account for which we want to sync storage maps. + account.AccountId account_id = 3; +} + +message SyncStorageMapsResponse { + // Pagination information. + PaginationInfo pagination_info = 1; + + // The list of storage map updates. + // + // Multiple updates can be returned for a single slot index and key combination, and the one + // with a higher `block_num` is expected to be retained by the caller. + repeated StorageMapUpdate updates = 2; +} + +// Represents a single storage map update. +message StorageMapUpdate { + // Block number in which the slot was updated. + fixed32 block_num = 1; + + // Storage slot name. + string slot_name = 2; + + // The storage map key. 
+ primitives.Digest key = 3; + + // The storage map value. + primitives.Digest value = 4; +} + +// BLOCK RANGE +// ================================================================================================ + +// Represents a block range. +message BlockRange { + // Block number from which to start (inclusive). + fixed32 block_from = 1; + + // Block number up to which to check (inclusive). If not specified, checks up to the latest block. + optional fixed32 block_to = 2; +} + +// PAGINATION INFO +// ================================================================================================ + +// Represents pagination information for chunked responses. +// +// Pagination is done using block numbers as the axis, allowing requesters to request +// data in chunks by specifying block ranges and continuing from where the previous +// response left off. +// +// To request the next chunk, the requester should use `block_num + 1` from the previous response +// as the `block_from` for the next request. +message PaginationInfo { + // Current chain tip + fixed32 chain_tip = 1; + + // The block number of the last check included in this response. + // + // For chunked responses, this may be less than `request.block_range.block_to`. + // If it is less than request.block_range.block_to, the user is expected to make a subsequent request + // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). + fixed32 block_num = 2; +} + +// SYNC TRANSACTIONS +// ================================================================================================ + +// Transactions synchronization request. +// +// Allows requesters to sync transactions for specific accounts within a block range. +message SyncTransactionsRequest { + // Block range from which to start synchronizing. + BlockRange block_range = 1; + + // Accounts to sync transactions for. 
+    repeated account.AccountId account_ids = 2;
+}
+
+// Represents the result of syncing transactions request.
+message SyncTransactionsResponse {
+    // Pagination information.
+    PaginationInfo pagination_info = 1;
+
+    // List of transaction records.
+    repeated TransactionRecord transactions = 2;
+}
+
+// Represents a transaction record.
+message TransactionRecord {
+    // Block number in which the transaction was included.
+    fixed32 block_num = 1;
+
+    // A transaction header.
+    transaction.TransactionHeader header = 2;
+}
+
+// RPC LIMITS
+// ================================================================================================
+
+// Represents the query parameter limits for RPC endpoints.
+message RpcLimits {
+    // Maps RPC endpoint names to their parameter limits.
+    // Key: endpoint name (e.g., "CheckNullifiers", "SyncState")
+    // Value: map of parameter names to their limit values
+    map<string, EndpointLimits> endpoints = 1;
+}
+
+// Represents the parameter limits for a single endpoint.
+message EndpointLimits {
+    // Maps parameter names to their limit values.
+    // Key: parameter name (e.g., "nullifier", "account_id")
+    // Value: limit value
+    map<string, uint64> parameters = 1;
 }
diff --git a/proto/proto/store/block_producer.proto b/proto/proto/store/block_producer.proto
deleted file mode 100644
index e0218bd0fa..0000000000
--- a/proto/proto/store/block_producer.proto
+++ /dev/null
@@ -1,164 +0,0 @@
-// Specification of the Block Producer store RPC.
-syntax = "proto3";
-package block_producer_store;
-
-import "google/protobuf/empty.proto";
-import "types/account.proto";
-import "types/blockchain.proto";
-import "types/note.proto";
-import "types/primitives.proto";
-import "store/shared.proto";
-
-// BLOCK PRODUCER STORE API
-// ================================================================================================
-
-// Store API for the BlockProducer component
-service BlockProducer {
-    // Applies changes of a new block to the DB and in-memory data structures.
- rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} - - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns data required to prove the next block. - rpc GetBlockInputs(BlockInputsRequest) returns (BlockInputs) {} - - // Returns the inputs for a transaction batch. - rpc GetBatchInputs(BatchInputsRequest) returns (BatchInputs) {} - - // Returns data required to validate a new transaction. - rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} -} - -// GET BLOCK INPUTS -// ================================================================================================ - -// Returns data required to prove the next block. -message BlockInputsRequest { - // IDs of all accounts updated in the proposed block for which to retrieve account witnesses. - repeated account.AccountId account_ids = 1; - - // Nullifiers of all notes consumed by the block for which to retrieve witnesses. - // - // Due to note erasure it will generally not be possible to know the exact set of nullifiers - // a block will create, unless we pre-execute note erasure. So in practice, this set of - // nullifiers will be the set of nullifiers of all proven batches in the block, which is a - // superset of the nullifiers the block may create. - // - // However, if it is known that a certain note will be erased, it would not be necessary to - // provide a nullifier witness for it. - repeated primitives.Digest nullifiers = 2; - - // Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - repeated primitives.Digest unauthenticated_notes = 3; - - // Array of block numbers referenced by all batches in the block. 
- repeated fixed32 reference_blocks = 4; -} - -// Represents the result of getting block inputs. -message BlockInputs { - // A nullifier returned as a response to the `GetBlockInputs`. - message NullifierWitness { - // The nullifier. - primitives.Digest nullifier = 1; - - // The SMT proof to verify the nullifier's inclusion in the nullifier tree. - primitives.SmtOpening opening = 2; - } - // The latest block header. - blockchain.BlockHeader latest_block_header = 1; - - // Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - // the store**. - repeated note.NoteInclusionInBlockProof unauthenticated_note_proofs = 2; - - // The serialized chain MMR which includes proofs for all blocks referenced by the - // above note inclusion proofs as well as proofs for inclusion of the requested blocks - // referenced by the batches in the block. - bytes partial_block_chain = 3; - - // The state commitments of the requested accounts and their authentication paths. - repeated account.AccountWitness account_witnesses = 4; - - // The requested nullifiers and their authentication paths. - repeated NullifierWitness nullifier_witnesses = 5; -} - -// GET BATCH INPUTS -// ================================================================================================ - -// Returns the inputs for a transaction batch. -message BatchInputsRequest { - // List of unauthenticated note commitments to be queried from the database. - repeated primitives.Digest note_commitments = 1; - // Set of block numbers referenced by transactions. - repeated fixed32 reference_blocks = 2; -} - -// Represents the result of getting batch inputs. -message BatchInputs { - // The block header that the transaction batch should reference. - blockchain.BlockHeader batch_reference_block_header = 1; - - // Proof of each _found_ unauthenticated note's inclusion in a block. 
- repeated note.NoteInclusionInBlockProof note_proofs = 2; - - // The serialized chain MMR which includes proofs for all blocks referenced by the - // above note inclusion proofs as well as proofs for inclusion of the blocks referenced - // by the transactions in the batch. - bytes partial_block_chain = 3; -} - -// GET TRANSACTION INPUTS -// ================================================================================================ - -// Returns data required to validate a new transaction. -message TransactionInputsRequest { - // ID of the account against which a transaction is executed. - account.AccountId account_id = 1; - // Set of nullifiers consumed by this transaction. - repeated primitives.Digest nullifiers = 2; - // Set of unauthenticated note commitments to check for existence on-chain. - // - // These are notes which were not on-chain at the state the transaction was proven, - // but could by now be present. - repeated primitives.Digest unauthenticated_notes = 3; -} - -// Represents the result of getting transaction inputs. -message TransactionInputs { - // An account returned as a response to the `GetTransactionInputs`. - message AccountTransactionInputRecord { - // The account ID. - account.AccountId account_id = 1; - - // The latest account commitment, zero commitment if the account doesn't exist. - primitives.Digest account_commitment = 2; - } - - // A nullifier returned as a response to the `GetTransactionInputs`. - message NullifierTransactionInputRecord { - // The nullifier ID. - primitives.Digest nullifier = 1; - - // The block at which the nullifier has been consumed, zero if not consumed. - fixed32 block_num = 2; - } - - // Account state proof. - AccountTransactionInputRecord account_state = 1; - - // List of nullifiers that have been consumed. - repeated NullifierTransactionInputRecord nullifiers = 2; - - // List of unauthenticated notes that were not found in the database. 
- repeated primitives.Digest found_unauthenticated_notes = 3; - - // The node's current block height. - fixed32 block_height = 4; - - // Whether the account ID prefix is unique. Only relevant for account creation requests. - optional bool new_account_id_prefix_is_unique = 5; // TODO: Replace this with an error. When a general error message exists. -} diff --git a/proto/proto/store/ntx_builder.proto b/proto/proto/store/ntx_builder.proto deleted file mode 100644 index 15144447ba..0000000000 --- a/proto/proto/store/ntx_builder.proto +++ /dev/null @@ -1,113 +0,0 @@ -// Specification of the NTX Builder store RPC. -syntax = "proto3"; -package ntx_builder_store; - -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/note.proto"; -import "types/primitives.proto"; -import "store/shared.proto"; - -// NTX BUILDER STORE API -// ================================================================================================ - -// Store API for the network transaction builder component -service NtxBuilder { - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns a paginated list of unconsumed network notes. - rpc GetUnconsumedNetworkNotes(UnconsumedNetworkNotesRequest) returns (UnconsumedNetworkNotes) {} - - // Returns a paginated list of a network account's unconsumed notes up to a specified block number. - rpc GetUnconsumedNetworkNotesForAccount(UnconsumedNetworkNotesForAccountRequest) returns (UnconsumedNetworkNotes) {} - - // Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - // header for executing network transactions. If the block number is not provided, the latest - // header and peaks will be retrieved. 
- rpc GetCurrentBlockchainData(blockchain.MaybeBlockNumber) returns (CurrentBlockchainData) {} - - // Returns the latest state of a network account with the specified account prefix. - rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {} - - // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} -} - -// GET NETWORK ACCOUNT DETAILS BY PREFIX -// ================================================================================================ - -// Account ID prefix. -message AccountIdPrefix { - // Account ID prefix. - fixed32 account_id_prefix = 1; -} - -// Represents the result of getting network account details by prefix. -message MaybeAccountDetails { - // Account details. - optional account.AccountDetails details = 1; -} - -// GET UNCONSUMED NETWORK NOTES -// ================================================================================================ - -// Returns a list of unconsumed network notes using pagination. -message UnconsumedNetworkNotesRequest { - // An opaque token used to paginate through the notes. - // - // This should be null on the first call, and set to the response token until the response token - // is null, at which point all data has been fetched. - optional uint64 page_token = 1; - - // Number of notes to retrieve per page. - uint64 page_size = 2; -} - -// Returns a paginated list of unconsumed network notes for an account. -// -// Notes created or consumed after the specified block are excluded from the result. -message UnconsumedNetworkNotesForAccountRequest { - // This should be null on the first call, and set to the response token until the response token - // is null, at which point all data has been fetched. - // - // Note that this token is only valid if used with the same parameters. - optional uint64 page_token = 1; - - // Number of notes to retrieve per page. 
- uint64 page_size = 2; - - // The network account ID prefix to filter notes by. - uint32 network_account_id_prefix = 3; - - // The block number to filter the returned notes by. - // - // Notes that are created or consumed after this block are excluded from the result. - fixed32 block_num = 4; -} - -// Represents the result of getting the unconsumed network notes. -message UnconsumedNetworkNotes { - // An opaque pagination token. - // - // Use this in your next request to get the next - // set of data. - // - // Will be null once there is no more data remaining. - optional uint64 next_token = 1; - - // The list of unconsumed network notes. - repeated note.NetworkNote notes = 2; -} - -// GET CURRENT BLOCKCHAIN DATA -// ================================================================================================ - -// Current blockchain data based on the requested block number. -message CurrentBlockchainData { - // Commitments that represent the current state according to the MMR. - repeated primitives.Digest current_peaks = 1; - // Current block header. - optional blockchain.BlockHeader current_block_header = 2; -} diff --git a/proto/proto/store/rpc.proto b/proto/proto/store/rpc.proto deleted file mode 100644 index 1fc3e1936e..0000000000 --- a/proto/proto/store/rpc.proto +++ /dev/null @@ -1,509 +0,0 @@ -// Specification of the store RPC. -// -// This provided access to the blockchain data to the other nodes. -syntax = "proto3"; -package rpc_store; - -import "google/protobuf/empty.proto"; -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/transaction.proto"; -import "types/note.proto"; -import "types/primitives.proto"; -import "store/shared.proto"; - -// RPC STORE API -// ================================================================================================ - -// Store API for the RPC component -service Rpc { - // Returns the status info. 
- rpc Status(google.protobuf.Empty) returns (StoreStatus) {} - - // Returns a nullifier proof for each of the requested nullifiers. - rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - - // Returns the latest state of an account with the specified ID. - rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} - - // Returns the latest state proof of the specified account. - rpc GetAccountProof(AccountProofRequest) returns (AccountProofResponse) {} - - // Returns raw block data for the specified block number. - rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} - - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns a list of committed notes matching the provided note IDs. - rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} - - // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} - - // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - // - // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} - - // Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - // - // requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - // matching notes for. The request will then return the next block containing any note matching the provided tags. - // - // The response includes each note's metadata and inclusion proof. 
- // - // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - // tip of the chain. - rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} - - // Returns info which can be used by the requester to sync up to the latest state of the chain - // for the objects (accounts, notes, nullifiers) the requester is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. requester is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the requester is fully synchronized with the chain. - // - // Each request also returns info about new notes, nullifiers etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. - // - // For preserving some degree of privacy, note tags and nullifiers filters contain only high - // part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - // additional filtering of that data on its side. - rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} - - // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} - - // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(SyncStorageMapsRequest) returns (SyncStorageMapsResponse) {} - - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} -} - -// STORE STATUS -// ================================================================================================ - -// Represents the status of the store. 
-message StoreStatus { - // The store's running version. - string version = 1; - - // The store's status. - string status = 2; - - // Number of the latest block in the chain. - fixed32 chain_tip = 3; -} - -// GET ACCOUNT PROOF -// ================================================================================================ - -// Returns the latest state proof of the specified account. -message AccountProofRequest { - // Request the details for a public account. - message AccountDetailRequest { - // Represents a storage slot index and the associated map keys. - message StorageMapDetailRequest { - // Indirection required for use in `oneof {..}` block. - message MapKeys { - // A list of map keys associated with this storage slot. - repeated primitives.Digest map_keys = 1; - } - // Storage slot index (`[0..255]`). - uint32 slot_index = 1; - - oneof slot_data { - // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - // the response will not contain them but must be requested separately. - bool all_entries = 2; - - // A list of map keys associated with the given storage slot identified by `slot_index`. - MapKeys map_keys = 3; - } - } - - // Last known code commitment to the requester. The response will include account code - // only if its commitment is different from this value. - // - // If the field is ommiteed, the response will not include the account code. - optional primitives.Digest code_commitment = 1; - - // Last known asset vault commitment to the requester. The response will include asset vault data - // only if its commitment is different from this value. If the value is not present in the - // request, the response will not contain one either. - // If the number of to-be-returned asset entries exceed a threshold, they have to be requested - // separately, which is signaled in the response message with dedicated flag. 
- optional primitives.Digest asset_vault_commitment = 2; - - // Additional request per storage map. - repeated StorageMapDetailRequest storage_maps = 3; - } - - // ID of the account for which we want to get data - account.AccountId account_id = 1; - - // Block at which we'd like to get this data. If present, must be close to the chain tip. - // If not present, data from the latest block will be returned. - optional blockchain.BlockNumber block_num = 2; - - // Request for additional account details; valid only for public accounts. - optional AccountDetailRequest details = 3; -} - -// Represents the result of getting account proof. -message AccountProofResponse { - - message AccountDetails { - // Account header. - account.AccountHeader header = 1; - - // Account storage data - AccountStorageDetails storage_details = 2; - - // Account code; empty if code commitments matched or none was requested. - optional bytes code = 3; - - // Account asset vault data; empty if vault commitments matched or the requester - // omitted it in the request. - optional AccountVaultDetails vault_details = 4; - } - - // The block number at which the account witness was created and the account details were observed. - blockchain.BlockNumber block_num = 1; - - // Account ID, current state commitment, and SMT path. - account.AccountWitness witness = 2; - - // Additional details for public accounts. - optional AccountDetails details = 3; -} - -// Account vault details for AccountProofResponse -message AccountVaultDetails { - // A flag that is set to true if the account contains too many assets. 
This indicates - // to the user that `SyncAccountVault` endpoint should be used to retrieve the - // account's assets - bool too_many_assets = 1; - - // When too_many_assets == false, this will contain the list of assets in the - // account's vault - repeated primitives.Asset assets = 2; -} - -// Account storage details for AccountProofResponse -message AccountStorageDetails { - message AccountStorageMapDetails { - // Wrapper for repeated storage map entries - message MapEntries { - // Definition of individual storage entries. - message StorageMapEntry { - primitives.Digest key = 1; - primitives.Digest value = 2; - } - - repeated StorageMapEntry entries = 1; - } - // slot index of the storage map - uint32 slot_index = 1; - - // A flag that is set to `true` if the number of to-be-returned entries in the - // storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - // endpoint should be used to get all storage map data. - bool too_many_entries = 2; - - // By default we provide all storage entries. - MapEntries entries = 3; - } - - // Account storage header (storage slot info for up to 256 slots) - account.AccountStorageHeader header = 1; - - // Additional data for the requested storage maps - repeated AccountStorageMapDetails map_details = 2; -} - - -// CHECK NULLIFIERS -// ================================================================================================ - -// List of nullifiers to return proofs for. -message NullifierList { - // List of nullifiers to return proofs for. - repeated primitives.Digest nullifiers = 1; -} - -// Represents the result of checking nullifiers. -message CheckNullifiersResponse { - // Each requested nullifier has its corresponding nullifier proof at the same position. 
- repeated primitives.SmtOpening proofs = 1; -} - -// SYNC NULLIFIERS -// ================================================================================================ - -// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -message SyncNullifiersRequest { - // Block number from which the nullifiers are requested (inclusive). - BlockRange block_range = 1; - - // Number of bits used for nullifier prefix. Currently the only supported value is 16. - uint32 prefix_len = 2; - - // List of nullifiers to check. Each nullifier is specified by its prefix with length equal - // to `prefix_len`. - repeated uint32 nullifiers = 3; -} - -// Represents the result of syncing nullifiers. -message SyncNullifiersResponse { - // Represents a single nullifier update. - message NullifierUpdate { - // Nullifier ID. - primitives.Digest nullifier = 1; - - // Block number. - fixed32 block_num = 2; - } - - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of nullifiers matching the prefixes specified in the request. - repeated NullifierUpdate nullifiers = 2; -} - -// SYNC STATE -// ================================================================================================ - -// State synchronization request. -// -// Specifies state updates the requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -// `account_ids` for that block range. -message SyncStateRequest { - // Last block known by the requester. The response will contain data starting from the next block, - // until the first block which contains a note of matching the requested tag, or the chain tip - // if there are no notes. - fixed32 block_num = 1; - - // Accounts' commitment to include in the response. - // - // An account commitment will be included if-and-only-if it is the latest update. 
Meaning it is - // possible there was an update to the account for the given range, but if it is not the latest, - // it won't be included in the response. - repeated account.AccountId account_ids = 2; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 3; -} - -// Represents the result of syncing state request. -message SyncStateResponse { - // Number of the latest block in the chain. - fixed32 chain_tip = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - primitives.MmrDelta mmr_delta = 3; - - // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - repeated account.AccountSummary accounts = 5; - - // List of transactions executed against requested accounts between `request.block_num + 1` and - // `response.block_header.block_num`. - repeated transaction.TransactionSummary transactions = 6; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 7; -} - -// SYNC ACCOUNT VAULT -// ================================================================================================ - -// Account vault synchronization request. -// -// Allows requesters to sync asset values for specific public accounts within a block range. -message SyncAccountVaultRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; - - // Account for which we want to sync asset vault. - account.AccountId account_id = 2; -} - -message SyncAccountVaultResponse { - // Pagination information. 
- PaginationInfo pagination_info = 1; - - // List of asset updates for the account. - // - // Multiple updates can be returned for a single asset, and the one with a higher `block_num` - // is expected to be retained by the caller. - repeated AccountVaultUpdate updates = 2; -} - -message AccountVaultUpdate { - // Vault key associated with the asset. - primitives.Digest vault_key = 1; - - // Asset value related to the vault key. - // If not present, the asset was removed from the vault. - optional primitives.Asset asset = 2; - - // Block number at which the above asset was updated in the account vault. - fixed32 block_num = 3; -} - -// SYNC NOTES -// ================================================================================================ - -// Note synchronization request. -// -// Specifies note tags that requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. -message SyncNotesRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 2; -} - -// Represents the result of syncing notes request. -message SyncNotesResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - // - // An MMR proof can be constructed for the leaf of index `block_header.block_num` of - // an MMR of forest `chain_tip` with this path. - primitives.MerklePath mmr_path = 3; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. 
- repeated note.NoteSyncRecord notes = 4; -} - -// SYNC STORAGE MAP -// ================================================================================================ - -// Storage map synchronization request. -// -// Allows requesters to sync storage map values for specific public accounts within a block range, -// with support for cursor-based pagination to handle large storage maps. -message SyncStorageMapsRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; - - // Account for which we want to sync storage maps. - account.AccountId account_id = 3; -} - -message SyncStorageMapsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // The list of storage map updates. - // - // Multiple updates can be returned for a single slot index and key combination, and the one - // with a higher `block_num` is expected to be retained by the caller. - repeated StorageMapUpdate updates = 2; -} - -// Represents a single storage map update. -message StorageMapUpdate { - // Block number in which the slot was updated. - fixed32 block_num = 1; - - // Slot index ([0..255]). - uint32 slot_index = 2; - - // The storage map key. - primitives.Digest key = 3; - - // The storage map value. - primitives.Digest value = 4; -} - -// BLOCK RANGE -// ================================================================================================ - -// Represents a block range. -message BlockRange { - // Block number from which to start (inclusive). - fixed32 block_from = 1; - - // Block number up to which to check (inclusive). If not specified, checks up to the latest block. 
- optional fixed32 block_to = 2; -} - -// PAGINATION INFO -// ================================================================================================ - -// Represents pagination information for chunked responses. -// -// Pagination is done using block numbers as the axis, allowing requesters to request -// data in chunks by specifying block ranges and continuing from where the previous -// response left off. -// -// To request the next chunk, the requester should use `block_num + 1` from the previous response -// as the `block_from` for the next request. -message PaginationInfo { - // Current chain tip - fixed32 chain_tip = 1; - - // The block number of the last check included in this response. - // - // For chunked responses, this may be less than `request.block_range.block_to`. - // If it is less than request.block_range.block_to, the user is expected to make a subsequent request - // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - fixed32 block_num = 2; -} - -// SYNC TRANSACTIONS -// ================================================================================================ - -// Transactions synchronization request. -// -// Allows requesters to sync transactions for specific accounts within a block range. -message SyncTransactionsRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; - - // Accounts to sync transactions for. - repeated account.AccountId account_ids = 2; -} - -// Represents the result of syncing transactions request. -message SyncTransactionsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of transaction records. - repeated TransactionRecord transaction_records = 2; -} - -// Represents a transaction record. -message TransactionRecord { - // Block number in which the transaction was included. - fixed32 block_num = 1; - - // A transaction header. 
- transaction.TransactionHeader transaction_header = 2; -} diff --git a/proto/proto/store/shared.proto b/proto/proto/store/shared.proto deleted file mode 100644 index 1d162087ef..0000000000 --- a/proto/proto/store/shared.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Shared messages for the store RPC. -syntax = "proto3"; -package shared; - -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/transaction.proto"; -import "types/primitives.proto"; -import "types/note.proto"; -import "google/protobuf/empty.proto"; - -// GET BLOCK HEADER BY NUMBER -// ================================================================================================ - -// Returns the block header corresponding to the requested block number, as well as the merkle -// path and current forest which validate the block's inclusion in the chain. -// -// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. -message BlockHeaderByNumberRequest { - // The target block height, defaults to latest if not provided. - optional uint32 block_num = 1; - // Whether or not to return authentication data for the block header. - optional bool include_mmr_proof = 2; -} - -// Represents the result of getting a block header by block number. -message BlockHeaderByNumberResponse { - // The requested block header. - blockchain.BlockHeader block_header = 1; - - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - optional primitives.MerklePath mmr_path = 2; - - // Current chain length. - optional fixed32 chain_length = 3; -} - -// GET NOTE SCRIPT BY ROOT -// ================================================================================================ - -// Represents a note script or nothing. -message MaybeNoteScript { - // The script for a note by its root. 
- optional note.NoteScript script = 1; -} diff --git a/proto/proto/types/account.proto b/proto/proto/types/account.proto index 6953c228be..e61db64ae2 100644 --- a/proto/proto/types/account.proto +++ b/proto/proto/types/account.proto @@ -12,7 +12,7 @@ import "types/primitives.proto"; // and a random user-provided seed. message AccountId { // 15 bytes (120 bits) encoded using [winter_utils::Serializable] implementation for - // [miden_objects::account::account_id::AccountId]. + // [miden_protocol::account::account_id::AccountId]. bytes id = 1; } @@ -32,14 +32,19 @@ message AccountSummary { message AccountStorageHeader { // A single storage slot in the account storage header. message StorageSlot { + // The name of the storage slot. + string slot_name = 1; + // The type of the storage slot. - uint32 slot_type = 1; + uint32 slot_type = 2; - // The commitment (Word) for this storage slot. - primitives.Digest commitment = 2; + // The data (Word) for this storage slot. + // For value slots (slot_type=0), this is the actual value stored in the slot. + // For map slots (slot_type=1), this is the root of the storage map. + primitives.Digest commitment = 3; } - // Storage slots with their types and commitments. + // Storage slots with their types and data. repeated StorageSlot slots = 1; } @@ -49,7 +54,7 @@ message AccountDetails { AccountSummary summary = 1; // Account details encoded using [winter_utils::Serializable] implementation for - // [miden_objects::account::Account]. + // [miden_protocol::account::Account]. optional bytes details = 2; } diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto index 28a35ae33e..6f53cd4f33 100644 --- a/proto/proto/types/blockchain.proto +++ b/proto/proto/types/blockchain.proto @@ -10,14 +10,21 @@ import "types/primitives.proto"; // Represents a block. message Block { // Block data encoded using [winter_utils::Serializable] implementation for - // [miden_objects::block::Block]. 
+ // [miden_protocol::block::Block]. bytes block = 1; } +// Represents a proposed block. +message ProposedBlock { + // Block data encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::block::ProposedBlock]. + bytes proposed_block = 1; +} + // Represents a block or nothing. message MaybeBlock { // The requested block data encoded using [winter_utils::Serializable] implementation for - // [miden_objects::block::Block]. + // [miden_protocol::block::Block]. optional bytes block = 1; } @@ -59,8 +66,8 @@ message BlockHeader { // A commitment to a set of IDs of transactions which affected accounts in this block. primitives.Digest tx_commitment = 8; - // A commitment to a STARK proof attesting to the correct state transition. - primitives.Digest proof_commitment = 9; + // The validator's ECDSA public key. + ValidatorPublicKey validator_key = 9; // A commitment to all transaction kernels supported by this block. primitives.Digest tx_kernel_commitment = 10; @@ -72,6 +79,27 @@ message BlockHeader { fixed32 timestamp = 12; } +// PUBLIC KEY +// ================================================================================================ + +// Validator ECDSA public key. +message ValidatorPublicKey { + // Signature encoded using [winter_utils::Serializable] implementation for + // [crypto::dsa::ecdsa_k256_keccak::PublicKey]. + bytes validator_key = 1; +} + +// BLOCK SIGNATURE +// ================================================================================================ + +// Block ECDSA Signature. +message BlockSignature { + // Signature encoded using [winter_utils::Serializable] implementation for + // [crypto::dsa::ecdsa_k256_keccak::Signature]. + bytes signature = 1; +} + + // FEE PARAMETERS // ================================================================================================ @@ -82,3 +110,13 @@ message FeeParameters { // The base fee (in base units) capturing the cost for the verification of a transaction. 
fixed32 verification_base_fee = 2; } + +// BLOCK BODY +// ================================================================================================ + +// Represents a block body. +message BlockBody { + // Block body data encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::block::BlockBody]. + bytes block_body = 1; +} diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto index 709c521f84..ac125daa06 100644 --- a/proto/proto/types/note.proto +++ b/proto/proto/types/note.proto @@ -29,16 +29,13 @@ message NoteMetadata { // A value which can be used by the recipient(s) to identify notes intended for them. // - // See `miden_objects::note::note_tag` for more info. + // See `miden_protocol::note::note_tag` for more info. fixed32 tag = 3; - // Specifies when a note is ready to be consumed. + // Serialized note attachment // - // See `miden_objects::note::execution_hint` for more info. - fixed64 execution_hint = 4; - - // An arbitrary user-defined value. - fixed64 aux = 5; + // See `miden_protocol::note::NoteAttachment` for more info. + bytes attachment = 4; } // Represents a note. diff --git a/proto/proto/types/primitives.proto b/proto/proto/types/primitives.proto index aed31cec05..3c8d279b02 100644 --- a/proto/proto/types/primitives.proto +++ b/proto/proto/types/primitives.proto @@ -9,7 +9,7 @@ message Asset { primitives.Digest asset = 1; } -// SMT +// SMT (Sparse Merkle Tree) // ================================================================================================ // Represents a single SMT leaf entry. @@ -21,9 +21,9 @@ message SmtLeafEntry { Digest value = 2; } -// Represents multiple leaf entries in an SMT. +// Multiple leaf entries when hash collisions occur at the same leaf position. message SmtLeafEntryList { - // The entries list. + // The list of entries at this leaf. 
repeated SmtLeafEntry entries = 1; } diff --git a/proto/proto/types/transaction.proto b/proto/proto/types/transaction.proto index a600e6327a..a0e716457d 100644 --- a/proto/proto/types/transaction.proto +++ b/proto/proto/types/transaction.proto @@ -11,16 +11,16 @@ import "types/primitives.proto"; // Submits proven transaction to the Miden network. message ProvenTransaction { // Transaction encoded using [winter_utils::Serializable] implementation for - // [miden_objects::transaction::proven_tx::ProvenTransaction]. + // [miden_protocol::transaction::proven_tx::ProvenTransaction]. bytes transaction = 1; // Transaction inputs encoded using [winter_utils::Serializable] implementation for - // [miden_objects::transaction::TransactionInputs]. + // [miden_protocol::transaction::TransactionInputs]. optional bytes transaction_inputs = 2; } message ProvenTransactionBatch { // Encoded using [winter_utils::Serializable] implementation for - // [miden_objects::transaction::proven_tx::ProvenTransaction]. + // [miden_protocol::transaction::proven_tx::ProvenTransaction]. bytes encoded = 1; } @@ -54,7 +54,7 @@ message TransactionHeader { primitives.Digest final_state_commitment = 3; // Nullifiers of the input notes of the transaction. - repeated primitives.Digest input_notes = 4; + repeated primitives.Digest nullifiers = 4; // Output notes of the transaction. repeated note.NoteSyncRecord output_notes = 5; diff --git a/proto/src/lib.rs b/proto/src/lib.rs index 14ded322fb..8e8440d19d 100644 --- a/proto/src/lib.rs +++ b/proto/src/lib.rs @@ -41,14 +41,6 @@ pub fn store_block_producer_api_descriptor() -> FileDescriptorSet { .expect("bytes should be a valid file descriptor created by build.rs") } -/// Returns the Protobuf file descriptor for the store shared API. 
-#[cfg(feature = "internal")] -pub fn store_shared_api_descriptor() -> FileDescriptorSet { - let bytes = include_bytes!(concat!(env!("OUT_DIR"), "/", "store_shared_file_descriptor.bin")); - FileDescriptorSet::decode(&bytes[..]) - .expect("bytes should be a valid file descriptor created by build.rs") -} - /// Returns the Protobuf file descriptor for the block-producer API. #[cfg(feature = "internal")] pub fn block_producer_api_descriptor() -> FileDescriptorSet { diff --git a/scripts/check-msrv.sh b/scripts/check-msrv.sh index 0bde2955f0..6058a0ace2 100755 --- a/scripts/check-msrv.sh +++ b/scripts/check-msrv.sh @@ -90,9 +90,9 @@ while IFS=$'\t' read -r pkg_id package_name manifest_path rust_version; do echo "Searching for correct MSRV for $package_name..." - # Determine the currently-installed stable toolchain version (e.g., "1.81.0") + # Determine the currently-installed stable toolchain version (e.g., "1.91.1") latest_stable="$(rustup run stable rustc --version 2>/dev/null | awk '{print $2}')" - if [[ -z "$latest_stable" ]]; then latest_stable="1.81.0"; fi + if [[ -z "$latest_stable" ]]; then latest_stable="1.91.1"; fi # Search for the actual MSRV starting from the current one if actual_msrv=$(cargo msrv find \ @@ -150,4 +150,4 @@ if [[ -n "$failed_packages" ]]; then else echo "ALL WORKSPACE MEMBERS PASSED MSRV CHECKS!" exit 0 -fi \ No newline at end of file +fi