diff --git a/.github/actions/debian/action.yml b/.github/actions/debian/action.yml index 888dec4d5a..302e29e81a 100644 --- a/.github/actions/debian/action.yml +++ b/.github/actions/debian/action.yml @@ -28,15 +28,15 @@ inputs: options: - miden-node - miden-remote-prover - service: + package: required: true - description: The service to build the packages for. + description: The Debian package name. type: choice options: - miden-node - miden-prover - miden-prover-proxy - package: + packaging_dir: required: true description: Name of packaging directory. type: choice @@ -78,7 +78,7 @@ runs: - name: Create package directories shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} mkdir -p \ packaging/deb/$pkg/DEBIAN \ packaging/deb/$pkg/usr/bin \ @@ -89,15 +89,18 @@ runs: - name: Copy package install scripts shell: bash run: | - svc=${{ inputs.service }} pkg=${{ inputs.package }} + pkg_dir=${{ inputs.packaging_dir }} crate=${{ inputs.crate_dir }} - git show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$svc/lib/systemd/system/$svc.env - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/$svc.service > packaging/deb/$svc/lib/systemd/system/$svc.service - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postinst > packaging/deb/$svc/DEBIAN/postinst - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postrm > packaging/deb/$svc/DEBIAN/postrm - chmod 0775 packaging/deb/$svc/DEBIAN/postinst - chmod 0775 packaging/deb/$svc/DEBIAN/postrm + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postinst > packaging/deb/$pkg/DEBIAN/postinst + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postrm > packaging/deb/$pkg/DEBIAN/postrm + for service_file in $(ls packaging/$pkg_dir/*.service | sed "s/.*miden/miden/g"); do + svc=$(echo $service_file | sed "s/.service//g") + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/$service_file > packaging/deb/$pkg/lib/systemd/system/$service_file + git 
show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$pkg/lib/systemd/system/$svc.env + done + chmod 0775 packaging/deb/$pkg/DEBIAN/postinst + chmod 0775 packaging/deb/$pkg/DEBIAN/postrm - name: Create control files shell: bash @@ -108,7 +111,7 @@ runs: # Control file's version field must be x.y.z format so strip the rest. version=$(git describe --tags --abbrev=0 | sed 's/[^0-9.]//g' ) - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} cat > packaging/deb/$pkg/DEBIAN/control << EOF Package: $pkg Version: $version @@ -132,14 +135,14 @@ runs: - name: Copy binary files shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} bin=${{ inputs.crate }} cp -p ./bin/$bin packaging/deb/$pkg/usr/bin/ - name: Build packages shell: bash run: | - dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.service }} + dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.package }} # Save the .deb files, delete the rest. mv packaging/deb/*.deb . @@ -148,12 +151,12 @@ runs: - name: Package names shell: bash run: | - echo "package=${{ inputs.service }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV + echo "package=${{ inputs.package }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV - name: Rename package files shell: bash run: | - mv ${{ inputs.service }}.deb ${{ env.package }} + mv ${{ inputs.package}}.deb ${{ env.package }} - name: shasum packages shell: bash diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 0e7fe0c073..b259c23fd9 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -12,38 +12,16 @@ permissions: jobs: docker-build: - strategy: - matrix: - component: [node] runs-on: Linux-ARM64-Runner - name: Build ${{ matrix.component }} steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Configure AWS credentials - if: github.event.pull_request.head.repo.fork == false - uses: 
aws-actions/configure-aws-credentials@v4 - with: - aws-region: ${{ secrets.AWS_REGION }} - role-to-assume: ${{ secrets.AWS_ROLE }} - role-session-name: GithubActionsSession - - - name: Set cache parameters - if: github.event.pull_request.head.repo.fork == false - run: | - echo "CACHE_FROM=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - echo "CACHE_TO=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - with: - cache-binary: true - - name: Build Docker image - uses: docker/build-push-action@v5 + - name: Build and push + uses: docker/build-push-action@v6 with: push: false - file: ./bin/${{ matrix.component }}/Dockerfile - cache-from: ${{ env.CACHE_FROM || '' }} - cache-to: ${{ env.CACHE_TO || '' }} + file: ./bin/node/Dockerfile + cache-from: type=gha + # Only save cache on push into next + cache-to: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' && 'type=gha,mode=max' || '' }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 016aeba77a..b8bea522e2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,6 +58,43 @@ jobs: save-if: ${{ github.ref == 'refs/heads/next' }} - name: cargo build run: cargo build --workspace --all-targets --locked + - name: Check static linkage + run: | + # Ensure database libraries are statically linked to avoid system library dependencies + # + # It explodes our possible dependency matrix when debugging, particularly + # in the case of sqlite and rocksdb as embedded databases, we want them + # shipped in identical versions we test with. Those are notoriously difficult + # to compile time configure and OSes make very opinionated choices. 
+ metadata=$(cargo metadata --no-deps --format-version 1) + mapfile -t bin_targets < <( + echo "${metadata}" | jq -r '.packages[].targets[] | select(.kind[] == "bin") | .name' | sort -u + ) + if [[ ${#bin_targets[@]} -eq 0 ]]; then + echo "error: No binary targets found in cargo manifest." + exit 1 + fi + for bin_target in "${bin_targets[@]}"; do + # Ensure the binary was built by the previous step. + binary_path="target/debug/${bin_target}" + if ! [[ -x "${binary_path}" ]]; then + echo "error: Missing binary or missing executable bit: ${binary_path}"; + exit 2; + fi + # ldd exits non-zero for static binaries, so we inspect its output instead. + # if ldd fails we use an empty string instead + ldd_output="$(ldd "${binary_path}" 2>&1 || true)" + if echo "${ldd_output}" | grep -E -q 'not a dynamic executable'; then + continue + fi + # librocksdb/libsqlite entries indicate dynamic linkage (bad). + if echo "${ldd_output}" | grep -E -q 'librocksdb|libsqlite'; then + echo "error: Dynamic linkage detected for ${bin_target}." 
+ echo "${ldd_output}" + exit 3 + fi + done + echo "Static linkage check passed for all of ${bin_targets[@]}" clippy: name: lint - clippy @@ -167,11 +204,12 @@ jobs: cargo run --bin miden-node-stress-test seed-store \ --data-directory ${{ env.DATA_DIR }} \ --num-accounts 500 --public-accounts-percentage 50 - - name: Benchmark state sync - run: | - cargo run --bin miden-node-stress-test benchmark-store \ - --data-directory ${{ env.DATA_DIR }} \ - --iterations 10 --concurrency 1 sync-state + # TODO re-introduce + # - name: Benchmark state sync + # run: | + # cargo run --bin miden-node-stress-test benchmark-store \ + # --data-directory ${{ env.DATA_DIR }} \ + # --iterations 10 --concurrency 1 sync-state - name: Benchmark notes sync run: | cargo run --bin miden-node-stress-test benchmark-store \ diff --git a/.github/workflows/cleanup-workflows.yml b/.github/workflows/cleanup-workflows.yml new file mode 100644 index 0000000000..a7a6d2b428 --- /dev/null +++ b/.github/workflows/cleanup-workflows.yml @@ -0,0 +1,284 @@ +# Manual workflow to cleanup deleted workflows runs. +# +# Github keeps workflows runs around even if the workflow is deleted. +# This has the side effect that these still display in the UI which gets cluttered. +# Once the runs of a workflow are deleted, they also get removed from the UI. 
+name: Cleanup Workflow + +on: + workflow_dispatch: + inputs: + mode: + description: "Choose 'dry run' to preview or 'execute' to delete runs" + required: true + default: "dry run" + type: choice + options: + - "dry run" + - "execute" + +jobs: + cleanup: + name: Cleanup deleted workflows + runs-on: ubuntu-latest + permissions: + actions: write # required for deleting workflow runs + contents: read + + steps: + - name: Checkout repo + uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - name: Workflows on main + id: main + run: | + git fetch origin main + WORKFLOWS=$(git ls-tree -r origin/main --name-only | grep '^.github/workflows/') + printf "%s\n" $WORKFLOWS + { + echo "workflows<> "$GITHUB_OUTPUT" + + - name: Workflows on next + id: next + run: | + git fetch origin next + WORKFLOWS=$(git ls-tree -r origin/next --name-only | grep '^.github/workflows/') + printf "%s\n" $WORKFLOWS + { + echo "workflows<> "$GITHUB_OUTPUT" + + - name: Filter for deleted workflows + id: deleted + env: + GH_TOKEN: ${{ github.token }} + run: | + set -euo pipefail + + # Union of `main` and `next` workflows as a JSON array of strings (paths) + EXISTING=$(printf "%s\n%s\n" \ + "${{ steps.main.outputs.workflows }}" \ + "${{ steps.next.outputs.workflows }}" \ + ) + EXISTING=$(echo "$EXISTING" | sort -u | jq -R . | jq -s .) 
+ + echo "Existing workflows:" + echo "$EXISTING" + + # Get workflows currently on GitHub as JSON array of objects + GITHUB=$(gh api repos/{owner}/{repo}/actions/workflows \ + --jq '.workflows[] | select(.path | startswith(".github")) | { name, node_id, path }' \ + | jq -s '.') + + echo "Workflows on GitHub:" + echo "$GITHUB" + + # Find deleted workflows: present on GitHub but not in main/next + DELETED=$(echo "$GITHUB" | jq -c \ + --argjson existing "$EXISTING" ' + map(select(.path as $p | $existing | index($p) | not)) + ' + ) + + echo "Deleted workflows:" + echo "$DELETED" + + # Output to GitHub Actions + { + echo "workflows<> "$GITHUB_OUTPUT" + + # Performs the actual run deletion. + # + # This contains a lot of code, but the vast majority is just pretty-printing. + - name: Delete runs from deleted workflows + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + MODE: ${{ inputs.mode }} + WORKFLOWS: ${{ steps.deleted.outputs.workflows }} + OWNER: ${{ github.repository_owner }} + REPO: ${{ github.repository }} + shell: bash --noprofile --norc -euo pipefail {0} + run: | + if [ -z "$WORKFLOWS" ]; then + echo "No workflows to delete." + exit 0 + fi + + # ================================================================================================ + # Utility functions + # ================================================================================================ + + # Fetches a page of workflow runs for a given workflow ID and cursor. + # + # We use github's graphql API here which allows us to paginate over workflow runs. + # Unfortunately `gh run list` does not support pagination, so we use the graphql API instead. + gh_workflow_run_page() { + local id="$1" + local cursor="$2" + + gh api graphql -F workflowId="$id" -F after="$cursor" \ + -f query='query($workflowId: ID!, $after: String) { + node(id: $workflowId) { + ... 
on Workflow { + runs(first: 100, after: $after) { + pageInfo { hasNextPage endCursor } + nodes { databaseId } + } + } + } + }' + } + + # ================================================================================================ + # Print helpers for nice progress and table display + # ================================================================================================ + + # Column widths (table includes three spacers for ' | ' between columns) + widths_index=9 + widths_name=30 + widths_count=14 + widths_total=12 + widths_table=$(( $widths_index + 3 + $widths_name + 3 + $widths_count + 3 + $widths_total )) + + # Repeats a character a given number of times. + repeat_char() { + local char=$1 + local count=$2 + printf "%0.s$char" $(seq 1 $count) + } + + # Prints the given header as `====
====` to match the table layout. + print_table_header() { + local header="$1" + local header_len=${#header} + local left_pad=$(( ( $widths_table - header_len - 2) / 2 )) + local right_pad=$(( $widths_table - header_len - 2 - left_pad )) + printf " \n%s %s %s\n" $(repeat_char = $left_pad) "$header" $(repeat_char = $right_pad) + } + + # Prints |---+---+---+---| with appropriate widths to accomodate the table headers. + print_table_separator() { + printf "%s+%s+%s+%s\n" \ + "$(repeat_char - $((widths_index + 1)))" \ + "$(repeat_char - $((widths_name + 2)))" \ + "$(repeat_char - $((widths_count + 2)))" \ + "$(repeat_char - $((widths_total + 1)))" + } + + # Prints a row of the table (index, workflow name, workflow count, global total) + print_table_row() { + local index=$1 + local name=$2 + local count=$3 + local total=$4 + printf "%*s | %-*s | %*s | %*s\n" \ + "$widths_index" "$index" \ + "$widths_name" "$name" \ + "$widths_count" "$count" \ + "$widths_total" "$total" + } + + # Alias for print_table_row() with empty index and total columns. + print_summary_row() { + local name=$1 + local count=$2 + print_table_row "" "$name" "$count" "" + } + + # ================================================================================================ + # Print progress table header + # ================================================================================================ + print_table_header "Workflow Cleanup Progress" + print_table_row "Index" "Workflow" "Workflow Count" "Global Total" + print_table_separator + + # ================================================================================================ + # Core workflow loop, iterate over workflows + # ================================================================================================ + + n_workflows=$(echo "$WORKFLOWS" | jq -r '. 
| length') + total=0 + summary=() + index=0 + + mapfile -t WF_ARRAY < <(echo "$WORKFLOWS" | jq -c '.[]') + for wf in "${WF_ARRAY[@]}"; do + index=$((index + 1)) + name=$(echo "$wf" | jq -r '.name') + count=0 + id=$(echo "$wf" | jq -r '.node_id') + + # Safety checks + if [ -z "$name" ]; then + echo "::error title=Workflow name empty::Resolved workflow name is empty at index $index" + exit 1 + fi + if [ -z "$id" ]; then + echo "::error title=Workflow ID missing::Workflow '$name' has no ID" + exit 1 + fi + + cursor="" + + # Paginate over workflow runs + while true; do + response=$(gh_workflow_run_page "$id" "$cursor") + + run_ids=$(echo "$response" | jq -r '.data.node.runs.nodes[].databaseId') + has_next=$(echo "$response" | jq -r '.data.node.runs.pageInfo.hasNextPage') + cursor=$(echo "$response" | jq -r '.data.node.runs.pageInfo.endCursor') + + [ -z "$run_ids" ] && break + + deleted=$(echo "$run_ids" | wc -l | tr -d ' ') + count=$((count + deleted)) + total=$((total + deleted)) + + # Print progress + print_table_row "[$index/$n_workflows]" "$name" "$count" "$total" + + if [ "$MODE" = "execute" ]; then + for run_id in $run_ids; do + gh run delete "$run_id" >/dev/null + done + fi + + [ "$has_next" != "true" ] && break + done + + summary+=("$name|$count") + done + + # ================================================================================================ + # Print a summary table + # ================================================================================================ + print_table_header "Workflow Cleanup Summary" + print_summary_row "Workflow" "Runs" + print_table_separator + for entry in "${summary[@]}"; do + wf="${entry%%|*}" + count="${entry##*|}" + print_summary_row "$wf" "$count" + done + + # ================================================================================================ + # Print totals as a footer + # ================================================================================================ + print_table_separator + 
print_summary_row "TOTAL" "$total" + + if [ "$MODE" != "execute" ]; then + echo "Dry run complete. No runs were deleted." + else + echo "Cleanup complete." + fi diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index a5d6e3cae1..1d37553412 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@v6 with: - ref: 'next' + ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - name: Install RocksDB @@ -42,7 +42,7 @@ jobs: steps: - uses: actions/checkout@v6 with: - ref: 'next' + ref: "next" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner - name: Install RocksDB @@ -54,15 +54,65 @@ jobs: - name: Check all feature combinations run: make check-features - # Check that our MSRV complies with our specified rust version. + workspace-packages: + name: list packages + runs-on: ubuntu-latest + outputs: + packages: ${{ steps.package-matrix.outputs.packages }} + # Deliberately use stable rust instead of the toolchain.toml version. + # This prevents installing the toolchain version which isn't crucial for this operation. + env: + RUSTUP_TOOLCHAIN: stable + steps: + - uses: actions/checkout@v6 + with: + ref: "next" + - name: Extract workspace packages + id: package-matrix + run: | + PACKAGES=$(cargo metadata --format-version 1 --no-deps \ + | jq -c ' + .workspace_members as $members + | .packages + | map(select(.id as $id | $members | index($id))) + | map(.name) + ') + + echo "packages=$PACKAGES" >> "$GITHUB_OUTPUT" + msrv: - name: msrv check - runs-on: ubuntu-24.04 + needs: workspace-packages + runs-on: ubuntu-latest + strategy: + matrix: + package: ${{ fromJson(needs.workspace-packages.outputs.packages) }} + # Deliberately use stable rust instead of the toolchain.toml version. + # This is prevents issues where e.g. `cargo-msrv` requires a newer version of rust than the toolchain.toml version. 
+ env: + RUSTUP_TOOLCHAIN: stable steps: - uses: actions/checkout@v6 with: - ref: 'next' - - name: check + ref: "next" + - name: Install binstall + uses: cargo-bins/cargo-binstall@main + - name: Install cargo-msrv + run: cargo binstall --no-confirm cargo-msrv + - name: Get manifest path for package + id: pkg + run: | + MANIFEST_PATH=$(cargo metadata --format-version 1 --no-deps \ + | jq -r ' + .packages[] + | select(.name == "${{ matrix.package }}") + | .manifest_path + ') + echo "manifest_path=$MANIFEST_PATH" >> "$GITHUB_OUTPUT" + - name: Show package info + run: | + echo "Package: ${{ matrix.package }}" + echo "Manifest path: ${{ steps.pkg.outputs.manifest_path }}" + cargo msrv show --manifest-path "${{ steps.pkg.outputs.manifest_path }}" + - name: Check MSRV run: | - export PATH="$HOME/.cargo/bin:$PATH" - ./scripts/check-msrv.sh + cargo msrv verify --manifest-path "${{ steps.pkg.outputs.manifest_path }}" diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index a6d63d5035..76e65d0eb7 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -39,8 +39,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: node - service: miden-node - package: node + package: miden-node + packaging_dir: node crate: miden-node arch: ${{ matrix.arch }} @@ -62,8 +62,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover - package: prover + package: miden-prover + packaging_dir: prover crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -85,8 +85,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover-proxy - package: prover-proxy + package: miden-prover-proxy + packaging_dir: prover-proxy crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -108,7 +108,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} 
gitref: ${{ env.version }} crate_dir: network-monitor - service: miden-network-monitor - package: network-monitor + package: miden-network-monitor + packaging_dir: network-monitor crate: miden-network-monitor arch: ${{ matrix.arch }} diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index 81e8d74475..d17d065325 100644 --- a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -3,8 +3,8 @@ name: Publish Debian Package on: workflow_dispatch: inputs: - service: - description: "Name of service to publish" + package: + description: "Name of package to publish" required: true type: choice options: @@ -20,7 +20,7 @@ on: - network-monitor - node - remote-prover - package: + packaging_dir: required: true description: "Name of packaging directory" type: choice @@ -48,7 +48,7 @@ permissions: jobs: publish: - name: Publish ${{ inputs.service }} ${{ matrix.arch }} Debian + name: Publish ${{ inputs.package }} ${{ matrix.arch }} Debian strategy: matrix: arch: [amd64, arm64] @@ -69,7 +69,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ inputs.version }} crate_dir: ${{ inputs.crate_dir }} - service: ${{ inputs.service }} package: ${{ inputs.package }} + packaging_dir: ${{ inputs.packaging_dir }} crate: ${{ inputs.crate }} arch: ${{ matrix.arch }} diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-dry-run.yml deleted file mode 100644 index c84a08d34e..0000000000 --- a/.github/workflows/publish-dry-run.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Publish (dry-run) - -permissions: - contents: read - -on: - push: - branches: [main, next] - -concurrency: - group: "${{ github.workflow }} @ ${{ github.ref }}" - cancel-in-progress: true - -jobs: - publish-dry-run: - name: Cargo publish dry-run - runs-on: Linux-ARM64-Runner - if: ${{ github.repository_owner == '0xMiden' }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Cleanup 
large tools for build space - uses: ./.github/actions/cleanup-runner - - name: Install RocksDB - uses: ./.github/actions/install-rocksdb - - name: Install dependencies - run: sudo apt-get update && sudo apt-get install -y jq - - name: Update Rust toolchain - run: rustup update --no-self-update - - uses: taiki-e/install-action@v2 - with: - tool: cargo-binstall@1.16.6 - - name: Install cargo-msrv - run: cargo binstall --no-confirm --force cargo-msrv - - name: Check MSRV for each workspace member - run: | - export PATH="$HOME/.cargo/bin:$PATH" - ./scripts/check-msrv.sh - - name: Run cargo publish dry-run - run: cargo publish --workspace --dry-run - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/.gitignore b/.gitignore index 0a086d3d0b..a4d92ce8ed 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ # will have compiled files and executables debug/ target/ +miden-node-stress-test-* # Generated by protox `file_descriptor_set.bin` *.bin diff --git a/CHANGELOG.md b/CHANGELOG.md index 2dc3173446..b74d3cc595 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## v0.14.0 (TBD) + +### Enhancements + +- [BREAKING] Move block proving from Blocker Producer to the Store ([#1579](https://github.com/0xMiden/miden-node/pull/1579)). +- [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). +- Validator now persists validated transactions ([#1614](https://github.com/0xMiden/miden-node/pull/1614)). +- [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). +- Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/miden-node/issues/1591)). 
+- Fixed `TransactionHeader` serialization for row insertion on database & fixed transaction cursor on retrievals ([#1701](https://github.com/0xMiden/miden-node/issues/1701)). + +### Changes + +- [BREAKING] Removed obsolete `SyncState` RPC endpoint; clients should use `SyncNotes`, `SyncNullifiers`, `SyncAccountVault`, `SyncAccountStorageMaps`, `SyncTransactions`, or `SyncChainMmr` instead ([#1636](https://github.com/0xMiden/miden-node/pull/1636)). +- Added account ID limits for `SyncTransactions`, `SyncAccountVault`, and `SyncAccountStorageMaps` to `GetLimits` responses ([#1636](https://github.com/0xMiden/miden-node/pull/1636)). +- [BREAKING] Added typed `GetAccountError` for `GetAccount` endpoint, splitting `BlockNotAvailable` into `UnknownBlock` and `BlockPruned`. `AccountNotFound` and `AccountNotPublic` now return `InvalidArgument` gRPC status instead of `NotFound`; clients should parse the error details discriminant rather than branching on status codes ([#1646](https://github.com/0xMiden/miden-node/pull/1646)). +- Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). +- Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). +- Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). +- Add #[track_caller] to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). +- Added support for generic account loading at genesis ([#1624](https://github.com/0xMiden/miden-node/pull/1624)). 
+- Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)) + - Replaced NTX Builder's in-memory state management with SQLite-backed persistence; account states, notes, and transaction effects are now stored in the database and inflight state is purged on startup ([#1662](https://github.com/0xMiden/miden-node/pull/1662)). +- [BREAKING] Reworked `miden-remote-prover`, removing the `worker`/`proxy` distinction and simplifying to a `worker` with a request queue ([#1688](https://github.com/0xMiden/miden-node/pull/1688)). + ## v0.13.7 (2026-02-25) - Updated `SyncAccountStorageMaps` and `SyncAccountVault` to allow all accounts with public state, including network accounts ([#1711](https://github.com/0xMiden/node/pull/1711)). @@ -47,6 +72,7 @@ ### Enhancements +- Cleanup old account data from the database on apply block ([#1304](https://github.com/0xMiden/miden-node/issues/1304)). - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). @@ -68,10 +94,12 @@ - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). - Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)). +- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). 
+- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). +- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch (#[#1569](https://github.com/0xMiden/miden-node/pull/1569)). - Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)). -- Pined tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). -- Added `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). -- Added check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/miden-node/pull/1536)). ### Changes @@ -136,7 +164,7 @@ - Network transaction builder now marks notes from any error as failed ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - Network transaction builder now adheres to note limit set by protocol ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - Race condition resolved in the store's `apply_block` ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - - This presented as a database locked error and in rare cases a desync between the mempool and store. + - This presented as a database locked error and in rare cases a desync between the mempool and store. ## v0.12.6 (2026-01-12) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 94e6830753..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,126 +0,0 @@ -# Contributing to Miden Node - -#### First off, thanks for taking the time to contribute! 
- -We want to make contributing to this project as easy and transparent as possible, whether it's: - -- Reporting a [bug](https://github.com/0xMiden/miden-node/issues/new?assignees=&labels=bug&projects=&template=1-bugreport.yml) -- Taking part in [discussions](https://github.com/0xMiden/miden-node/discussions) -- Submitting a [fix](https://github.com/0xMiden/miden-node/pulls) -- Proposing new [features](https://github.com/0xMiden/miden-node/issues/new?assignees=&labels=enhancement&projects=&template=2-feature-request.yml) - -  - -## Flow - -We are using [Github Flow](https://docs.github.com/en/get-started/quickstart/github-flow), so all code changes happen through pull requests from a [forked repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). - -### Branching - -- The current active branch is `next`. Every branch with a fix/feature must be forked from `next`. - -- The branch name should contain a short issue/feature description separated with hyphens [(kebab-case)](https://en.wikipedia.org/wiki/Letter_case#Kebab_case). - - For example, if the issue title is `Fix functionality X in component Y` then the branch name will be something like: `fix-x-in-y`. - -- New branch should be rebased from `next` before submitting a PR in case there have been changes to avoid merge commits. i.e. this branches state: - - ``` - A---B---C fix-x-in-y - / - D---E---F---G next - | | - (F, G) changes happened after `fix-x-in-y` forked - ``` - - should become this after rebase: - - ``` - A'--B'--C' fix-x-in-y - / - D---E---F---G next - ``` - - More about rebase [here](https://git-scm.com/docs/git-rebase) and [here](https://www.atlassian.com/git/tutorials/rewriting-history/git-rebase#:~:text=What%20is%20git%20rebase%3F,of%20a%20feature%20branching%20workflow.) 
- -### Commit messages - -- Commit messages should be written in a short, descriptive manner and be prefixed with tags for the change type and scope (if possible) according to the [semantic commit](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716) scheme. For example, a new change to the `miden-node-store` crate might have the following message: `feat(miden-node-store): fix block-headers database schema` - -- Also squash commits to logically separated, distinguishable stages to keep git log clean: - - ``` - 7hgf8978g9... Added A to X \ - \ (squash) - gh354354gh... oops, typo --- * ---------> 9fh1f51gh7... feat(X): add A && B - / - 85493g2458... Added B to X / - - - 789fdfffdf... Fixed D in Y \ - \ (squash) - 787g8fgf78... blah blah --- * ---------> 4070df6f00... fix(Y): fixed D && C - / - 9080gf6567... Fixed C in Y / - ``` - -### Code Style and Documentation - -- For documentation in the codebase, we follow the [rustdoc](https://doc.rust-lang.org/rust-by-example/meta/doc.html) convention with no more than 100 characters per line. -- For code sections, we use code separators like the following to a width of 100 characters:: - - ``` - // CODE SECTION HEADER - // ================================================================================ - ``` - -- [Rustfmt](https://github.com/rust-lang/rustfmt), [Clippy](https://github.com/rust-lang/rust-clippy) and [Rustdoc](https://doc.rust-lang.org/rustdoc/index.html) linting is included in CI pipeline. Anyways it's preferable to run linting locally before push. To simplify running these commands in a reproducible manner we use `make` commands, you can run: - - ``` - make lint - ``` - -You can find more information about the `make` commands in the [Makefile](Makefile) - -### Testing - -After writing code different types of tests (unit, integration, end-to-end) are required to make sure that the correct behavior has been achieved and that no bugs have been introduced. 
You can run tests using the following command: - -``` -make test -``` - -### Versioning - -We use [semver](https://semver.org/) naming convention. - -  - -## Pre-PR checklist - -To make sure all commits adhere to our programming standards please follow the checklist: - -1. Repo forked and branch created from `next` according to the naming convention. -2. Commit messages and code style follow conventions. -3. Tests added for new functionality. -4. Documentation/comments updated for all changes according to our documentation convention. -5. Spellchecking ([typos](https://github.com/crate-ci/typos/tree/master?tab=readme-ov-file#install)), Rustfmt, Clippy and Rustdoc linting passed (run with `make lint`). -6. New branch rebased from `next`. - -  - -## Write bug reports with detail, background, and sample code - -**Great Bug Reports** tend to have: - -- A quick summary and/or background -- Steps to reproduce -- What you expected would happen -- What actually happens -- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) - -  - -## Any contributions you make will be under the MIT Software License - -In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern. 
diff --git a/Cargo.lock b/Cargo.lock index e8e6c19983..763c7cf5be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,19 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" -dependencies = [ - "cfg-if", - "getrandom 0.3.4", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.4" @@ -50,18 +37,12 @@ dependencies = [ ] [[package]] -name = "alloc-no-stdlib" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" +name = "alloca" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" dependencies = [ - "alloc-no-stdlib", + "cc", ] [[package]] @@ -137,21 +118,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.100" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" -dependencies = [ - "backtrace", -] - -[[package]] -name = "arc-swap" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" -dependencies = [ - "rustversion", -] +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "arrayref" @@ -188,16 +157,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "atomic" -version = "0.6.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" -dependencies = [ - "bytemuck", + "syn 2.0.117", ] [[package]] @@ -207,21 +167,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] -name = "atty" -version = "0.2.14" +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "aws-lc-rs" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", + "aws-lc-sys", + "zeroize", ] [[package]] -name = "autocfg" -version = "1.5.0" +name = "aws-lc-sys" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] [[package]] name = "axum" @@ -348,16 +319,16 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.10.0", + "bitflags", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -377,24 +348,9 @@ checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" - -[[package]] -name = "blake2" -version = "0.10.6" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest", -] +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "blake3" @@ -419,38 +375,11 @@ dependencies = [ "generic-array", ] -[[package]] -name = "brotli" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bumpalo" -version = "3.19.1" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" - -[[package]] -name = "bytemuck" -version = "1.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "byteorder" @@ -460,9 +389,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "bzip2-sys" @@ -482,9 +411,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.54" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ "find-msvc-tools", "jobserver", @@ -492,6 +421,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -501,40 +436,18 @@ dependencies = [ "nom", ] -[[package]] -name = "cf-rustracing" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f85c3824e4191621dec0551e3cef3d511f329da9a8990bf3e450a85651d97e" -dependencies = [ - "backtrace", - "rand 0.8.5", - "tokio", - "trackable", -] - -[[package]] -name = "cf-rustracing-jaeger" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a5f80d44c257c3300a7f45ada676c211e64bbbac591bbec19344a8f61fbcab" -dependencies = [ - "cf-rustracing", - "hostname", - "local-ip-address", - "percent-encoding", - "rand 0.9.2", - "thrift_codec", - "tokio", - "trackable", -] - [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] 
name = "chacha20" version = "0.9.1" @@ -561,9 +474,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ "iana-time-zone", "js-sys", @@ -623,82 +536,43 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.25" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "atty", - "bitflags 1.3.2", - "clap_derive 3.2.25", - "clap_lex 0.2.4", - "indexmap 1.9.3", - "once_cell", - "strsim 0.10.0", - "termcolor", - "textwrap", -] - -[[package]] -name = "clap" -version = "4.5.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", - "clap_derive 4.5.49", + "clap_derive", ] [[package]] name = "clap_builder" -version = "4.5.54" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstream", "anstyle", - "clap_lex 0.7.7", - "strsim 0.11.1", -] - -[[package]] -name = "clap_derive" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" -dependencies = [ - "heck 0.4.1", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", + "clap_lex", + "strsim", ] [[package]] name = "clap_derive" -version = "4.5.49" +version = "4.5.55" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", + "syn 2.0.117", ] [[package]] name = "clap_lex" -version = "0.7.7" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "cmake" @@ -715,6 +589,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -762,36 +646,26 @@ dependencies = [ "libc", ] -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - [[package]] name = "criterion" -version = "0.5.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" dependencies = [ + "alloca", "anes", "cast", "ciborium", - "clap 4.5.54", + 
"clap", "criterion-plot", - "is-terminal", - "itertools 0.10.5", + "itertools 0.13.0", "num-traits", - "once_cell", "oorandom", + "page_size", "plotters", "rayon", "regex", "serde", - "serde_derive", "serde_json", "tinytemplate", "walkdir", @@ -799,12 +673,12 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.5.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" dependencies = [ "cast", - "itertools 0.10.5", + "itertools 0.13.0", ] [[package]] @@ -826,15 +700,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "crossbeam-queue" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -894,26 +759,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "daemonize" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfdaacb3c887a54d41bdf48d3af8873b3f5566469f8ba21b92057509f116e" -dependencies = [ - "libc", -] - -[[package]] -name = "darling" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" -dependencies = [ - "darling_core 0.20.11", - "darling_macro 0.20.11", + "syn 2.0.117", ] [[package]] @@ -922,22 +768,8 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "darling_core 0.21.3", - "darling_macro 0.21.3", -] - -[[package]] -name = 
"darling_core" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.11.1", - "syn 2.0.114", + "darling_core", + "darling_macro", ] [[package]] @@ -950,19 +782,8 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.11.1", - "syn 2.0.114", -] - -[[package]] -name = "darling_macro" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" -dependencies = [ - "darling_core 0.20.11", - "quote", - "syn 2.0.114", + "strsim", + "syn 2.0.117", ] [[package]] @@ -971,9 +792,9 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core 0.21.3", + "darling_core", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1015,7 +836,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524bc3df0d57e98ecd022e21ba31166c2625e7d3e5bcc4510efaeeab4abcab04" dependencies = [ "deadpool-runtime", - "tracing", ] [[package]] @@ -1030,55 +850,13 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", ] -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder" -version = "0.20.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" -dependencies = [ - "darling 0.20.11", - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "derive_builder_macro" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" -dependencies = [ - "derive_builder_core", - "syn 2.0.114", -] - [[package]] name = "derive_more" version = "2.1.1" @@ -1097,7 +875,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1127,7 +905,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1147,7 +925,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1176,7 +954,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1197,14 +975,20 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd122633e4bef06db27737f21d3738fb89c8f6d5360d6d9d7635dda142a7757e" dependencies = [ - "darling 0.21.3", + "darling", "either", - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "ecdsa" version = "0.16.9" @@ -1271,9 +1055,9 @@ dependencies = [ [[package]] name = "ena" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" +checksum = "eabffdaee24bd1bf95c5ef7cec31260444317e72ea56c4c91750e8b7ee58d5f1" dependencies = [ "log", ] @@ -1289,9 +1073,9 @@ dependencies = [ [[package]] name = "env_filter" -version = "0.1.4" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf3c259d255ca70051b30e2e95b5446cdb8949ac4cd22c0d7fd634d89f568e2" +checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" dependencies = [ "log", "regex", @@ -1299,9 +1083,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" +checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" dependencies = [ "anstream", "anstyle", @@ -1348,27 +1132,11 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" -[[package]] -name = "figment" -version = "0.10.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" -dependencies = [ - "atomic", - "parking_lot", - "pear", - "serde", - "tempfile", - "toml 0.8.23", - "uncased", - "version_check", -] - [[package]] name = "find-msvc-tools" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" +checksum = 
"5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "fixedbitset" @@ -1376,17 +1144,6 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" -[[package]] -name = "flate2" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" -dependencies = [ - "crc32fast", - "libz-ng-sys", - "miniz_oxide", -] - [[package]] name = "flume" version = "0.11.1" @@ -1417,21 +1174,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.2" @@ -1443,18 +1185,24 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.2.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" +checksum = "73fde052dbfc920003cfd2c8e2c6e6d4cc7c1091538c3a24226cec0665ab08c0" dependencies = [ "autocfg", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1467,9 +1215,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1477,15 +1225,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -1494,32 +1242,32 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] 
name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-timer" @@ -1529,9 +1277,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -1541,7 +1289,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -1599,15 +1346,18 @@ dependencies = [ ] [[package]] -name = "getset" -version = "0.1.6" +name = "getrandom" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" dependencies = [ - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.114", + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasip3", + "wasm-bindgen", ] [[package]] @@ -1645,7 +1395,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.13.0", + "indexmap", "slab", "tokio", "tokio-util", @@ -1663,20 +1413,12 @@ dependencies = [ "zerocopy", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ - "allocator-api2", - "equivalent", "foldhash 0.1.5", ] @@ -1694,27 +1436,12 @@ dependencies = [ "serde_core", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.5.2" @@ -1745,17 +1472,6 @@ dependencies = [ "digest", ] -[[package]] -name = "hostname" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" -dependencies = [ - "cfg-if", - "libc", - "windows-link", -] - [[package]] name = "http" version = "1.4.0" @@ -1859,32 +1575,15 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" -version = "0.1.19" +version = "0.1.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ "base64", "bytes", "futures-channel", - "futures-core", "futures-util", "http", "http-body", @@ -1903,9 +1602,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.64" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2006,6 +1705,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -2039,16 +1744,6 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.13.0" @@ -2057,14 +1752,10 @@ checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", "hashbrown 0.16.1", + "serde", + "serde_core", ] -[[package]] -name = "inlinable_string" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" - [[package]] name = "inout" version = "0.1.4" @@ -2090,17 +1781,6 @@ dependencies = [ 
"serde", ] -[[package]] -name = "is-terminal" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" -dependencies = [ - "hermit-abi 0.5.2", - "libc", - "windows-sys 0.61.2", -] - [[package]] name = "is_ci" version = "1.2.0" @@ -2115,9 +1795,9 @@ checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" -version = "0.10.5" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] @@ -2139,9 +1819,9 @@ checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jiff" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67e8da4c49d6d9909fe03361f9b620f58898859f5c7aded68351e85e71ecf50" +checksum = "b3e3d65f018c6ae946ab16e80944b97096ed73c35b221d1c478a6c81d8f57940" dependencies = [ "jiff-static", "log", @@ -2152,15 +1832,37 @@ dependencies = [ [[package]] name = "jiff-static" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c84ee7f197eca9a86c6fd6cb771e55eb991632f15f2bc3ca6ec838929e6e78" +checksum = "a17c2b211d863c7fde02cbea8a3c1a439b98e109286554f2860bdded7ff83818" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", +] + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", ] +[[package]] +name = "jni-sys" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.34" @@ -2173,9 +1875,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "14dc6f6450b3f6d4ed5b16327f38fed626d375a886159ca555bd7822c0c3a5a6" dependencies = [ "once_cell", "wasm-bindgen", @@ -2197,9 +1899,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] @@ -2215,7 +1917,7 @@ dependencies = [ "ena", "itertools 0.14.0", "lalrpop-util", - "petgraph", + "petgraph 0.7.1", "regex", "regex-syntax", "sha3", @@ -2240,11 +1942,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" -version = "0.2.180" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libloading" @@ -2258,9 +1966,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "librocksdb-sys" @@ -2287,33 +1995,17 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-ng-sys" -version = "1.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf914b7dd154ca9193afec311d8e39345c1bd93b48b3faa77329f0db8f553c0" -dependencies = [ - "cmake", - "libc", -] - [[package]] name = "libz-sys" -version = "1.1.23" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" dependencies = [ "cc", "pkg-config", "vcpkg", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -2322,9 +2014,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" @@ -2332,17 +2024,6 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" -[[package]] -name = "local-ip-address" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" -dependencies = [ - "libc", - "neli", - "windows-sys 0.61.2", -] - [[package]] name = "lock_api" version = 
"0.4.14" @@ -2380,7 +2061,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2407,18 +2088,15 @@ dependencies = [ [[package]] name = "lru" -version = "0.14.0" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8cc7106155f10bdf99a6f379688f543ad6596a415375b36a59a054ceda1198" -dependencies = [ - "hashbrown 0.15.5", -] +checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" [[package]] -name = "lru" -version = "0.16.3" +name = "lru-slab" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] name = "lz4-sys" @@ -2453,24 +2131,14 @@ checksum = "120fa187be19d9962f0926633453784691731018a2bf936ddb4e29101b79c4a7" [[package]] name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "memoffset" -version = "0.6.5" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "miden-agglayer" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a867217bab689c0539f6b4797cb452f0932de6904479a38f1322e045b9383b" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "fs-err", "miden-assembly", @@ -2485,9 +2153,9 @@ dependencies = [ [[package]] name = "miden-air" -version = "0.20.2" +version = "0.20.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" +checksum = "5cca9632323bd4e32ae5b21b101ed417a646f5d72196b1bf3f1ca889a148322a" dependencies = [ "miden-core", "miden-utils-indexing", @@ -2498,9 +2166,9 @@ dependencies = [ [[package]] name = "miden-assembly" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c6a18e29c03141cf9044604390a00691c7342924ec865b4acfdd560ff41ede" +checksum = "2395b2917aea613a285d3425d1ca07e6c45442e2b34febdea2081db555df62fc" dependencies = [ "env_logger", "log", @@ -2513,9 +2181,9 @@ dependencies = [ [[package]] name = "miden-assembly-syntax" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7458ff670f5a514bf972aa84d6e1851a4c4e9afa351f53b71bdc2218b99254b6" +checksum = "1f9bed037d137f209b9e7b28811ec78c0536b3f9259d6f4ceb5823c87513b346" dependencies = [ "aho-corasick", "env_logger", @@ -2537,9 +2205,8 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e92a0ddae8d0983e37bc636edba741947b1e3dc63baed2ad85921342080154a" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -2547,9 +2214,9 @@ dependencies = [ [[package]] name = "miden-core" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a5c9c8c3d42ae8381ed49e47ff9ad2d2e345c4726761be36b7d4000ebb40ae" +checksum = "8714aa5f86c59e647b7417126b32adc4ef618f835964464f5425549df76b6d03" dependencies = [ "derive_more", "itertools 0.14.0", @@ -2569,9 +2236,9 @@ dependencies = [ [[package]] name = "miden-core-lib" -version = "0.20.2" +version = "0.20.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6556494ea5576803730fa15015bee6bd9d1a117450f22e7df0883421e7423674" +checksum = "1bb16a4d39202c59a7964d3585cd5af21a46a759ff6452cb5f20723ed5af4362" dependencies = [ "env_logger", "fs-err", @@ -2586,9 +2253,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.19.4" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" +checksum = "999926d48cf0929a39e06ce22299084f11d307ca9e765801eb56bf192b07054b" dependencies = [ "blake3", "cc", @@ -2603,8 +2270,8 @@ dependencies = [ "miden-crypto-derive", "num", "num-complex", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_core 0.9.5", "rand_hc", "rayon", @@ -2621,19 +2288,19 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.4" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" +checksum = "3550b5656b791fec59c0b6089b4d0368db746a34749ccd47e59afb01aa877e9e" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "miden-debug-types" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19123e896f24b575e69921a79a39a0a4babeb98404a8601017feb13b75d653b3" +checksum = "cd1494f102ad5b9fa43e391d2601186dc601f41ab7dcd8a23ecca9bf3ef930f4" dependencies = [ "memchr", "miden-crypto", @@ -2643,7 +2310,7 @@ dependencies = [ "miden-utils-sync", "paste", "serde", - "serde_spanned 1.0.4", + "serde_spanned", "thiserror 2.0.18", ] @@ -2658,9 +2325,9 @@ dependencies = [ [[package]] name = "miden-mast-package" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d6a322b91efa1bb71e224395ca1fb9ca00e2614f89427e35d8c42a903868a3" +checksum = 
"692185bfbe0ecdb28bf623f1f8c88282cd6727ba081a28e23b301bdde1b45be4" dependencies = [ "derive_more", "miden-assembly-syntax", @@ -2691,7 +2358,7 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.114", + "syn 2.0.117", "terminal_size 0.3.0", "textwrap", "thiserror 2.0.18", @@ -2707,26 +2374,27 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "miden-network-monitor" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", "axum", - "clap 4.5.54", + "clap", "hex", "humantime", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", "miden-testing", "miden-tx", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "reqwest", "serde", "serde_json", @@ -2740,16 +2408,16 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", - "clap 4.5.54", - "figment", + "clap", "fs-err", "hex", "humantime", "miden-node-block-producer", "miden-node-ntx-builder", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-rpc", "miden-node-store", "miden-node-utils", @@ -2761,26 +2429,26 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", "futures", "itertools 0.14.0", - "miden-block-prover", "miden-node-proto", "miden-node-proto-build", "miden-node-store", "miden-node-test-macro", "miden-node-utils", + "miden-node-validator", "miden-protocol", "miden-remote-prover-client", "miden-standards", "miden-tx", "miden-tx-batch-prover", "pretty_assertions", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rstest", "serial_test", "tempfile", @@ -2795,22 +2463,37 @@ dependencies = [ "winterfell", ] +[[package]] +name = "miden-node-db" +version = "0.14.0" +dependencies = [ + 
"deadpool", + "deadpool-diesel", + "deadpool-sync", + "diesel", + "miden-protocol", + "thiserror 2.0.18", + "tracing", +] + [[package]] name = "miden-node-grpc-error-macro" -version = "0.13.7" +version = "0.14.0" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "miden-node-ntx-builder" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", + "diesel", + "diesel_migrations", "futures", - "indexmap 2.13.0", "libsqlite3-sys", + "miden-node-db", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", @@ -2818,7 +2501,10 @@ dependencies = [ "miden-remote-prover-client", "miden-standards", "miden-tx", + "prost", + "rand_chacha", "rstest", + "tempfile", "thiserror 2.0.18", "tokio", "tokio-stream", @@ -2830,7 +2516,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", @@ -2839,6 +2525,7 @@ dependencies = [ "http", "miden-node-grpc-error-macro", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -2854,7 +2541,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.13.7" +version = "0.14.0" dependencies = [ "fs-err", "miette", @@ -2862,9 +2549,13 @@ dependencies = [ "tonic-prost-build", ] +[[package]] +name = "miden-node-rocksdb-cxx-linkage-fix" +version = "0.14.0" + [[package]] name = "miden-node-rpc" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", "futures", @@ -2896,59 +2587,66 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", "criterion", "deadpool", "deadpool-diesel", - "deadpool-sync", "diesel", "diesel_migrations", "fs-err", + "futures", "hex", - "indexmap 2.13.0", + "indexmap", "libsqlite3-sys", + "miden-agglayer", + "miden-block-prover", "miden-crypto", + "miden-node-db", "miden-node-proto", "miden-node-proto-build", 
+ "miden-node-rocksdb-cxx-linkage-fix", "miden-node-test-macro", "miden-node-utils", "miden-protocol", + "miden-remote-prover-client", "miden-standards", "pretty_assertions", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "regex", "serde", + "tempfile", "termtree", "thiserror 2.0.18", "tokio", "tokio-stream", - "toml 0.9.11+spec-1.1.0", + "toml 1.0.3+spec-1.1.0", "tonic", "tonic-reflection", "tower-http", "tracing", + "url", ] [[package]] name = "miden-node-stress-test" -version = "0.13.7" +version = "0.14.0" dependencies = [ - "clap 4.5.54", + "clap", "fs-err", "futures", "miden-air", - "miden-block-prover", "miden-node-block-producer", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-store", "miden-node-utils", "miden-protocol", "miden-standards", - "rand 0.9.2", + "rand", "rayon", "tokio", "tonic", @@ -2960,26 +2658,25 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "miden-node-utils" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", "bytes", - "figment", "http", "http-body-util", "itertools 0.14.0", - "lru 0.16.3", + "lru", + "miden-node-rocksdb-cxx-linkage-fix", "miden-protocol", "opentelemetry", "opentelemetry-otlp", "opentelemetry_sdk", - "rand 0.9.2", - "serde", + "rand", "thiserror 2.0.18", "tokio", "tonic", @@ -2993,9 +2690,12 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", + "diesel", + "diesel_migrations", + "miden-node-db", "miden-node-proto", "miden-node-proto-build", "miden-node-utils", @@ -3012,9 +2712,9 @@ dependencies = [ [[package]] name = "miden-processor" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a659fac55de14647e2695f03d96b83ff94fe65fd31e74d81c225ec52af25acf" +checksum = "0e09f7916b1e7505f74a50985a185fdea4c0ceb8f854a34c90db28e3f7da7ab6" 
dependencies = [ "itertools 0.14.0", "miden-air", @@ -3032,9 +2732,8 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "785be319a826c9cb43d2e1a41a1fb1eee3f2baafe360e0d743690641f7c93ad5" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "bech32", "fs-err", @@ -3049,34 +2748,33 @@ dependencies = [ "miden-protocol-macros", "miden-utils-sync", "miden-verifier", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_xoshiro", "regex", "semver 1.0.27", "serde", "thiserror 2.0.18", - "toml 0.9.11+spec-1.1.0", + "toml 0.9.12+spec-1.1.0", "walkdir", "winter-rand-utils", ] [[package]] name = "miden-protocol-macros" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dc854c1b9e49e82d3f39c5710345226e0b2a62ec0ea220c616f1f3a099cfb3" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "miden-prover" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5df61f50f27886f6f777d6e0cdf785f7db87dd881799a84a801e7330c189c8" +checksum = "d45e30526be72b8af0fd1d8b24c9cba8ac1187ca335dcee38b8e5e20234e7698" dependencies = [ "miden-air", "miden-debug-types", @@ -3088,18 +2786,17 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.13.7" +version = "0.14.0" dependencies = [ "anyhow", "async-trait", - "axum", - "bytes", - "clap 4.5.54", + "clap", "http", "humantime", "miden-block-prover", "miden-node-proto", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -3108,36 +2805,25 @@ dependencies = [ 
"miden-tx-batch-prover", "miette", "opentelemetry", - "pingora", - "pingora-core", - "pingora-limits", - "pingora-proxy", - "prometheus 0.14.0", "prost", - "reqwest", - "semver 1.0.27", - "serde", - "serde_qs", - "thiserror 2.0.18", "tokio", "tokio-stream", "tonic", "tonic-health", "tonic-prost", "tonic-prost-build", + "tonic-reflection", "tonic-web", "tower-http", "tracing", - "tracing-opentelemetry", - "uuid", ] [[package]] name = "miden-remote-prover-client" -version = "0.13.7" +version = "0.14.0" dependencies = [ "fs-err", - "getrandom 0.3.4", + "getrandom 0.4.1", "miden-node-proto-build", "miden-protocol", "miden-tx", @@ -3154,9 +2840,8 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e33771fc35e1e640582bcd26c88b2ab449dd3a70888b315546d0d3447f4bb3" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "fs-err", "miden-assembly", @@ -3164,7 +2849,7 @@ dependencies = [ "miden-core-lib", "miden-processor", "miden-protocol", - "rand 0.9.2", + "rand", "regex", "thiserror 2.0.18", "walkdir", @@ -3172,9 +2857,8 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae5d41a888d1a5e520a9312a170975d0fbadefb1b9200543cebdf54dd0960310" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3187,16 +2871,16 @@ dependencies = [ "miden-standards", "miden-tx", "miden-tx-batch-prover", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", + "thiserror 2.0.18", "winterfell", ] [[package]] name = "miden-tx" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"430e4ee02b5efb71b104926e229441e0071a93a259a70740bf8c436495caa64f" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-processor", "miden-protocol", @@ -3208,9 +2892,8 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03bc209b6487ebac0de230461e229a99d17ed73596c7d99fc59eea47a28a89cc" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", "miden-tx", @@ -3218,9 +2901,9 @@ dependencies = [ [[package]] name = "miden-utils-core-derive" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa207ffd8b26a79d9b5b246a352812f0015c0bb8f75492ec089c5c8e6d5f9e2b" +checksum = "a1b1d490e6d7b509622d3c2cc69ffd66ad48bf953dc614579b568fe956ce0a6c" dependencies = [ "proc-macro2", "quote", @@ -3229,9 +2912,9 @@ dependencies = [ [[package]] name = "miden-utils-diagnostics" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2f55477d410542a5d8990ca04856adf5bef91bfa3b54ca3c03a5ff14a6e25c" +checksum = "52658f6dc091c1c78e8b35ee3e7ff3dad53051971a3c514e461f581333758fe7" dependencies = [ "miden-crypto", "miden-debug-types", @@ -3242,18 +2925,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" +checksum = "eeff7bcb7875b222424bdfb657a7cf21a55e036aa7558ebe1f5d2e413b440d0d" dependencies = [ "thiserror 2.0.18", ] [[package]] name = "miden-utils-sync" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da7fa8f5fd27f122c83f55752f2a964bbfc2b713de419e9c152f7dcc05c194ec" +checksum = "41d53d1ab5b275d8052ad9c4121071cb184bc276ee74354b0d8a2075e5c1d1f0" dependencies = [ "lock_api", "loom", @@ -3262,9 +2945,9 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.20.2" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" +checksum = "b13816663794beb15c8a4721c15252eb21f3b3233525684f60c7888837a98ff4" dependencies = [ "miden-air", "miden-core", @@ -3311,7 +2994,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3321,7 +3004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36c791ecdf977c99f45f23280405d7723727470f6689a5e6dbf513ac547ae10d" dependencies = [ "serde", - "toml 0.9.11+spec-1.1.0", + "toml 0.9.12+spec-1.1.0", ] [[package]] @@ -3354,7 +3037,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", - "simd-adler32", ] [[package]] @@ -3383,70 +3065,12 @@ dependencies = [ "getrandom 0.2.17", ] -[[package]] -name = "native-tls" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe 0.1.6", - "openssl-sys", - "schannel", - "security-framework 2.11.1", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "neli" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" -dependencies = [ - "bitflags 2.10.0", - "byteorder", - "derive_builder", - "getset", - "libc", - "log", - 
"neli-proc-macros", - "parking_lot", -] - -[[package]] -name = "neli-proc-macros" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" -dependencies = [ - "either", - "proc-macro2", - "quote", - "serde", - "syn 2.0.114", -] - [[package]] name = "new_debug_unreachable" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" -[[package]] -name = "nix" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", - "memoffset", -] - [[package]] name = "nom" version = "7.1.3" @@ -3501,9 +3125,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-derive" @@ -3513,7 +3137,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3563,7 +3187,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.5.2", + "hermit-abi", "libc", ] @@ -3601,60 +3225,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] -name = "openssl" -version = "0.10.75" +name = "openssl-probe" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" -dependencies = [ - "bitflags 2.10.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] -name = "openssl-macros" -version = "0.1.1" +name = "opentelemetry" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - -[[package]] -name = "openssl-probe" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" - -[[package]] -name = "openssl-sys" -version = "0.9.111" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "opentelemetry" -version = "0.31.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" +checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" dependencies = [ "futures-core", "futures-sink", @@ -3704,23 +3284,27 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.2", + "rand", "thiserror 2.0.18", "tokio", "tokio-stream", ] [[package]] -name = "os_str_bytes" -version = "6.6.1" +name = "owo-colors" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" +checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d" [[package]] -name = "owo-colors" -version = "4.2.3" +name = "page_size" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "parking_lot" @@ -3751,29 +3335,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "pear" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" -dependencies = [ - "inlinable_string", - "pear_codegen", - "yansi", -] - -[[package]] -name = "pear_codegen" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" -dependencies = [ - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn 2.0.114", -] - [[package]] name = "percent-encoding" version = "2.3.2" @@ -3787,7 +3348,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.13.0", + "indexmap", +] + +[[package]] +name = "petgraph" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" +dependencies = [ + "fixedbitset", + "hashbrown 0.15.5", + "indexmap", ] [[package]] @@ -3816,7 +3388,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3831,251 +3403,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pingora" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a1f02a6347e81953ab831fdcf090a028db12d67ec3badf47831d1299dac6e20" -dependencies = [ - "pingora-core", - "pingora-http", - "pingora-load-balancing", - "pingora-proxy", - "pingora-timeout", -] - -[[package]] -name = "pingora-cache" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" -dependencies = [ - "ahash", - "async-trait", - "blake2", - "bytes", - "cf-rustracing", - "cf-rustracing-jaeger", - "hex", - "http", - "httparse", - "httpdate", - "indexmap 1.9.3", - "log", - "lru 0.14.0", - "once_cell", - "parking_lot", - "pingora-core", - "pingora-error", - "pingora-header-serde", - "pingora-http", - "pingora-lru", - "pingora-timeout", - "rand 0.8.5", - "regex", - "rmp", - "rmp-serde", - "serde", - "strum", - "tokio", -] - -[[package]] -name = "pingora-core" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" -dependencies = [ - "ahash", - "async-trait", - "brotli", - "bytes", - "chrono", - "clap 3.2.25", - "daemonize", - "derivative", - "flate2", - "futures", - "h2", - "http", - "httparse", - "httpdate", - "libc", - "log", - "nix", - "once_cell", - "openssl-probe 0.1.6", - "parking_lot", - "percent-encoding", - "pingora-error", - "pingora-http", - "pingora-pool", - "pingora-runtime", - "pingora-timeout", - "prometheus 0.13.4", - "rand 0.8.5", - "regex", - "serde", - "serde_yaml", - "sfv", - "socket2", - "strum", - "strum_macros", - "tokio", - "tokio-test", - "unicase", - "windows-sys 0.59.0", - 
"zstd", -] - -[[package]] -name = "pingora-error" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52119570d3f4644e09654ad24df2b7d851bf12eaa8c4148b4674c7f90916598e" - -[[package]] -name = "pingora-header-serde" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "252a16def05c7adbbdda776e87b2be36e9481c8a77249207a2f3b563e8933b35" -dependencies = [ - "bytes", - "http", - "httparse", - "pingora-error", - "pingora-http", - "thread_local", - "zstd", - "zstd-safe", -] - -[[package]] -name = "pingora-http" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3542fd0fd0a83212882c5066ae739ba51804f20d624ff7e12ec85113c5c89a" -dependencies = [ - "bytes", - "http", - "pingora-error", -] - -[[package]] -name = "pingora-ketama" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5dd8546b1874d5cfca594375c1cfb852c3dffd4f060428fa031a6e790dea18" -dependencies = [ - "crc32fast", -] - -[[package]] -name = "pingora-limits" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" -dependencies = [ - "ahash", -] - -[[package]] -name = "pingora-load-balancing" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5bb0314830a64b73b50b3782f3089f87947b61b4324c804d6f8d4ff9ce1c70" -dependencies = [ - "arc-swap", - "async-trait", - "derivative", - "fnv", - "futures", - "http", - "log", - "pingora-core", - "pingora-error", - "pingora-http", - "pingora-ketama", - "pingora-runtime", - "rand 0.8.5", - "tokio", -] - -[[package]] -name = "pingora-lru" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" -dependencies = [ - "arrayvec", - "hashbrown 0.16.1", - 
"parking_lot", - "rand 0.8.5", -] - -[[package]] -name = "pingora-pool" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "996c574f30a6e1ad10b47ac1626a86e0e47d5075953dd049d60df16ba5f7076e" -dependencies = [ - "crossbeam-queue", - "log", - "lru 0.14.0", - "parking_lot", - "pingora-timeout", - "thread_local", - "tokio", -] - -[[package]] -name = "pingora-proxy" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c4097fd2639905bf5b81f3618551cd826d5e03aac063e17fd7a4137f19c1a5b" -dependencies = [ - "async-trait", - "bytes", - "clap 3.2.25", - "futures", - "h2", - "http", - "log", - "once_cell", - "pingora-cache", - "pingora-core", - "pingora-error", - "pingora-http", - "rand 0.8.5", - "regex", - "tokio", -] - -[[package]] -name = "pingora-runtime" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccc165021cf55a39b9e760121b22c4260b17a0b2c530d5b93092fc5bc765b94" -dependencies = [ - "once_cell", - "rand 0.8.5", - "thread_local", - "tokio", -] - -[[package]] -name = "pingora-timeout" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548cd21d41611c725827677937e68f2cd008bbfa09f3416d3fbad07e1e42f6d7" -dependencies = [ - "once_cell", - "parking_lot", - "pin-project-lite", - "thread_local", - "tokio", -] - [[package]] name = "pkcs8" version = "0.10.2" @@ -4133,15 +3460,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" dependencies = [ "portable-atomic", ] @@ -4193,7 +3520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4202,53 +3529,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.10+spec-1.0.0", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" -dependencies = [ - "proc-macro2", - "quote", -] - -[[package]] -name = "proc-macro-error2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" -dependencies = [ - "proc-macro-error-attr2", - "proc-macro2", - "quote", - "syn 2.0.114", + "toml_edit", ] [[package]] @@ -4260,61 +3541,18 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "proc-macro2-diagnostics" -version = "0.10.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", - "version_check", - "yansi", -] - -[[package]] -name = "prometheus" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" -dependencies = [ - "cfg-if", - "fnv", - "lazy_static", - "memchr", - "parking_lot", - "protobuf 2.28.0", - "thiserror 1.0.69", -] - -[[package]] -name = "prometheus" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a" -dependencies = [ - "cfg-if", - "fnv", - "lazy_static", - "memchr", - "parking_lot", - "protobuf 3.7.2", - "thiserror 2.0.18", -] - [[package]] name = "proptest" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" +checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.10.0", + "bitflags", "num-traits", - "rand 0.9.2", - "rand_chacha 0.9.0", + "rand", + "rand_chacha", "rand_xorshift", "regex-syntax", "rusty-fork", @@ -4330,7 +3568,7 @@ checksum = "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4345,23 +3583,22 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ - "heck 0.5.0", + "heck", "itertools 0.14.0", "log", "multimap", - 
"once_cell", - "petgraph", + "petgraph 0.8.3", "prettyplease", "prost", "prost-types", "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.114", + "syn 2.0.117", "tempfile", ] @@ -4375,7 +3612,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4392,39 +3629,13 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ "prost", ] -[[package]] -name = "protobuf" -version = "2.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" - -[[package]] -name = "protobuf" -version = "3.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" -dependencies = [ - "once_cell", - "protobuf-support", - "thiserror 1.0.69", -] - -[[package]] -name = "protobuf-support" -version = "3.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" -dependencies = [ - "thiserror 1.0.69", -] - [[package]] name = "protox" version = "0.9.1" @@ -4454,20 +3665,20 @@ dependencies = [ [[package]] name = "pulldown-cmark" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +checksum = "83c41efbf8f90ac44de7f3a868f0867851d261b56291732d0cbf7cceaaeb55a6" dependencies = [ - "bitflags 2.10.0", + "bitflags", "memchr", "unicase", ] [[package]] name = "pulldown-cmark-to-cmark" -version = "21.1.0" +version = "22.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" +checksum = "50793def1b900256624a709439404384204a5dc3a6ec580281bfaac35e882e90" dependencies = [ "pulldown-cmark", ] @@ -4479,49 +3690,84 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] -name = "quote" -version = "1.0.44" +name = "quinn" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ - "proc-macro2", + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", ] [[package]] -name = "r-efi" -version = "5.3.0" +name = "quinn-proto" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] [[package]] -name = "rand" -version = "0.8.5" +name = "quinn-udp" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ + "cfg_aliases", "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.60.2", ] [[package]] -name = "rand" -version = "0.9.2" +name = 
"quote" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.5", + "proc-macro2", ] [[package]] -name = "rand_chacha" -version = "0.3.1" +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", + "rand_chacha", + "rand_core 0.9.5", ] [[package]] @@ -4605,14 +3851,14 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags", ] [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -4622,9 +3868,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -4633,9 +3879,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.8" +version = 
"0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "relative-path" @@ -4645,9 +3891,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.28" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64", "bytes", @@ -4659,21 +3905,22 @@ dependencies = [ "http-body-util", "hyper", "hyper-rustls", - "hyper-tls", "hyper-util", "js-sys", "log", "mime", - "native-tls", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-native-tls", + "tokio-rustls", "tower", "tower-http", "tower-service", @@ -4707,25 +3954,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rmp" -version = "0.8.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ba8be72d372b2c9b35542551678538b562e7cf86c3315773cae48dfbfe7790c" -dependencies = [ - "num-traits", -] - -[[package]] -name = "rmp-serde" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f81bee8c8ef9b577d1681a70ebbc962c232461e397b22c208c43c04b67a155" -dependencies = [ - "rmp", - "serde", -] - [[package]] name = "rocksdb" version = "0.24.0" @@ -4771,20 +3999,10 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 2.0.117", "unicode-ident", ] -[[package]] -name = "rust_decimal" -version = "1.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" -dependencies = [ - "arrayvec", - "num-traits", -] - [[package]] name = "rustc-demangle" version = "0.1.27" @@ -4821,32 +4039,33 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.10.0", + "bitflags", "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "rustix" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags 2.10.0", + "bitflags", "errno", "libc", - "linux-raw-sys 0.11.0", + "linux-raw-sys 0.12.1", "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.36" +version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", @@ -4862,10 +4081,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", + "security-framework", ] [[package]] @@ -4874,15 +4093,44 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ + "web-time", "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4908,9 +4156,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -4973,24 +4221,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags 2.10.0", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" -dependencies = [ - "bitflags 
2.10.0", + "bitflags", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -4999,9 +4234,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.15.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -5059,7 +4294,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5086,26 +4321,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "serde_qs" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352" -dependencies = [ - "percent-encoding", - "serde", - "thiserror 2.0.18", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - [[package]] name = "serde_spanned" version = "1.0.4" @@ -5127,23 +4342,11 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap 1.9.3", - "ryu", - "serde", - "yaml-rust", -] - [[package]] name = "serial_test" -version = "3.3.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" +checksum = "911bd979bf1070a3f3aa7b691a3b3e9968f339ceeec89e08c280a8a22207a32f" dependencies = [ "futures-executor", "futures-util", @@ -5156,24 +4359,13 @@ dependencies = [ 
[[package]] name = "serial_test_derive" -version = "3.3.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" +checksum = "0a7d91949b85b0d2fb687445e448b40d322b6b3e4af6b44a29b21d9a5f33e6d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "sfv" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" -dependencies = [ - "base64", - "indexmap 2.13.0", - "rust_decimal", + "syn 2.0.117", ] [[package]] @@ -5232,23 +4424,17 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "simd-adler32" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" - [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" @@ -5325,44 +4511,16 @@ dependencies = [ name = "strip-ansi-escapes" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a8f8038e7e7969abb3f1b7c2a811225e9296da208539e0f79c5251d6cac0025" -dependencies = [ - "vte", -] - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - 
-[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "strum" -version = "0.26.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +checksum = "2a8f8038e7e7969abb3f1b7c2a811225e9296da208539e0f79c5251d6cac0025" dependencies = [ - "strum_macros", + "vte", ] [[package]] -name = "strum_macros" -version = "0.26.4" +name = "strsim" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" -dependencies = [ - "heck 0.5.0", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.114", -] +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" @@ -5404,9 +4562,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.114" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -5430,16 +4588,16 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags 2.10.0", + "bitflags", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -5462,14 +4620,14 @@ checksum = 
"591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" [[package]] name = "tempfile" -version = "3.24.0" +version = "3.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" dependencies = [ "fastrand", - "getrandom 0.3.4", + "getrandom 0.4.1", "once_cell", - "rustix 1.1.3", + "rustix 1.1.4", "windows-sys 0.61.2", ] @@ -5507,15 +4665,15 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.1.3", + "rustix 1.1.4", "windows-sys 0.60.2", ] [[package]] name = "termtree" -version = "0.5.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" +checksum = "d4d1330fe7f7f872cd05165130b10602d667b205fd85be09be2814b115d4ced9" [[package]] name = "textwrap" @@ -5554,7 +4712,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5565,7 +4723,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5577,21 +4735,11 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "thrift_codec" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d957f535b242b91aa9f47bde08080f9a6fef276477e55b0079979d002759d5" -dependencies = [ - "byteorder", - "trackable", -] - [[package]] name = "time" -version = "0.3.45" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" +checksum = 
"743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -5604,15 +4752,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.25" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -5638,6 +4786,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.49.0" @@ -5663,17 +4826,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", + "syn 2.0.117", ] [[package]] @@ -5698,17 +4851,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "tokio-test" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f6d24790a10a7af737693a3e8f1d03faef7e6ca0cc99aae5066f533766de545" -dependencies = [ - "futures-core", - "tokio", - "tokio-stream", -] - [[package]] name = "tokio-util" version = "0.7.18" @@ -5724,25 +4866,13 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned 0.6.9", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", -] - -[[package]] -name = "toml" -version = "0.9.11+spec-1.1.0" +version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ - "indexmap 2.13.0", + "indexmap", "serde_core", - "serde_spanned 1.0.4", + "serde_spanned", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", @@ -5750,12 +4880,18 @@ dependencies = [ ] [[package]] -name = "toml_datetime" -version = "0.6.11" +name = "toml" +version = "1.0.3+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +checksum = "c7614eaf19ad818347db24addfa201729cf2a9b6fdfd9eb0ab870fcacc606c0c" dependencies = [ - "serde", + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime 1.0.0+spec-1.1.0", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] @@ -5768,17 +4904,12 @@ dependencies = [ ] [[package]] -name = "toml_edit" -version = "0.22.27" +name = "toml_datetime" +version = "1.0.0+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" dependencies = [ - "indexmap 2.13.0", - "serde", - "serde_spanned 
0.6.9", - "toml_datetime 0.6.11", - "toml_write", - "winnow", + "serde_core", ] [[package]] @@ -5787,7 +4918,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.13.0", + "indexmap", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -5795,19 +4926,13 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.6+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow", ] -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - [[package]] name = "toml_writer" version = "1.0.6+spec-1.1.0" @@ -5816,9 +4941,9 @@ checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" dependencies = [ "async-trait", "axum", @@ -5847,21 +4972,21 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" +checksum = "1882ac3bf5ef12877d7ed57aad87e75154c11931c2ba7e6cde5e22d63522c734" dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "tonic-health" -version = "0.14.2" +version = "0.14.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a82868bf299e0a1d2e8dce0dc33a46c02d6f045b2c1f1d6cc8dc3d0bf1812ef" +checksum = "f4ff0636fef47afb3ec02818f5bceb4377b8abb9d6a386aeade18bd6212f8eb7" dependencies = [ "prost", "tokio", @@ -5872,9 +4997,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" dependencies = [ "bytes", "prost", @@ -5883,25 +5008,25 @@ dependencies = [ [[package]] name = "tonic-prost-build" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" +checksum = "f3144df636917574672e93d0f56d7edec49f90305749c668df5101751bb8f95a" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "prost-types", "quote", - "syn 2.0.114", + "syn 2.0.117", "tempfile", "tonic-build", ] [[package]] name = "tonic-reflection" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34da53e8387581d66db16ff01f98a70b426b091fdf76856e289d5c1bd386ed7b" +checksum = "aaf0685a51e6d02b502ba0764002e766b7f3042aed13d9234925b6ffbfa3fca7" dependencies = [ "prost", "prost-types", @@ -5913,9 +5038,9 @@ dependencies = [ [[package]] name = "tonic-web" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75214f6b6bd28c19aa752ac09fdf0eea546095670906c21fe3940e180a4c43f2" +checksum = "29453d84de05f4f1b573db22e6f9f6c95c189a6089a440c9a098aa9dea009299" dependencies = [ "base64", "bytes", @@ -5931,9 +5056,9 @@ dependencies = [ [[package]] name = "tonic-web-wasm-client" -version = "0.8.0" +version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "898cd44be5e23e59d2956056538f1d6b3c5336629d384ffd2d92e76f87fb98ff" +checksum = "e8e21e20b94f808d6f2244a5d960d02c28dd82066abddd2f27019bac0535f310" dependencies = [ "base64", "byteorder", @@ -5962,7 +5087,7 @@ checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.13.0", + "indexmap", "pin-project-lite", "slab", "sync_wrapper", @@ -5979,7 +5104,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags 2.10.0", + "bitflags", "bytes", "futures-util", "http", @@ -6025,7 +5150,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6040,9 +5165,9 @@ dependencies = [ [[package]] name = "tracing-forest" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3298fe855716711a00474eceb89cc7dc254bbe67f6bc4afafdeec5f0c538771c" +checksum = "f09cb459317a3811f76644334473239d696cd8efc606963ae7d1c308cead3b74" dependencies = [ "chrono", "smallvec", @@ -6109,25 +5234,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trackable" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" -dependencies = [ - "trackable_derive", -] - -[[package]] -name = "trackable_derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -6136,9 +5242,9 @@ checksum = 
"e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.114" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17e807bff86d2a06b52bca4276746584a78375055b6e45843925ce2802b335" +checksum = "47c635f0191bd3a2941013e5062667100969f8c4e9cd787c14f977265d73616e" dependencies = [ "dissimilar", "glob", @@ -6147,7 +5253,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 0.9.11+spec-1.1.0", + "toml 1.0.3+spec-1.1.0", ] [[package]] @@ -6162,15 +5268,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "uncased" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" -dependencies = [ - "version_check", -] - [[package]] name = "unicase" version = "2.9.0" @@ -6179,9 +5276,9 @@ checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-linebreak" @@ -6248,17 +5345,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" -[[package]] -name = "uuid" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" -dependencies = [ - "getrandom 0.3.4", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "valuable" version = "0.1.1" @@ -6329,11 +5415,20 @@ dependencies = [ 
"wit-bindgen", ] +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "60722a937f594b7fde9adb894d7c092fc1bb6612897c46368d18e7a20208eff2" dependencies = [ "cfg-if", "once_cell", @@ -6344,9 +5439,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +checksum = "8a89f4650b770e4521aa6573724e2aed4704372151bd0de9d16a3bbabb87441a" dependencies = [ "cfg-if", "futures-util", @@ -6358,9 +5453,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "0fac8c6395094b6b91c4af293f4c79371c163f9a6f56184d2c9a85f5a95f3950" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6368,31 +5463,53 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "ab3fabce6159dc20728033842636887e4877688ae94382766e00b180abac9d60" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.113" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "de0e091bdb824da87dc01d967388880d017a0a9bc4f3bdc0d86ee9f9336e3bb5" dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + [[package]] name = "wasm-streams" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +checksum = "9d1ec4f6517c9e11ae630e200b2b65d193279042e28edd4a2cda233e46670bbb" dependencies = [ "futures-util", "js-sys", @@ -6401,11 +5518,23 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver 1.0.27", +] + [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "705eceb4ce901230f8625bd1d665128056ccbe4b7408faa625eec1ba80f59a97" dependencies = [ "js-sys", "wasm-bindgen", @@ -6421,6 +5550,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-root-certs" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -6473,7 +5611,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6484,7 +5622,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6524,27 +5662,27 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.42.2", ] [[package]] name = "windows-sys" -version = "0.52.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] name = "windows-sys" -version = "0.59.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.6", ] @@ -6567,6 +5705,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + 
"windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -6615,6 +5768,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -6633,6 +5792,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -6651,6 +5816,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -6681,6 +5852,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -6699,6 +5876,12 @@ version = "0.53.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -6717,6 +5900,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -6735,6 +5924,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -6814,7 +6009,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6838,7 +6033,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4ff3b651754a7bd216f959764d0a5ab6f4b551c9a3a08fb9ccecbed594b614a" dependencies = [ - "rand 0.9.2", + "rand", "winter-utils", ] @@ -6880,6 +6075,88 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver 1.0.27", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" @@ -6897,15 +6174,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yansi" version = "1.0.1" @@ -6931,28 +6199,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.33" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.33" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6972,7 +6240,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -7012,39 +6280,11 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zmij" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" - -[[package]] -name = "zstd" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.2.4" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" -dependencies = [ - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "2.0.16+zstd.1.5.7" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" -dependencies = [ - "cc", - "pkg-config", -] +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/Cargo.toml b/Cargo.toml index a1a9387756..3bcb715eca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,10 +5,12 @@ members = [ "bin/remote-prover", "bin/stress-test", "crates/block-producer", + "crates/db", "crates/grpc-error-macro", "crates/ntx-builder", "crates/proto", "crates/remote-prover-client", + "crates/rocksdb-cxx-linkage-fix", "crates/rpc", "crates/store", "crates/test-macro", @@ -27,37 +29,45 @@ homepage = "https://miden.xyz" license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" -rust-version = "1.90" -version = "0.13.7" +rust-version = "1.91" +version = "0.14.0" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto] opt-level = 2 +# Avoid running the expensive debug assertion in winter-prover +[profile.test.package.winter-prover] +debug-assertions = false + [profile.release] debug = true [workspace.dependencies] # Workspace crates. 
-miden-node-block-producer = { path = "crates/block-producer", version = "0.13" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.13" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.13" } -miden-node-proto = { path = "crates/proto", version = "0.13" } -miden-node-proto-build = { path = "proto", version = "0.13" } -miden-node-rpc = { path = "crates/rpc", version = "0.13" } -miden-node-store = { path = "crates/store", version = "0.13" } +miden-node-block-producer = { path = "crates/block-producer", version = "0.14" } +miden-node-db = { path = "crates/db", version = "0.14" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.14" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.14" } +miden-node-proto = { path = "crates/proto", version = "0.14" } +miden-node-proto-build = { path = "proto", version = "0.14" } +miden-node-rpc = { path = "crates/rpc", version = "0.14" } +miden-node-store = { path = "crates/store", version = "0.14" } miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.13" } -miden-node-validator = { path = "crates/validator", version = "0.13" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } +miden-node-utils = { path = "crates/utils", version = "0.14" } +miden-node-validator = { path = "crates/validator", version = "0.14" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.14" } +# Temporary workaround until +# is part of `rocksdb-rust` release +miden-node-rocksdb-cxx-linkage-fix = { path = "crates/rocksdb-cxx-linkage-fix", version = "0.14" } # miden-base aka protocol dependencies. These should be updated in sync. 
-miden-block-prover = { version = "0.13" } -miden-protocol = { default-features = false, version = "0.13" } -miden-standards = { version = "0.13" } -miden-testing = { version = "0.13" } -miden-tx = { default-features = false, version = "0.13" } -miden-tx-batch-prover = { version = "0.13" } +miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } +miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } +miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } # Other miden dependencies. These should align with those expected by miden-base. miden-air = { features = ["std", "testing"], version = "0.20" } @@ -68,6 +78,11 @@ anyhow = { version = "1.0" } assert_matches = { version = "1.5" } async-trait = { version = "0.1" } clap = { features = ["derive"], version = "4.5" } +deadpool = { default-features = false, version = "0.12" } +deadpool-diesel = { version = "0.6" } +deadpool-sync = { default-features = false, version = "0.1" } +diesel = { version = "2.3" } +diesel_migrations = { version = "2.3" } fs-err = { version = "3" } futures = { version = "0.3" } hex = { version = "0.4" } @@ -84,13 +99,16 @@ pretty_assertions = { version = "1.4" } prost = { default-features = false, version = "=0.14.3" } protox = { version = "=0.9.1" } rand = { version = "0.9" } -rand_chacha = { version = "0.9" } +rand_chacha = { default-features = false, version = "0.9" } +reqwest = { version = "0.13" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } +tempfile = { version = "3" } thiserror = { default-features = false, version = "2.0" } tokio = { features = 
["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } -toml = { version = "0.9" } +tokio-util = { version = "0.7" } +toml = "1.0" tonic = { default-features = false, version = "0.14" } tonic-health = { version = "0.14" } tonic-prost = { version = "0.14" } @@ -107,6 +125,7 @@ url = { features = ["serde"], version = "2.5" } # Pedantic lints are set to a lower priority which allows lints in the group to be selectively enabled. pedantic = { level = "warn", priority = -1 } +allow_attributes = "deny" cast_possible_truncation = "allow" # Overly many instances especially regarding indices. collapsible-if = "allow" # Too new to enforce. from_iter_instead_of_collect = "allow" # at times `FromIter` is much more readable @@ -119,3 +138,7 @@ must_use_candidate = "allow" # This marks many fn's which isn't helpfu needless_for_each = "allow" # Context dependent if that's useful. should_panic_without_expect = "allow" # We don't care about the specific panic message. # End of pedantic lints. + +# Configure `cargo-typos` +[workspace.metadata.typos] +files.extend-exclude = ["*.svg"] # Ignore SVG files. 
diff --git a/Makefile b/Makefile index 64aa55bf4f..8eb4435446 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,7 @@ help: WARNINGS=RUSTDOCFLAGS="-D warnings" BUILD_PROTO=BUILD_PROTO=1 CONTAINER_RUNTIME ?= docker +STRESS_TEST_DATA_DIR ?= stress-test-store-$(shell date +%Y%m%d-%H%M%S) # -- linting -------------------------------------------------------------------------------------- @@ -106,7 +107,16 @@ install-node: ## Installs node .PHONY: install-remote-prover install-remote-prover: ## Install remote prover's CLI - $(BUILD_PROTO) cargo install --path bin/remote-prover --bin miden-remote-prover --features concurrent --locked + $(BUILD_PROTO) cargo install --path bin/remote-prover --bin miden-remote-prover --locked + +.PHONY: stress-test-smoke +stress-test: ## Runs stress-test benchmarks + ${BUILD_PROTO} cargo build --release --locked -p miden-node-stress-test + @mkdir -p $(STRESS_TEST_DATA_DIR) + ./target/release/miden-node-stress-test seed-store --data-directory $(STRESS_TEST_DATA_DIR) --num-accounts 500 --public-accounts-percentage 50 + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-state + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-notes + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-nullifiers --prefixes 10 .PHONY: install-stress-test install-stress-test: ## Installs stress-test binary diff --git a/README.md b/README.md index bca1cdbf4f..ae38d6d959 100644 --- a/README.md +++ b/README.md @@ -33,15 +33,17 @@ The documentation in the `docs/external` folder is built using Docusaurus and is Developer documentation and onboarding guide is available [here](https://0xMiden.github.io/miden-node/developer/index.html). 
-At minimum, please see our [contributing](CONTRIBUTING.md) guidelines and our [makefile](Makefile) for example workflows +At minimum, please see our [contributing](https://github.com/0xMiden/.github?tab=contributing-ov-file) guidelines and our [makefile](Makefile) for example workflows e.g. run the testsuite using ```sh make test ``` -Note that we do _not_ accept low-effort contributions or AI generated code. For typos and documentation errors please -rather open an issue. +In particular, please note that we do _not_ accept [low-effort contributions](https://github.com/0xMiden/.github?tab=contributing-ov-file#contribution-quality) or AI generated code. For typos and documentation errors please open an issue instead. + +> [!IMPORTANT] +> PRs will be closed unless you have been assigned an issue by a maintainer. ## License diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 11c2b19059..357169c025 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -26,9 +26,9 @@ miden-protocol = { features = ["std", "testing"], workspace = true } miden-standards = { workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["std"], workspace = true } -rand = { version = "0.9" } -rand_chacha = { version = "0.9" } -reqwest = { features = ["json"], version = "0.12" } +rand = { workspace = true } +rand_chacha = { workspace = true } +reqwest = { features = ["json", "query"], workspace = true } serde = { features = ["derive"], version = "1.0" } serde_json = { version = "1.0" } sha2 = { version = "0.10" } @@ -37,3 +37,6 @@ tonic = { features = ["codegen", "tls-native-roots", "transport"], wo tonic-health = { workspace = true } tracing = { workspace = true } url = { features = ["serde"], workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/network-monitor/build.rs b/bin/network-monitor/build.rs new file mode 100644 index 
0000000000..ed4038d06e --- /dev/null +++ b/bin/network-monitor/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index c044267331..c2b9d0835a 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -22,10 +22,10 @@ use miden_protocol::note::{ NoteAssets, NoteAttachment, NoteExecutionHint, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, + NoteStorage, NoteTag, NoteType, }; @@ -751,7 +751,7 @@ fn load_counter_account(file_path: &Path) -> Result { } /// Create and submit a network note that targets the counter account. -#[allow(clippy::too_many_arguments)] +#[expect(clippy::too_many_arguments)] #[instrument( parent = None, target = COMPONENT, @@ -872,7 +872,7 @@ fn create_network_note( Felt::new(rng.random()), ]); - let recipient = NoteRecipient::new(serial_num, script, NoteInputs::new(vec![])?); + let recipient = NoteRecipient::new(serial_num, script, NoteStorage::new(vec![])?); let network_note = Note::new(NoteAssets::new(vec![])?, metadata, recipient.clone()); Ok((network_note, recipient)) diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 370d7bb105..caeafe055d 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -47,7 +47,7 @@ pub struct FaucetTestDetails { struct PowChallengeResponse { challenge: String, target: u64, - #[allow(dead_code)] // Timestamp is part of API response but not used + #[expect(dead_code)] // Timestamp is part of API response but not used timestamp: u64, } @@ -55,7 +55,7 @@ struct PowChallengeResponse { #[derive(Debug, Deserialize)] struct GetTokensResponse { tx_id: String, - #[allow(dead_code)] // Note ID is part of API response but not used in monitoring + #[expect(dead_code)] // Note ID is part of API response but not used in monitoring note_id: String, } diff --git a/bin/node/.env b/bin/node/.env 
index fc4c2793e3..6bdfa9a805 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -10,7 +10,7 @@ MIDEN_NODE_STORE_RPC_URL= MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= -MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY= +MIDEN_NODE_VALIDATOR_KEY= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index b6ade3b4da..700ce37060 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -33,6 +33,8 @@ miden-protocol = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] -figment = { features = ["env", "test", "toml"], version = "0.10" } miden-node-utils = { features = ["tracing-forest"], workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 832b0bb8d2..79464a9877 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,39 +1,47 @@ -FROM rust:1.90-slim-bullseye AS builder - +FROM rust:1.91-slim-bullseye AS chef # Install build dependencies. RocksDB is compiled from source by librocksdb-sys. RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y llvm clang libclang-dev pkg-config libssl-dev libsqlite3-dev ca-certificates && \ + apt-get install -y \ + llvm \ + clang \ + libclang-dev \ + cmake \ + pkg-config \ + libssl-dev \ + libsqlite3-dev \ + ca-certificates && \ rm -rf /var/lib/apt/lists/* - +RUN cargo install cargo-chef WORKDIR /app -COPY ./Cargo.toml . -COPY ./Cargo.lock . -COPY ./bin ./bin -COPY ./crates ./crates -COPY ./proto ./proto -RUN cargo install --path bin/node --locked +FROM chef AS planner +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json -FROM debian:bullseye-slim +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --recipe-path recipe.json +# Build application +COPY . . +RUN cargo build --release --locked --bin miden-node -# Update machine & install required packages -# The installation of sqlite3 is needed for correct function of the SQLite database +# Base line runtime image with runtime dependencies installed. +FROM debian:bullseye-slim AS runtime-base RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y --no-install-recommends \ - sqlite3 \ + apt-get install -y --no-install-recommends sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node - +FROM runtime-base AS runtime +COPY --from=builder /app/target/release/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ org.opencontainers.image.url=https://0xMiden.github.io/ \ org.opencontainers.image.documentation=https://github.com/0xMiden/miden-node \ org.opencontainers.image.source=https://github.com/0xMiden/miden-node \ org.opencontainers.image.vendor=Miden \ org.opencontainers.image.licenses=MIT - ARG CREATED ARG VERSION ARG COMMIT @@ -43,6 +51,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 - # Miden node does not spawn sub-processes, so it can be used as the PID1 CMD miden-node diff --git a/bin/node/build.rs b/bin/node/build.rs new file mode 100644 index 0000000000..ed4038d06e --- /dev/null +++ b/bin/node/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index 5cfbc78fcc..5d416ea8e5 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -86,7 +86,6 
@@ impl BlockProducerCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, @@ -125,7 +124,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, @@ -149,7 +147,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: miden_protocol::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 22f1199a3f..707e01193f 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -4,12 +4,10 @@ use std::time::Duration; use anyhow::Context; use miden_node_block_producer::BlockProducer; -use miden_node_ntx_builder::NetworkTransactionBuilder; use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; -use miden_protocol::block::BlockSigner; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; @@ -20,11 +18,13 @@ use super::{ENV_DATA_DIRECTORY, ENV_RPC_URL}; use crate::commands::{ BlockProducerConfig, DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_INSECURE_SECRET_KEY, + ENV_VALIDATOR_KEY, INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, + ValidatorConfig, duration_to_human_readable_string, }; @@ -51,12 +51,12 @@ pub enum BundledCommand { /// /// If not provided, a 
predefined key is used. #[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", + long = "validator.key", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX )] - validator_insecure_secret_key: String, + validator_key: String, }, /// Runs all three node components in the same process. @@ -68,6 +68,10 @@ pub enum BundledCommand { #[arg(long = "rpc.url", env = ENV_RPC_URL, value_name = "URL")] rpc_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which the Store component should store the database and raw block data. #[arg(long = "data-directory", env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -78,6 +82,9 @@ pub enum BundledCommand { #[command(flatten)] ntx_builder: NtxBuilderConfig, + #[command(flatten)] + validator: ValidatorConfig, + /// Enables the exporting of traces for OpenTelemetry. /// /// This can be further configured using environment variables as defined in the official @@ -95,15 +102,6 @@ pub enum BundledCommand { value_name = "DURATION" )] grpc_timeout: Duration, - - /// Insecure, hex-encoded validator secret key for development and testing purposes. - #[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", - default_value = INSECURE_VALIDATOR_KEY_HEX - )] - validator_insecure_secret_key: String, }, } @@ -114,14 +112,14 @@ impl BundledCommand { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } => { // Currently the bundled bootstrap is identical to the store's bootstrap. 
crate::commands::store::StoreCommand::Bootstrap { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } .handle() .await @@ -129,36 +127,37 @@ impl BundledCommand { }, BundledCommand::Start { rpc_url, + block_prover_url, data_directory, block_producer, ntx_builder, + validator, enable_otel: _, grpc_timeout, - validator_insecure_secret_key, } => { - let secret_key_bytes = hex::decode(validator_insecure_secret_key)?; - let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; Self::start( rpc_url, + block_prover_url, data_directory, - ntx_builder, block_producer, + ntx_builder, + validator, grpc_timeout, - signer, ) .await }, } } - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] async fn start( rpc_url: Url, + block_prover_url: Option, data_directory: PathBuf, - ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, + ntx_builder: NtxBuilderConfig, + validator: ValidatorConfig, grpc_timeout: Duration, - signer: impl BlockSigner + Send + Sync + 'static, ) -> anyhow::Result<()> { // Start listening on all gRPC urls so that inter-component connections can be created // before each component is fully started up. @@ -170,17 +169,19 @@ impl BundledCommand { .await .context("Failed to bind to RPC gRPC endpoint")?; - let block_producer_address = TcpListener::bind("127.0.0.1:0") - .await - .context("Failed to bind to block-producer gRPC endpoint")? - .local_addr() - .context("Failed to retrieve the block-producer's gRPC address")?; + let (block_producer_url, block_producer_address) = { + let socket_addr = TcpListener::bind("127.0.0.1:0") + .await + .context("Failed to bind to block-producer gRPC endpoint")? 
+ .local_addr() + .context("Failed to retrieve the block-producer's gRPC address")?; + let url = Url::parse(&format!("http://{socket_addr}")) + .context("Failed to parse Block Producer URL")?; + (url, socket_addr) + }; - let validator_address = TcpListener::bind("127.0.0.1:0") - .await - .context("Failed to bind to validator gRPC endpoint")? - .local_addr() - .context("Failed to retrieve the validator's gRPC address")?; + // Validator URL is either specified remote, or generated local. + let (validator_url, validator_socket_address) = validator.to_addresses().await?; // Store addresses for each exposed API let store_rpc_listener = TcpListener::bind("127.0.0.1:0") @@ -212,6 +213,7 @@ impl BundledCommand { block_producer_listener: store_block_producer_listener, ntx_builder_listener: store_ntx_builder_listener, data_directory: data_directory_clone, + block_prover_url, grpc_timeout, } .serve() @@ -223,105 +225,112 @@ impl BundledCommand { let should_start_ntx_builder = !ntx_builder.disabled; // Start block-producer. The block-producer's endpoint is available after loading completes. 
- let block_producer_id = join_set - .spawn({ - let store_url = Url::parse(&format!("http://{store_block_producer_address}")) - .context("Failed to parse URL")?; - let validator_url = Url::parse(&format!("http://{validator_address}")) - .context("Failed to parse URL")?; - async move { - BlockProducer { - block_producer_address, - store_url, - validator_url, - batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, - batch_interval: block_producer.batch_interval, - block_interval: block_producer.block_interval, - max_batches_per_block: block_producer.max_batches_per_block, - max_txs_per_batch: block_producer.max_txs_per_batch, - grpc_timeout, - mempool_tx_capacity: block_producer.mempool_tx_capacity, + let block_producer_id = { + let validator_url = validator_url.clone(); + join_set + .spawn({ + let store_url = Url::parse(&format!("http://{store_block_producer_address}")) + .context("Failed to parse URL")?; + async move { + BlockProducer { + block_producer_address, + store_url, + validator_url, + batch_prover_url: block_producer.batch_prover_url, + batch_interval: block_producer.batch_interval, + block_interval: block_producer.block_interval, + max_batches_per_block: block_producer.max_batches_per_block, + max_txs_per_batch: block_producer.max_txs_per_batch, + grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, + } + .serve() + .await + .context("failed while serving block-producer component") } - .serve() - .await - .context("failed while serving block-producer component") - } - }) - .id(); + }) + .id() + }; - let validator_id = join_set - .spawn({ - async move { - Validator { - address: validator_address, + // Start RPC component. 
+ let rpc_id = { + let block_producer_url = block_producer_url.clone(); + let validator_url = validator_url.clone(); + join_set + .spawn(async move { + let store_url = Url::parse(&format!("http://{store_rpc_address}")) + .context("Failed to parse URL")?; + Rpc { + listener: grpc_rpc, + store_url, + block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout, - signer, } .serve() .await - .context("failed while serving validator component") - } - }) - .id(); - - // Start RPC component. - let rpc_id = join_set - .spawn(async move { - let store_url = Url::parse(&format!("http://{store_rpc_address}")) - .context("Failed to parse URL")?; - let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; - let validator_url = Url::parse(&format!("http://{validator_address}")) - .context("Failed to parse URL")?; - Rpc { - listener: grpc_rpc, - store_url, - block_producer_url: Some(block_producer_url), - validator_url, - grpc_timeout, - } - .serve() - .await - .context("failed while serving RPC component") - }) - .id(); + .context("failed while serving RPC component") + }) + .id() + }; // Lookup table so we can identify the failed component. let mut component_ids = HashMap::from([ (store_id, "store"), (block_producer_id, "block-producer"), - (validator_id, "validator"), (rpc_id, "rpc"), ]); // Start network transaction builder. The endpoint is available after loading completes. 
- let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) - .context("Failed to parse URL")?; - if should_start_ntx_builder { - let validator_url = Url::parse(&format!("http://{validator_address}")) + let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) .context("Failed to parse URL")?; + let block_producer_url = block_producer_url.clone(); + let validator_url = validator_url.clone(); + + let builder_config = ntx_builder.into_builder_config( + store_ntx_builder_url, + block_producer_url, + validator_url, + &data_directory, + ); + let id = join_set .spawn(async move { - let block_producer_url = - Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; - NetworkTransactionBuilder::new( - store_ntx_builder_url, - block_producer_url, - validator_url, - ntx_builder.tx_prover_url, - ntx_builder.script_cache_size, - ) - .run() - .await - .context("failed while serving ntx builder component") + builder_config + .build() + .await + .context("failed to initialize ntx builder")? + .run() + .await + .context("failed while serving ntx builder component") }) .id(); component_ids.insert(id, "ntx-builder"); } + // Start the Validator if we have bound a socket. + if let Some(address) = validator_socket_address { + let secret_key_bytes = hex::decode(validator.validator_key)?; + let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; + let id = join_set + .spawn({ + async move { + Validator { + address, + grpc_timeout, + signer, + data_directory, + } + .serve() + .await + .context("failed while serving validator component") + } + }) + .id(); + component_ids.insert(id, "validator"); + } + // SAFETY: The joinset is definitely not empty. 
let component_result = join_set.join_next_with_id().await.unwrap(); diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 7e8fa7e69f..352a6de167 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -1,12 +1,16 @@ +use std::net::SocketAddr; use std::num::NonZeroUsize; +use std::path::{Path, PathBuf}; use std::time::Duration; +use anyhow::Context; use miden_node_block_producer::{ DEFAULT_BATCH_INTERVAL, DEFAULT_BLOCK_INTERVAL, DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH, }; +use tokio::net::TcpListener; use url::Url; pub mod block_producer; @@ -36,7 +40,7 @@ const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE"; -const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = "MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; +const ENV_VALIDATOR_KEY: &str = "MIDEN_NODE_VALIDATOR_KEY"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); @@ -47,7 +51,49 @@ fn duration_to_human_readable_string(duration: Duration) -> String { humantime::format_duration(duration).to_string() } -/// Configuration for the Network Transaction Builder component +/// Configuration for the Validator component. +#[derive(clap::Args)] +pub struct ValidatorConfig { + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// Only used when the Validator URL argument is not set. + #[arg( + long = "validator.key", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_key: String, + + /// The remote Validator's gRPC URL. If unset, will default to running a Validator + /// in-process. If set, the insecure key argument is ignored. 
+ #[arg(long = "validator.url", env = ENV_VALIDATOR_URL, value_name = "URL")] + validator_url: Option, +} + +impl ValidatorConfig { + /// Converts the [`ValidatorConfig`] into a URL and an optional [`SocketAddr`]. + /// + /// If the `validator_url` is set, it returns the URL and `None` for the [`SocketAddr`]. + /// + /// If `validator_url` is not set, it binds to a random port on localhost, creates a URL, + /// and returns the URL and the bound [`SocketAddr`]. + async fn to_addresses(&self) -> anyhow::Result<(Url, Option)> { + if let Some(url) = &self.validator_url { + Ok((url.clone(), None)) + } else { + let socket_addr = TcpListener::bind("127.0.0.1:0") + .await + .context("Failed to bind to validator gRPC endpoint")? + .local_addr() + .context("Failed to retrieve the validator's gRPC address")?; + let url = Url::parse(&format!("http://{socket_addr}")) + .context("Failed to parse Validator URL")?; + Ok((url, Some(socket_addr))) + } + } +} + +/// Configuration for the Network Transaction Builder component. #[derive(clap::Args)] pub struct NtxBuilderConfig { /// Disable spawning the network transaction builder. @@ -68,6 +114,9 @@ pub struct NtxBuilderConfig { )] pub ticker_interval: Duration, + /// Number of note scripts to cache locally. + /// + /// Note scripts not in cache must first be retrieved from the store. #[arg( long = "ntx-builder.script-cache-size", env = ENV_NTX_SCRIPT_CACHE_SIZE, @@ -75,6 +124,38 @@ pub struct NtxBuilderConfig { default_value_t = DEFAULT_NTX_SCRIPT_CACHE_SIZE )] pub script_cache_size: NonZeroUsize, + + /// Directory for the ntx-builder's persistent database. + /// + /// If not set, defaults to the node's data directory. + #[arg(long = "ntx-builder.data-directory", value_name = "DIR")] + pub data_directory: Option, +} + +impl NtxBuilderConfig { + /// Converts this CLI config into the ntx-builder's internal config. 
+ /// + /// The `node_data_directory` is used as the default location for the ntx-builder's database + /// if `--ntx-builder.data-directory` is not explicitly set. + pub fn into_builder_config( + self, + store_url: Url, + block_producer_url: Url, + validator_url: Url, + node_data_directory: &Path, + ) -> miden_node_ntx_builder::NtxBuilderConfig { + let data_dir = self.data_directory.unwrap_or_else(|| node_data_directory.to_path_buf()); + let database_filepath = data_dir.join("ntx-builder.sqlite3"); + + miden_node_ntx_builder::NtxBuilderConfig::new( + store_url, + block_producer_url, + validator_url, + database_filepath, + ) + .with_tx_prover_url(self.tx_prover_url) + .with_script_cache_size(self.script_cache_size) + } } /// Configuration for the Block Producer component @@ -103,11 +184,6 @@ pub struct BlockProducerConfig { #[arg(long = "batch-prover.url", env = ENV_BATCH_PROVER_URL, value_name = "URL")] pub batch_prover_url: Option, - /// The remote block prover's gRPC url. If unset, will default to running a prover - /// in-process which is expensive. - #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] - pub block_prover_url: Option, - /// The number of transactions per batch. #[arg( long = "max-txs-per-batch", diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 9dd311368f..7bf56f4a85 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -17,14 +17,15 @@ use super::{ }; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, - ENV_VALIDATOR_INSECURE_SECRET_KEY, + ENV_VALIDATOR_KEY, INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; -#[allow(clippy::large_enum_variant, reason = "single use enum")] +#[expect(clippy::large_enum_variant, reason = "single use enum")] #[derive(clap::Subcommand)] pub enum StoreCommand { /// Bootstraps the blockchain database with the genesis block. 
@@ -45,14 +46,16 @@ pub enum StoreCommand { genesis_config_file: Option, /// Insecure, hex-encoded validator secret key for development and testing purposes. /// + /// Used to sign the genesis block in the bootstrap process. + /// /// If not provided, a predefined key is used. #[arg( - long = "validator.insecure.secret-key", - env = ENV_VALIDATOR_INSECURE_SECRET_KEY, - value_name = "VALIDATOR_INSECURE_SECRET_KEY", + long = "validator.key", + env = ENV_VALIDATOR_KEY, + value_name = "VALIDATOR_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX )] - validator_insecure_secret_key: String, + validator_key: String, }, /// Starts the store component. @@ -72,6 +75,10 @@ pub enum StoreCommand { #[arg(long = "block-producer.url", env = ENV_STORE_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which to store the database and raw block data. 
#[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -104,17 +111,18 @@ impl StoreCommand { data_directory, accounts_directory, genesis_config_file, - validator_insecure_secret_key, + validator_key, } => Self::bootstrap( &data_directory, &accounts_directory, genesis_config_file.as_ref(), - validator_insecure_secret_key, + validator_key, ), StoreCommand::Start { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, enable_otel: _, grpc_timeout, @@ -123,6 +131,7 @@ impl StoreCommand { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, grpc_timeout, ) @@ -143,6 +152,7 @@ impl StoreCommand { rpc_url: Url, ntx_builder_url: Url, block_producer_url: Url, + block_prover_url: Option, data_directory: PathBuf, grpc_timeout: Duration, ) -> anyhow::Result<()> { @@ -169,6 +179,7 @@ impl StoreCommand { Store { rpc_listener, + block_prover_url, ntx_builder_listener, block_producer_listener, data_directory, @@ -183,16 +194,15 @@ impl StoreCommand { data_directory: &Path, accounts_directory: &Path, genesis_config: Option<&PathBuf>, - validator_insecure_secret_key: String, + validator_key: String, ) -> anyhow::Result<()> { // Decode the validator key. - let signer = SecretKey::read_from_bytes(&hex::decode(validator_insecure_secret_key)?)?; + let signer = SecretKey::read_from_bytes(&hex::decode(validator_key)?)?; // Parse genesis config (or default if not given). 
let config = genesis_config .map(|file_path| { - let toml_str = fs_err::read_to_string(file_path)?; - GenesisConfig::read_toml(toml_str.as_str()).with_context(|| { + GenesisConfig::read_toml_file(file_path).with_context(|| { format!("failed to parse genesis config from file {}", file_path.display()) }) }) diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index f543be3013..461e446c1a 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -1,3 +1,4 @@ +use std::path::PathBuf; use std::time::Duration; use anyhow::Context; @@ -9,8 +10,9 @@ use url::Url; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_DATA_DIRECTORY, ENV_ENABLE_OTEL, - ENV_VALIDATOR_INSECURE_SECRET_KEY, + ENV_VALIDATOR_KEY, ENV_VALIDATOR_URL, INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, @@ -40,29 +42,42 @@ pub enum ValidatorCommand { )] grpc_timeout: Duration, + /// Directory in which to store the validator's data. + #[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] + data_directory: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. /// /// If not provided, a predefined key is used. - #[arg(long = "insecure.secret-key", env = ENV_VALIDATOR_INSECURE_SECRET_KEY, value_name = "INSECURE_SECRET_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] - insecure_secret_key: String, + #[arg(long = "key", env = ENV_VALIDATOR_KEY, value_name = "VALIDATOR_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] + validator_key: String, }, } impl ValidatorCommand { pub async fn handle(self) -> anyhow::Result<()> { let Self::Start { - url, grpc_timeout, insecure_secret_key, .. + url, + grpc_timeout, + validator_key, + data_directory, + .. 
} = self; let address = url.to_socket().context("Failed to extract socket address from validator URL")?; - let signer = SecretKey::read_from_bytes(hex::decode(insecure_secret_key)?.as_ref())?; + let signer = SecretKey::read_from_bytes(hex::decode(validator_key)?.as_ref())?; - Validator { address, grpc_timeout, signer } - .serve() - .await - .context("failed while serving validator component") + Validator { + address, + grpc_timeout, + signer, + data_directory, + } + .serve() + .await + .context("failed while serving validator component") } pub fn is_open_telemetry_enabled(&self) -> bool { diff --git a/bin/remote-prover/.env b/bin/remote-prover/.env index 05593e6989..b7191203d7 100644 --- a/bin/remote-prover/.env +++ b/bin/remote-prover/.env @@ -1,32 +1,6 @@ -# For more info use -h on the relevant commands: -# miden-remote-prover start-worker -h -# miden-remote-prover start-proxy -h +# For more info consult the help output: `miden-remote-prover --help` -# Proxy ############################ -# Port of the proxy -MRP_PORT=8082 -# Port to add / remove workers -MRP_CONTROL_PORT=8083 -# Uncomment the following line to enable Prometheus metrics on port 6192 -# MRP_METRICS_PORT=6192 -MRP_TIMEOUT=100s -MRP_CONNECTION_TIMEOUT=10s -MRP_MAX_QUEUE_ITEMS=10 -MRP_MAX_RETRIES_PER_REQUEST=1 -MRP_MAX_REQ_PER_SEC=5 -MRP_AVAILABLE_WORKERS_POLLING_INTERVAL=20ms -MRP_HEALTH_CHECK_INTERVAL=1s -MRP_ENABLE_METRICS=false -MRP_PROOF_TYPE=transaction -MRP_PROXY_WORKERS_LIST=127.0.0.1:50051 -MRP_GRACE_PERIOD=20s -MRP_GRACEFUL_SHUTDOWN_TIMEOUT=5s -RUST_LOG=info -#################################### - -# Worker ########################### -# Use 127.0.0.1 instead of 0.0.0.0 -MRP_WORKER_LOCALHOST=false -MRP_WORKER_PORT=50051 -MRP_WORKER_PROOF_TYPE=transaction -#################################### +MIDEN_PROVER_PORT=8082 +MIDEN_PROVER_KIND=transaction +MIDEN_PROVER_TIMEOUT=100s +MIDEN_PROVER_CAPACITY=10 diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 
85bc355f79..7a3b6a059f 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -1,6 +1,6 @@ [package] authors.workspace = true -description = "Miden blockchain remote prover" +description = "Miden remote prover" edition.workspace = true homepage.workspace = true keywords = ["miden", "prover", "remote"] @@ -11,53 +11,33 @@ repository.workspace = true rust-version.workspace = true version.workspace = true -[[bin]] -name = "miden-remote-prover" -path = "src/main.rs" - -[features] -concurrent = ["miden-tx/concurrent"] -default = ["concurrent"] - [lints] workspace = true [dependencies] -anyhow = { workspace = true } -async-trait = { version = "0.1" } -axum = { version = "0.8" } -bytes = { version = "1.0" } -clap = { features = ["env"], workspace = true } -http = { workspace = true } -humantime = { workspace = true } -miden-block-prover = { workspace = true } -miden-node-proto = { workspace = true } -miden-node-utils = { workspace = true } -miden-protocol = { features = ["std"], workspace = true } -miden-tx = { features = ["std"], workspace = true } -miden-tx-batch-prover = { features = ["std"], workspace = true } -opentelemetry = { version = "0.31" } -pingora = { features = ["lb"], version = "0.6" } -pingora-core = { version = "0.6" } -pingora-limits = { version = "0.6" } -pingora-proxy = { version = "0.6" } -prometheus = { version = "0.14" } -prost = { default-features = false, features = ["derive"], workspace = true } -reqwest = { version = "0.12" } -semver = { version = "1.0" } -serde = { features = ["derive"], version = "1.0" } -serde_qs = { version = "0.15" } -thiserror = { workspace = true } -tokio = { features = ["full"], workspace = true } -tokio-stream = { features = ["net"], version = "0.1" } -tonic = { default-features = false, features = ["codegen", "router", "transport"], version = "0.14" } -tonic-health = { version = "0.14" } -tonic-prost = { workspace = true } -tonic-web = { version = "0.14" } -tower-http = { features = ["trace"], 
workspace = true } -tracing = { workspace = true } -tracing-opentelemetry = { version = "0.32" } -uuid = { features = ["v4"], version = "1.16" } +anyhow = { workspace = true } +async-trait = { version = "0.1" } +clap = { features = ["env"], workspace = true } +http = { workspace = true } +humantime = { workspace = true } +miden-block-prover = { workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-utils = { workspace = true } +miden-protocol = { features = ["std"], workspace = true } +miden-tx = { features = ["concurrent", "std"], workspace = true } +miden-tx-batch-prover = { features = ["std"], workspace = true } +opentelemetry = { version = "0.31" } +prost = { default-features = false, features = ["derive"], workspace = true } +tokio = { features = ["full"], workspace = true } +tokio-stream = { features = ["net"], version = "0.1" } +tonic = { default-features = false, features = ["codegen", "router", "transport"], version = "0.14" } +tonic-health = { version = "0.14" } +tonic-prost = { workspace = true } +tonic-reflection = { workspace = true } +tonic-web = { version = "0.14" } +tower-http = { features = ["trace"], workspace = true } +tracing = { workspace = true } [dev-dependencies] miden-protocol = { features = ["testing"], workspace = true } @@ -66,6 +46,7 @@ miden-testing = { workspace = true } miden-tx = { features = ["testing"], workspace = true } [build-dependencies] -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { features = ["fancy"], version = "7.5" } -tonic-prost-build = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { features = ["fancy"], version = "7.5" } +tonic-prost-build = { workspace = true } diff --git a/bin/remote-prover/README.md b/bin/remote-prover/README.md index 476e2293fe..364cfd56bf 100644 --- 
a/bin/remote-prover/README.md +++ b/bin/remote-prover/README.md @@ -1,14 +1,42 @@ # Miden remote prover -A service for generating Miden proofs on-demand. The binary enables spawning workers and a proxy for Miden's remote prover. It currently supports proving individual transactions, transaction batches, and blocks. +A gRPC server which provides a service for proving either transactions, batches or blocks for the Miden blockchain. -A worker is a gRPC service that can receive transaction witnesses, proposed batches, or proposed blocks, prove them, and return the generated proofs. It can handle only one request at a time and will return an error if it is already in use. Each worker is specialized on startup to handle exactly one type of proof requests - transactions, batches, or blocks. +This enables weaker devices to offload the proof generation to a beefy remote server running this service. -The proxy uses [Cloudflare's Pingora crate](https://crates.io/crates/pingora), which provides features to create a modular proxy. It is meant to handle multiple workers with a queue, assigning a worker to each request and retrying if the worker is not available. Further information about Pingora and its features can be found in the [official GitHub repository](https://github.com/cloudflare/pingora). +The implementation provides a configurable request queue and proves one request at a time in FIFO order. This is not intended to cover +complex proxy setups nor load-balancing, but can instead be used as a starting point for more advanced setups. -## Debian Installation +The gRPC specification can be found in the [Miden repository](https://github.com/0xMiden/miden-node/blob/main/proto/proto/remote_prover.proto). +Ensure you are viewing the appropriate version tag or commit. -#### Prover +## Quick start + +```bash +# Install the binary. +cargo install miden-remote-prover --locked + +# and start as a transaction prover. 
+miden-remote-prover \ + --kind transaction \ # Specify the kind of proof to generate (transaction, batch, or block) + --port 50051 +``` + +In a separate terminal, inspect the available services using grpcurl and reflection. + +```bash +grpcurl -plaintext localhost:50051 list +``` + +or query the status of the prover. + +```bash +grpcurl -plaintext localhost:50051 remote_prover.WorkerStatusApi/Status +``` + +## Installation + +### Debian package Install the Debian package: @@ -33,32 +61,7 @@ sudo systemctl enable miden-prover sudo systemctl start miden-prover ``` -#### Prover Proxy - -```bash -set -e - -sudo wget https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-proxy-v0.8-arm64.deb -O prover-proxy.deb -sudo wget -q -O - https://github.com/0xMiden/miden-node/releases/download/v0.8/miden-prover-proxy-v0.8-arm64.deb.checksum | awk '{print $1}' | sudo tee prover-proxy.checksum -sudo sha256sum prover-proxy.deb | awk '{print $1}' > prover-proxy.sha256 -sudo diff prover-proxy.sha256 prover-proxy.checksum -sudo dpkg -i prover-proxy.deb -sudo rm prover-proxy.deb -``` - -Edit the configuration file `/lib/systemd/system/miden-prover-proxy.service.env` - -Edit the service file to specify workers `/lib/systemd/system/miden-prover-proxy.service` - -Run the service: - -```bash -sudo systemctl daemon-reload -sudo systemctl enable miden-prover-proxy -sudo systemctl start miden-prover-proxy -``` - -## Source Installation +### From source To build the service from a local version, from the root of the workspace you can run: @@ -68,274 +71,73 @@ make install-remote-prover The CLI can be installed from the source code using specific git revisions with `cargo install` or from crates.io with `cargo install miden-remote-prover`. -## Worker - -To start the worker service you will need to run: - -```bash -miden-remote-prover start-worker --port 8082 --prover-type transaction -``` - -This will spawn a worker using the port defined in the command option. 
The host will be 0.0.0.0 by default, or 127.0.0.1 if the --localhost flag is used. In case that the port is not provided, it will default to `50051`. This command will start a worker that can handle transaction and batch proving requests. - -The `--prover-type` flag is required and specifies which type of proof the worker will handle. The available options are: - -- `transaction`: For transaction proofs -- `batch`: For batch proofs -- `block`: For block proofs - -Each worker can only handle one type of proof. If you need to handle multiple proof types, you should start multiple workers, each with a different proof type. Additionally, you can use the `--localhost` flag to bind to 127.0.0.1 instead of 0.0.0.0. - -### Worker Configuration - -The worker can be configured using the following environment variables: - -| Variable | Description | Default | -|---------------------------|---------------------------------|---------------| -| `MRP_WORKER_LOCALHOST` | Use localhost (127.0.0.1) | `false` | -| `MRP_WORKER_PORT` | The port number for the worker | `50051` | -| `MRP_WORKER_PROOF_TYPE` | The supported prover type | `transaction` | +## Configuration -For example: +Quick start: ```bash -export MRP_WORKER_LOCALHOST="true" -export MRP_WORKER_PORT="8082" -export MRP_WORKER_PROOF_TYPE="block" -miden-remote-prover start-worker +miden-remote-prover --kind transaction ``` -## Proxy - -To start the proxy service, you will need to run: - -```bash -miden-remote-prover start-proxy --prover-type transaction --workers [worker1],[worker2],...,[workerN] -``` - -For example: +The prover can be further configured from the command line or using environment variables as per the help message: ```bash -miden-remote-prover start-proxy --prover-type transaction --workers 0.0.0.0:8084,0.0.0.0:8085 -``` +> miden-remote-prover --help -This command will start the proxy using the workers passed as arguments. The workers should be in the format `host:port`. 
Another way to specify the workers is by using the `MRP_PROXY_WORKERS_LIST` environment variable, which can be set to a comma-separated list of worker addresses. For example: +Usage: miden-remote-prover [OPTIONS] --kind -```bash -export MRP_PROXY_WORKERS_LIST="0.0.0.0:8084,0.0.0.0:8085" -``` +Options: + --port + The port the gRPC server will be hosted on -If no workers are passed, the proxy will start without any workers and will not be able to handle any requests until one is added through the `miden-remote-prover add-worker` command. + [env: MIDEN_PROVER_PORT=] + [default: 50051] -The `--prover-type` flag is required and specifies which type of proof the proxy will handle. The available options are: + --kind + The proof type that the prover will be handling -- `transaction`: For transaction proofs -- `batch`: For batch proofs -- `block`: For block proofs + [env: MIDEN_PROVER_KIND=] + [possible values: transaction, batch, block] -The proxy can only handle one type of proof at a time. When you add workers to the proxy, it will check their supported proof type. Workers that support a different proof type than the proxy will be marked as unhealthy and will not be used for proving requests. + --timeout + Maximum time allowed for a proof request to complete. Once exceeded, the request is aborted -For example, if you start a proxy with `--prover-type transaction` and add these workers: + [env: MIDEN_PROVER_TIMEOUT=] + [default: 60s] -- Worker 1: Transaction proofs (Healthy) -- Worker 2: Batch proofs (Unhealthy - incompatible proof type) -- Worker 3: Block proofs (Unhealthy - incompatible proof type) + --capacity + Maximum number of concurrent proof requests that the prover will allow. -Only Worker 1 will be used for proving requests, while Workers 2 and 3 will be marked as unhealthy due to incompatible proof types. + Note that the prover only proves one request at a time; the rest are queued. 
+ This capacity is used to limit the number of requests that can be queued at any given time, + and includes the one request that is currently being processed. -You can customize the proxy service by setting environment variables. Possible customizations can be found by running `miden-remote-prover start-proxy --help`. + [env: MIDEN_PROVER_CAPACITY=] + [default: 1] -An example `.env` file is provided in the crate's root directory. To use the variables from a file in any Unix-like operating system, you can run `source `. - -At the moment, when a worker added to the proxy stops working and can not connect to it for a request, the connection is marked as retriable meaning that the proxy will try reaching another worker. The number of retries is configurable via the `MRP_MAX_RETRIES_PER_REQUEST` environmental variable. - -## Updating workers on a running proxy - -To update the workers on a running proxy, two commands are provided: `add-workers` and `remove-workers`. These commands will update the workers on the proxy and will not require a restart. 
To use these commands, you will need to run: - -```bash -miden-remote-prover add-workers --control-port [worker1],[worker2],...,[workerN] -miden-remote-prover remove-workers --control-port [worker1],[worker2],...,[workerN] + -h, --help + Print help (see a summary with '-h') ``` -For example: +## Status, health and monitoring -```bash -# To add 0.0.0.0:8085 and 200.58.70.4:50051 to the workers list: -miden-remote-prover add-workers --control-port 8083 0.0.0.0:8085,200.58.70.4:50051 -# To remove 158.12.12.3:8080 and 122.122.6.6:50051 from the workers list: -miden-remote-prover remove-workers --control-port 8083 158.12.12.3:8080,122.122.6.6:50051 -``` +The server implements the following health and status related gRPC services: -These commands can receive the list of workers to update as a comma-separated list of addresses through the `MRP_PROXY_WORKERS_LIST` environment variable, or as command-line arguments: +- [gRPC Health Check](https://grpc.io/docs/guides/health-checking/) +- [gRPC Reflection](https://grpc.io/docs/guides/reflection/) +- [WorkerStatusApi](https://github.com/0xMiden/miden-node/blob/main/proto/proto/remote_prover.proto) -```bash -export MRP_PROXY_WORKERS_LIST="0.0.0.0:8085,200.58.70.4:50051" -miden-remote-prover add-workers --control-port 8083 -miden-remote-prover remove-workers --control-port 8083 -``` +The server supports OpenTelemetry traces which can be configured using the environment variables specified in the OpenTelemetry documentation. -The `--control-port` flag is required to specify the port where the proxy is listening for updates. The workers are passed as arguments in the format `host:port`. The port can be specified via the `MRP_CONTROL_PORT` environment variable. 
For example: +For example, to send the traces to [HoneyComb](https://www.honeycomb.io/): ```bash -export MRP_CONTROL_PORT="8083" -miden-remote-prover add-workers 0.0.0.0:8085 +OTEL_SERVICE_NAME=miden-remote-prover +OTEL_EXPORTER_OTLP_ENDPOINT=https://api.honeycomb.io +OTEL_EXPORTER_OTLP_HEADERS=x-honeycomb-team= ``` -Note that, in order to update the workers, the proxy must be running in the same computer as the command is being executed because it will check if the client address is localhost to avoid any security issues. - -### Health check - -The worker service implements the [gRPC Health Check](https://grpc.io/docs/guides/health-checking/) standard, and includes the methods described in this [official proto file](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto). - -The proxy service uses this health check to determine if a worker is available to receive requests. If a worker is not available, it will be removed from the set of workers that the proxy can use to send requests. - -### Status check - -The worker service implements a custom status check that returns information about the worker's current state and supported proof type. The proxy service uses this status check to determine if a worker is available to receive requests and if it supports the required proof type. If a worker is not available or doesn't support the required proof type, it will be removed from the set of workers that the proxy can use to send requests. - -The status check returns: - -- Whether the worker is ready to process requests -- The type of proofs the worker supports (transaction, batch, or block proofs) -- The version of the worker - -### Proxy Status Endpoint - -The proxy service exposes a gRPC status endpoint that provides information about the current state of the proxy and its workers. This endpoint implements the `ProxyStatusApi` service defined in `proxy_status.proto`. 
- -#### gRPC Service Definition - -The status service provides the following method: - -- `Status(ProxyStatusRequest) -> ProxyStatusResponse`: Returns the current status of the proxy and all its workers - -#### Response Format - -The gRPC response includes the following information: - -- `version`: The version of the proxy -- `supported_proof_type`: The type of proof that the proxy supports (`TRANSACTION`, `BATCH`, or `BLOCK`) -- `workers`: A list of workers with their status information - -Each worker status includes: - -- `address`: The worker's network address -- `version`: The worker's version -- `status`: The worker's health status (`UNKNOWN`, `HEALTHY`, or `UNHEALTHY`) - -#### Example Usage - -You can query the status endpoint using a gRPC client. For example, using `grpcurl`: - -```bash -# Assuming the proxy is running on port 8084 -grpcurl -plaintext -import-path ./proto -proto proxy_status.proto \ - -d '{}' localhost:8084 proxy_status.ProxyStatusApi.Status -``` - -Example response: - -```json -{ - "version": "0.8.0", - "supported_proof_type": "TRANSACTION", - "workers": [ - { - "address": "0.0.0.0:50051", - "version": "0.8.0", - "status": "UNHEALTHY" - }, - { - "address": "0.0.0.0:50052", - "version": "0.8.0", - "status": "HEALTHY" - } - ] -} -``` - -The status endpoint is integrated into the main proxy service and uses the same port as the proxy. The status information is automatically updated during health checks, ensuring it reflects the current state of all workers. - -## Logging and Tracing - -The service uses the [`tracing`](https://docs.rs/tracing/latest/tracing/) crate for both logging and distributed tracing, providing structured, high-performance logs and trace data. - -By default, logs are written to `stdout` and the default logging level is `info`. This can be changed via the `RUST_LOG` environment variable. For example: - -``` -export RUST_LOG=debug -``` - -For tracing, we use OpenTelemetry protocol. 
By default, traces are exported to the endpoint specified by `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable. To consume and visualize these traces we can use Jaeger or any other OpenTelemetry compatible consumer. - -The simplest way to install Jaeger is by using a [Docker](https://www.docker.com/) container. To do so, run: - -```bash -docker run -d -p4317:4317 -p16686:16686 jaegertracing/all-in-one:latest -``` - -Then access the Jaeger UI at `http://localhost:16686/`. - -If Docker is not an option, Jaeger can also be set up directly on your machine or hosted in the cloud. See the [Jaeger documentation](https://www.jaegertracing.io/docs/) for alternative installation methods. - -## Metrics - -The proxy includes a service that can optionally expose metrics to be consumed by [Prometheus](https://prometheus.io/docs/introduction/overview/). This service is enabled by specifying a metrics port. - -### Enabling Prometheus Metrics - -To enable Prometheus metrics, simply specify a port on which to expose the metrics. This can be done via environment variables or command-line arguments. - -#### Using Environment Variables - -Set the following environment variable: - -```bash -export MRP_METRICS_PORT=6192 # Set to enable metrics on port 6192 -``` - -To disable metrics, simply don't set the MRP_METRICS_PORT environment variable. - -#### Using Command-Line Arguments - -Specify a metrics port using the `--metrics-port` flag when starting the proxy: - -```bash -miden-remote-prover start-proxy --metrics-port 6192 [worker1] [worker2] ... [workerN] -``` - -If you don't specify a metrics port, metrics will be disabled. - -When enabled, the Prometheus metrics will be available at `http://0.0.0.0:` (e.g., `http://0.0.0.0:6192`). - -The metrics architecture works by having the proxy expose metrics at an endpoint (`/metrics`) in a format Prometheus can read. Prometheus periodically scrapes this endpoint, adds timestamps to the metrics, and stores them in its time-series database. 
Then, we can use tools like Grafana to query Prometheus and visualize these metrics in configurable dashboards. - -The simplest way to install Prometheus and Grafana is by using Docker containers. To do so, run: - -```bash -docker run \ - -d \ - -p 9090:9090 \ - -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \ - prom/prometheus - -docker run -d -p 3000:3000 --name grafana grafana/grafana-enterprise:latest -``` - -In case that Docker is not an option, Prometheus and Grafana can also be set up directly on your machine or hosted in the cloud. See the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/getting_started/) and [Grafana documentation](https://grafana.com/docs/grafana/latest/setup-grafana/) for alternative installation methods. - -A prometheus configuration file is provided in this repository, you will need to modify the `scrape_configs` section to include the URL of the proxy service (e.g., `http://0.0.0.0:6192`). - -Then, to add the new Prometheus collector as a datasource for Grafana, you can [follow this tutorial](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/existing-datasource/). A Grafana dashboard under the name `proxy_grafana_dashboard.json` is provided, see this [link](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/import-dashboards/) to import it. Otherwise, you can [create your own dashboard](https://grafana.com/docs/grafana/latest/getting-started/build-first-dashboard/) using the metrics provided by the proxy and export it by following this [link](https://grafana.com/docs/grafana/latest/dashboards/share-dashboards-panels/#export-a-dashboard-as-json). - -## Features - -Description of this crate's feature: - -| Features | Description | -| ------------ | ------------------------------------------------------ | -| `concurrent` | Enables concurrent code to speed up runtime execution. | +A self-hosted alternative is [Jaeger](https://www.jaegertracing.io/). 
## License diff --git a/bin/remote-prover/build.rs b/bin/remote-prover/build.rs index f9b2eaafb3..262ab49aff 100644 --- a/bin/remote-prover/build.rs +++ b/bin/remote-prover/build.rs @@ -12,7 +12,8 @@ const GENERATED_OUT_DIR: &str = "src/generated"; /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); + miden_node_rocksdb_cxx_linkage_fix::configure(); + println!("cargo:rerun-if-env-changed=BUILD_PROTO"); if !BUILD_GENERATED_FILES_IN_SRC { return Ok(()); } diff --git a/bin/remote-prover/grafana_dashboard.json b/bin/remote-prover/grafana_dashboard.json deleted file mode 100644 index bc391feba7..0000000000 --- a/bin/remote-prover/grafana_dashboard.json +++ /dev/null @@ -1,1082 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": 1, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 18, - "panels": [], - "title": "Requests", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "red", - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, 
- "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqpm" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Total requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Failed requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "red", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Accepted requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "green", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 9, - "x": 0, - "y": 1 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_count[1m]))", - "hide": false, - "instant": false, - "legendFormat": "Total requests", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_count[1m])) - sum(rate(rate_limited_requests[1m])) - sum(rate(queue_drop_count[1m]))", - "hide": false, - "instant": false, - "legendFormat": "Accepted requests", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "sum(rate(request_failure_count[1m]))", - "legendFormat": "Failed requests", - "range": true, - "refId": "A" - } - ], - "title": "Requests", - 
"type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqpm" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Rate limited requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "orange", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Queue overflow requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "purple", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 9, - "x": 9, - "y": 1 - }, - "id": 16, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(rate_limited_requests[1m])", - "hide": false, - "instant": false, - "legendFormat": "Rate limited requests", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - 
}, - "editorMode": "code", - "expr": "rate(queue_drop_count[1m])", - "hide": false, - "instant": false, - "legendFormat": "Queue overflow requests", - "range": true, - "refId": "C" - } - ], - "title": "Rejected requests", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-YlRd" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqpm" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 5, - "x": 18, - "y": 1 - }, - "id": 17, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(request_retries[1m])", - "legendFormat": "Retry rate", - "range": true, - "refId": "A" - } - ], - "title": "Request retry rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - 
"axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 9 - }, - "id": 13, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "(1 - rate(request_failure_count[1m]) / rate(request_count[1m])) * 100", - "legendFormat": "Success rate over time", - "range": true, - "refId": "A" - } - ], - "title": "Success rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - 
"lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 8, - "y": 9 - }, - "id": 11, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(request_latency_sum[1m]) / rate(request_latency_count[1m])", - "legendFormat": "Average request latency", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(queue_latency_sum[1m]) / rate(queue_latency_count[1m])", - "hide": false, - "instant": false, - "legendFormat": "Average queue latency", - "range": true, - "refId": "B" - } - ], - "title": "Latency", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 17 - }, - "id": 19, - "panels": [], - "title": "Workers", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - 
"insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 0, - "y": 18 - }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "worker_count", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Total workers", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "worker_busy", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "Busy workers", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Workers", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "red", - "mode": "fixed" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMax": 3, - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": 
false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 7, - "y": 18 - }, - "id": 21, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "worker_unhealthy", - "legendFormat": "{{worker_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Unhealthy workers", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "auto", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 7, - "x": 14, - "y": 18 - }, - "id": 12, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "frameIndex": 0, - "showHeader": true - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "editorMode": "code", - "expr": "rate(worker_request_count[1m])", - "legendFormat": 
"{{worker_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Requests per worker", - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 26 - }, - "id": 20, - "panels": [], - "title": "Queue", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 15 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 0, - "y": 27 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.4.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "behiuc5qk89vkb" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "queue_size", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Queue size", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Queue size", - "type": "timeseries" - } - ], - "preload": false, - "refresh": "5s", - "schemaVersion": 40, - "tags": [], - "templating": { - 
"list": [] - }, - "time": { - "from": "2025-03-31T19:02:51.110Z", - "to": "2025-03-31T19:04:03.015Z" - }, - "timepicker": {}, - "timezone": "browser", - "title": "tx_prover", - "uid": "be7bobzl5fr40f", - "version": 6, - "weekStart": "" -} diff --git a/bin/remote-prover/prometheus.yml b/bin/remote-prover/prometheus.yml deleted file mode 100644 index 817e92f244..0000000000 --- a/bin/remote-prover/prometheus.yml +++ /dev/null @@ -1,16 +0,0 @@ -global: - scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. - evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. - # scrape_timeout is set to the global default (10s). - -# A scrape configuration containing exactly one endpoint to scrape: -scrape_configs: - # The job name is a label that is used to group targets in the Prometheus UI. - # It can be any string. - - job_name: "remote_prover" - # Here you need to specify the address of the Prometheus service endpoint in the proxy - # We use the default port for Prometheus, but it need to be changed if you use a different host - # or port. In case of using Prometheus in a docker container, you can use the - # `host.docker.internal` address to access the host machine. 
- static_configs: - - targets: ["127.0.0.1:6192"] diff --git a/bin/remote-prover/src/api/mod.rs b/bin/remote-prover/src/api/mod.rs deleted file mode 100644 index 4aee8807b4..0000000000 --- a/bin/remote-prover/src/api/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use tokio::net::TcpListener; - -use crate::generated::api_server::ApiServer; -use crate::generated::worker_status_api_server::WorkerStatusApiServer; - -pub(crate) mod prover; -mod status; - -pub use prover::{ProofType, ProverRpcApi}; - -pub struct RpcListener { - pub api_service: ApiServer, - pub status_service: WorkerStatusApiServer, - pub listener: TcpListener, -} - -impl RpcListener { - pub fn new(listener: TcpListener, proof_type: ProofType) -> Self { - let prover_rpc_api = ProverRpcApi::new(proof_type); - let status_rpc_api = status::StatusRpcApi::new(proof_type); - let api_service = ApiServer::new(prover_rpc_api); - let status_service = WorkerStatusApiServer::new(status_rpc_api); - Self { api_service, status_service, listener } - } -} diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs deleted file mode 100644 index 24a70f7312..0000000000 --- a/bin/remote-prover/src/api/prover.rs +++ /dev/null @@ -1,355 +0,0 @@ -use miden_block_prover::LocalBlockProver; -use miden_node_proto::BlockProofRequest; -use miden_node_utils::ErrorReport; -use miden_protocol::MIN_PROOF_SECURITY_LEVEL; -use miden_protocol::batch::ProposedBatch; -use miden_protocol::transaction::TransactionInputs; -use miden_protocol::utils::Serializable; -use miden_tx::LocalTransactionProver; -use miden_tx_batch_prover::LocalBatchProver; -use serde::{Deserialize, Serialize}; -use tokio::sync::Mutex; -use tonic::{Request, Response, Status}; -use tracing::{info, instrument}; - -use crate::COMPONENT; -use crate::generated::api_server::Api as ProverApi; -use crate::generated::{self as proto}; - -/// Specifies the type of proof supported by the remote prover. 
-#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] -pub enum ProofType { - #[default] - Transaction, - Batch, - Block, -} - -impl std::fmt::Display for ProofType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ProofType::Transaction => write!(f, "transaction"), - ProofType::Batch => write!(f, "batch"), - ProofType::Block => write!(f, "block"), - } - } -} - -impl std::str::FromStr for ProofType { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "transaction" => Ok(ProofType::Transaction), - "batch" => Ok(ProofType::Batch), - "block" => Ok(ProofType::Block), - _ => Err(format!("Invalid proof type: {s}")), - } - } -} - -/// The prover for the remote prover. -/// -/// This enum is used to store the prover for the remote prover. -/// Only one prover is enabled at a time. -enum Prover { - Transaction(Mutex), - Batch(Mutex), - Block(Mutex), -} - -impl Prover { - fn new(proof_type: ProofType) -> Self { - match proof_type { - ProofType::Transaction => { - info!(target: COMPONENT, proof_type = ?proof_type, "Transaction prover initialized"); - Self::Transaction(Mutex::new(LocalTransactionProver::default())) - }, - ProofType::Batch => { - info!(target: COMPONENT, proof_type = ?proof_type, security_level = MIN_PROOF_SECURITY_LEVEL, "Batch prover initialized"); - Self::Batch(Mutex::new(LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL))) - }, - ProofType::Block => { - info!(target: COMPONENT, proof_type = ?proof_type, security_level = MIN_PROOF_SECURITY_LEVEL, "Block prover initialized"); - Self::Block(Mutex::new(LocalBlockProver::new(MIN_PROOF_SECURITY_LEVEL))) - }, - } - } -} - -pub struct ProverRpcApi { - prover: Prover, -} - -impl ProverRpcApi { - pub fn new(proof_type: ProofType) -> Self { - let prover = Prover::new(proof_type); - - Self { prover } - } - - #[allow(clippy::result_large_err)] - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_tx", - 
skip_all, - ret(level = "debug"), - fields(request_id = %request_id, transaction_id = tracing::field::Empty), - err - )] - pub async fn prove_tx( - &self, - tx_inputs: TransactionInputs, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Transaction(prover) = &self.prover else { - return Err(Status::unimplemented("Transaction prover is not enabled")); - }; - - let locked_prover = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))?; - - // Add a small delay to simulate longer proving time for testing - #[cfg(test)] - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - let proof = locked_prover.prove(tx_inputs).map_err(internal_error)?; - - // Record the transaction_id in the current tracing span - let transaction_id = proof.id(); - tracing::Span::current().record("transaction_id", tracing::field::display(&transaction_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proof.to_bytes() })) - } - - #[allow(clippy::result_large_err)] - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_batch", - skip_all, - ret(level = "debug"), - fields(request_id = %request_id, batch_id = tracing::field::Empty), - err - )] - pub fn prove_batch( - &self, - proposed_batch: ProposedBatch, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Batch(prover) = &self.prover else { - return Err(Status::unimplemented("Batch prover is not enabled")); - }; - - let proven_batch = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(proposed_batch) - .map_err(internal_error)?; - - // Record the batch_id in the current tracing span - let batch_id = proven_batch.id(); - tracing::Span::current().record("batch_id", tracing::field::display(&batch_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proven_batch.to_bytes() })) - } - - #[allow(clippy::result_large_err)] - #[instrument( - target = COMPONENT, - name = "remote_prover.prove_block", - skip_all, - ret(level = "debug"), - fields(request_id = %request_id, block_id = tracing::field::Empty), - err - )] - pub fn prove_block( - &self, - proof_request: BlockProofRequest, - request_id: &str, - ) -> Result, tonic::Status> { - let Prover::Block(prover) = &self.prover else { - return Err(Status::unimplemented("Block prover is not enabled")); - }; - let BlockProofRequest { tx_batches, block_header, block_inputs } = proof_request; - - // Record the commitment of the block in the current tracing span. - let block_id = block_header.commitment(); - tracing::Span::current().record("block_id", tracing::field::display(&block_id)); - - let block_proof = prover - .try_lock() - .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(tx_batches, block_header, block_inputs) - .map_err(internal_error)?; - - Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) - } -} - -#[async_trait::async_trait] -impl ProverApi for ProverRpcApi { - #[instrument( - target = COMPONENT, - name = "remote_prover.prove", - skip_all, - ret(level = "debug"), - fields(request_id = tracing::field::Empty), - err - )] - async fn prove( - &self, - request: Request, - ) -> Result, tonic::Status> { - // Extract X-Request-ID header for trace correlation - let request_id = request - .metadata() - .get("x-request-id") - .and_then(|v| v.to_str().ok()) - .unwrap_or("unknown") - .to_string(); // Convert to owned string to avoid lifetime issues - - // Record the request_id in the current tracing span - tracing::Span::current().record("request_id", &request_id); - - // Extract the proof type and payload - let proof_request = request.into_inner(); - let proof_type = proof_request.proof_type(); - - match proof_type { - proto::remote_prover::ProofType::Transaction => { - let tx_inputs = proof_request.try_into().map_err(invalid_argument)?; - self.prove_tx(tx_inputs, &request_id).await - }, - proto::remote_prover::ProofType::Batch => { - let proposed_batch = proof_request.try_into().map_err(invalid_argument)?; - self.prove_batch(proposed_batch, &request_id) - }, - proto::remote_prover::ProofType::Block => { - let proof_request = proof_request.try_into().map_err(invalid_argument)?; - self.prove_block(proof_request, &request_id) - }, - } - } -} - -// UTILITIES -// ================================================================================================ - -fn internal_error(err: E) -> Status { - Status::internal(err.as_report()) -} - -fn invalid_argument(err: E) -> Status { - Status::invalid_argument(err.as_report()) -} - -// TESTS -// ================================================================================================ - -#[cfg(test)] -mod test { - use std::time::Duration; - - use 
miden_node_utils::cors::cors_for_grpc_web_layer; - use miden_protocol::asset::{Asset, FungibleAsset}; - use miden_protocol::note::NoteType; - use miden_protocol::testing::account_id::{ - ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, - ACCOUNT_ID_SENDER, - }; - use miden_protocol::transaction::ProvenTransaction; - use miden_testing::{Auth, MockChainBuilder}; - use miden_tx::utils::Serializable; - use tokio::net::TcpListener; - use tonic::Request; - use tonic_web::GrpcWebLayer; - - use crate::api::ProverRpcApi; - use crate::generated::api_client::ApiClient; - use crate::generated::api_server::ApiServer; - use crate::generated::{self as proto}; - - #[tokio::test(flavor = "multi_thread", worker_threads = 3)] - async fn test_prove_transaction() { - // Start the server in the background - let listener = TcpListener::bind("127.0.0.1:50052").await.unwrap(); - - let proof_type = proto::remote_prover::ProofType::Transaction; - - let api_service = ApiServer::new(ProverRpcApi::new(proof_type.into())); - - // Spawn the server as a background task - tokio::spawn(async move { - tonic::transport::Server::builder() - .accept_http1(true) - .layer(cors_for_grpc_web_layer()) - .layer(GrpcWebLayer::new()) - .add_service(api_service) - .serve_with_incoming(tokio_stream::wrappers::TcpListenerStream::new(listener)) - .await - .unwrap(); - }); - - // Give the server some time to start - tokio::time::sleep(Duration::from_secs(1)).await; - - // Set up a gRPC client to send the request - let mut client = ApiClient::connect("http://127.0.0.1:50052").await.unwrap(); - let mut client_2 = ApiClient::connect("http://127.0.0.1:50052").await.unwrap(); - - // Create a mock transaction to send to the server - let mut mock_chain_builder = MockChainBuilder::new(); - let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); - - let fungible_asset_1: Asset = - FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) - .unwrap() - .into(); - let note_1 = mock_chain_builder 
- .add_p2id_note( - ACCOUNT_ID_SENDER.try_into().unwrap(), - account.id(), - &[fungible_asset_1], - NoteType::Private, - ) - .unwrap(); - - let mock_chain = mock_chain_builder.build().unwrap(); - - let tx_context = mock_chain - .build_tx_context(account.id(), &[note_1.id()], &[]) - .unwrap() - .build() - .unwrap(); - - let executed_transaction = Box::pin(tx_context.execute()).await.unwrap(); - let tx_inputs = executed_transaction.tx_inputs(); - - let request_1 = Request::new(proto::remote_prover::ProofRequest { - proof_type: proto::remote_prover::ProofType::Transaction.into(), - payload: tx_inputs.to_bytes(), - }); - - let request_2 = Request::new(proto::remote_prover::ProofRequest { - proof_type: proto::remote_prover::ProofType::Transaction.into(), - payload: tx_inputs.to_bytes(), - }); - - // Send both requests concurrently - let (response_1, response_2) = - tokio::join!(client.prove(request_1), client_2.prove(request_2)); - - // Check the success response - assert!(response_1.is_ok() || response_2.is_ok()); - - // Check the failure response - assert!(response_1.is_err() || response_2.is_err()); - - let response_success = response_1.or(response_2).unwrap(); - - // Cast into a ProvenTransaction - let _proven_transaction: ProvenTransaction = - response_success.into_inner().try_into().expect("Failed to convert response"); - } -} diff --git a/bin/remote-prover/src/commands/mod.rs b/bin/remote-prover/src/commands/mod.rs deleted file mode 100644 index 13b21d8a50..0000000000 --- a/bin/remote-prover/src/commands/mod.rs +++ /dev/null @@ -1,125 +0,0 @@ -use std::time::Duration; - -use clap::Parser; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use proxy::StartProxy; -use tracing::instrument; -use update_workers::{AddWorkers, RemoveWorkers, UpdateWorkers}; -use worker::StartWorker; - -pub mod proxy; -pub mod update_workers; -pub mod worker; - -pub(crate) const PROXY_HOST: &str = "0.0.0.0"; - -#[derive(Debug, Parser)] -pub(crate) struct 
ProxyConfig { - /// Interval at which the system polls for available workers to assign new - /// tasks. - #[arg(long, default_value = "20ms", env = "MRP_AVAILABLE_WORKERS_POLLING_INTERVAL", value_parser = humantime::parse_duration)] - pub(crate) available_workers_polling_interval: Duration, - /// Maximum time to establish a connection. - #[arg(long, default_value = "10s", env = "MRP_CONNECTION_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) connection_timeout: Duration, - /// Health check interval. - #[arg(long, default_value = "10s", env = "MRP_HEALTH_CHECK_INTERVAL", value_parser = humantime::parse_duration)] - pub(crate) health_check_interval: Duration, - /// Maximum number of items in the queue. - #[arg(long, default_value = "10", env = "MRP_MAX_QUEUE_ITEMS")] - pub(crate) max_queue_items: usize, - /// Maximum number of requests per second per IP address. - #[arg(long, default_value = "5", env = "MRP_MAX_REQ_PER_SEC")] - pub(crate) max_req_per_sec: isize, - /// Maximum number of retries per request. - #[arg(long, default_value = "1", env = "MRP_MAX_RETRIES_PER_REQUEST")] - pub(crate) max_retries_per_request: usize, - /// Metrics configurations. - #[command(flatten)] - pub(crate) metrics_config: MetricsConfig, - /// Port of the proxy. - #[arg(long, default_value = "8082", env = "MRP_PORT")] - pub(crate) port: u16, - /// Maximum time allowed for a request to complete. Once exceeded, the request is - /// aborted. - #[arg(long, default_value = "100s", env = "MRP_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) timeout: Duration, - /// Control port. - /// - /// Port used to add and remove workers from the proxy. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - pub(crate) control_port: u16, - /// Supported proof type. - /// - /// The type of proof the proxy will handle. Only workers that support the same proof type - /// will be able to connect to the proxy. 
- #[arg(long, default_value = "transaction", env = "MRP_PROOF_TYPE")] - pub(crate) proof_type: ProofType, - /// Grace period before starting the final step of the graceful shutdown after - /// signaling shutdown. - #[arg(long, default_value = "20s", env = "MRP_GRACE_PERIOD", value_parser = humantime::parse_duration)] - pub(crate) grace_period: std::time::Duration, - /// Timeout of the final step for the graceful shutdown. - #[arg(long, default_value = "5s", env = "MRP_GRACEFUL_SHUTDOWN_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) graceful_shutdown_timeout: std::time::Duration, -} - -#[derive(Debug, Clone, clap::Parser)] -pub struct MetricsConfig { - /// Port for Prometheus-compatible metrics - /// If specified, metrics will be enabled on this port. If not specified, metrics will be - /// disabled. - #[arg(long, env = "MRP_METRICS_PORT")] - pub metrics_port: Option, -} - -/// Root CLI struct -#[derive(Parser, Debug)] -#[command( - name = "miden-remote-prover", - about = "A stand-alone service for proving Miden transactions.", - version, - rename_all = "kebab-case" -)] -pub struct Cli { - #[command(subcommand)] - action: Command, -} - -/// CLI actions -#[derive(Debug, Parser)] -pub enum Command { - /// Starts the workers with the configuration defined in the command. - StartWorker(StartWorker), - /// Starts the proxy. - StartProxy(StartProxy), - /// Adds workers to the proxy. - /// - /// This command will make a request to the proxy to add the specified workers. - AddWorkers(AddWorkers), - /// Removes workers from the proxy. - /// - /// This command will make a request to the proxy to remove the specified workers. 
- RemoveWorkers(RemoveWorkers), -} - -/// CLI entry point -impl Cli { - #[instrument(target = COMPONENT, name = "cli.execute", skip_all, ret(level = "info"), err)] - pub async fn execute(&self) -> anyhow::Result<()> { - match &self.action { - // For the `StartWorker` command, we need to create a new runtime and run the worker - Command::StartWorker(worker_init) => worker_init.execute().await, - Command::StartProxy(proxy_init) => proxy_init.execute().await, - Command::AddWorkers(update_workers) => { - let update_workers: UpdateWorkers = update_workers.clone().into(); - update_workers.execute().await - }, - Command::RemoveWorkers(update_workers) => { - let update_workers: UpdateWorkers = update_workers.clone().into(); - update_workers.execute().await - }, - } - } -} diff --git a/bin/remote-prover/src/commands/proxy.rs b/bin/remote-prover/src/commands/proxy.rs deleted file mode 100644 index e9266c948b..0000000000 --- a/bin/remote-prover/src/commands/proxy.rs +++ /dev/null @@ -1,129 +0,0 @@ -use clap::Parser; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::error::RemoteProverError; -use pingora::apps::HttpServerOptions; -use pingora::prelude::{Opt, background_service}; -use pingora::server::Server; -use pingora::server::configuration::ServerConf; -use pingora::services::listening::Service; -use pingora_proxy::http_proxy_service; -use tracing::{info, warn}; - -use super::ProxyConfig; -use crate::commands::PROXY_HOST; -use crate::proxy::update_workers::LoadBalancerUpdateService; -use crate::proxy::{LoadBalancer, LoadBalancerState}; -use crate::utils::check_port_availability; - -/// Starts the proxy. -/// -/// Example: `miden-remote-prover start-proxy --workers 0.0.0.0:8080,127.0.0.1:9090` -#[derive(Debug, Parser)] -pub struct StartProxy { - /// List of workers as host:port strings. - /// - /// Example: `127.0.0.1:8080,192.168.1.1:9090` - #[arg(long, env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Proxy configurations. 
- #[command(flatten)] - proxy_config: ProxyConfig, -} - -impl StartProxy { - /// Starts the proxy using the configuration defined in the command. - /// - /// This method will start a proxy with each worker passed as command argument as a backend, - /// using the configurations passed as options for the commands or the equivalent environmental - /// variables. - /// - /// # Errors - /// Returns an error in the following cases: - /// - The backend cannot be created. - /// - The Pingora configuration fails. - /// - The server cannot be started. - #[tracing::instrument(target = COMPONENT, name = "proxy.execute")] - pub async fn execute(&self) -> anyhow::Result<()> { - // Check if all required ports are available - check_port_availability(self.proxy_config.port, "Proxy")?; - check_port_availability(self.proxy_config.control_port, "Control")?; - - // First, check if the metrics port is specified (metrics enabled) - if let Some(metrics_port) = self.proxy_config.metrics_config.metrics_port { - check_port_availability(metrics_port, "Metrics")?; - } - - let mut conf = ServerConf::new().ok_or(RemoteProverError::PingoraConfigFailed( - "Failed to create server conf".to_string(), - ))?; - conf.grace_period_seconds = Some(self.proxy_config.grace_period.as_secs()); - conf.graceful_shutdown_timeout_seconds = - Some(self.proxy_config.graceful_shutdown_timeout.as_secs()); - - let mut server = Server::new_with_opt_and_conf(Some(Opt::default()), conf); - - server.bootstrap(); - - if self.workers.is_empty() { - warn!(target: COMPONENT, "Starting proxy without any workers"); - } else { - info!(target: COMPONENT, - worker_count = %self.workers.len(), - workers = ?self.workers, - "Proxy starting with workers" - ); - } - - let worker_lb = LoadBalancerState::new(self.workers.clone(), &self.proxy_config).await?; - - let health_check_service = background_service("health_check", worker_lb); - - let worker_lb = health_check_service.task(); - - let updater_service = 
LoadBalancerUpdateService::new(worker_lb.clone()); - - let mut update_workers_service = - Service::new("update_workers".to_string(), updater_service); - update_workers_service - .add_tcp(format!("{}:{}", PROXY_HOST, self.proxy_config.control_port).as_str()); - - // Set up the load balancer - let mut lb = http_proxy_service(&server.configuration, LoadBalancer(worker_lb.clone())); - - lb.add_tcp(format!("{}:{}", PROXY_HOST, self.proxy_config.port).as_str()); - info!(target: COMPONENT, - endpoint = %format!("{}:{}", PROXY_HOST, self.proxy_config.port), - "Proxy service listening" - ); - let logic = lb - .app_logic_mut() - .ok_or(RemoteProverError::PingoraConfigFailed("app logic not found".to_string()))?; - let mut http_server_options = HttpServerOptions::default(); - - // Enable HTTP/2 for plaintext - http_server_options.h2c = true; - logic.server_options = Some(http_server_options); - - // Enable Prometheus metrics if metrics_port is specified - if let Some(metrics_port) = self.proxy_config.metrics_config.metrics_port { - let metrics_addr = format!("{PROXY_HOST}:{metrics_port}"); - info!(target: COMPONENT, - endpoint = %metrics_addr, - "Metrics service initialized" - ); - let mut prometheus_service = - pingora::services::listening::Service::prometheus_http_service(); - prometheus_service.add_tcp(&metrics_addr); - server.add_service(prometheus_service); - } else { - info!(target: COMPONENT, "Metrics service disabled"); - } - - server.add_service(health_check_service); - server.add_service(update_workers_service); - server.add_service(lb); - tokio::task::spawn_blocking(|| server.run_forever()).await?; - - Ok(()) - } -} diff --git a/bin/remote-prover/src/commands/update_workers.rs b/bin/remote-prover/src/commands/update_workers.rs deleted file mode 100644 index c661a39dde..0000000000 --- a/bin/remote-prover/src/commands/update_workers.rs +++ /dev/null @@ -1,126 +0,0 @@ -use anyhow::Context; -use clap::Parser; -use reqwest::Client; -use serde::{Deserialize, Serialize}; - 
-use crate::commands::PROXY_HOST; - -// ADD WORKERS -// ================================================================================================ - -/// Add workers to the proxy -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct AddWorkers { - /// Workers to be added to the proxy. - /// - /// The workers are passed as host:port strings. - #[arg(value_name = "WORKERS", env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Port of the proxy endpoint to update workers. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - control_port: u16, -} - -// REMOVE WORKERS -// ================================================================================================ - -/// Remove workers from the proxy -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct RemoveWorkers { - /// Workers to be removed from the proxy. - /// - /// The workers are passed as host:port strings. - #[arg(value_name = "WORKERS", env = "MRP_PROXY_WORKERS_LIST", value_delimiter = ',')] - workers: Vec, - /// Port of the proxy endpoint to update workers. - #[arg(long, default_value = "8083", env = "MRP_CONTROL_PORT")] - control_port: u16, -} - -// UPDATE WORKERS -// ================================================================================================ - -/// Action to perform on the workers -#[derive(clap::ValueEnum, Clone, Debug, Serialize, Deserialize)] -pub enum Action { - Add, - Remove, -} - -/// Update workers in the proxy performing the specified [`Action`] -#[derive(Debug, Parser, Clone, Serialize, Deserialize)] -pub struct UpdateWorkers { - pub action: Action, - pub workers: Vec, - pub control_port: u16, -} - -impl UpdateWorkers { - /// Makes a requests to the update workers endpoint to update the workers. - /// - /// It works by sending a GET request to the proxy with the query parameters. The query - /// parameters are serialized from the struct fields. 
- /// - /// It uses the URL defined in the env vars or passed as parameter for the proxy. - /// - /// The request will return the new number of workers in the X-Worker-Count header. - /// - /// # Errors - /// - If the query parameters cannot be serialized. - /// - If the request fails. - /// - If the status code is not successful. - /// - If the X-Worker-Count header is missing. - pub async fn execute(&self) -> anyhow::Result<()> { - let query_params = serde_qs::to_string(&self)?; - - println!("Action: {:?}, with workers: {:?}", self.action, self.workers); - - // Create the full URL with fixed host "0.0.0.0" - let url = format!("http://{}:{}?{}", PROXY_HOST, self.control_port, query_params); - - // Create an HTTP/2 client - let client = Client::builder().http2_prior_knowledge().build()?; - - // Make the request - let response = client.get(url).send().await?; - - // Check status code - if !response.status().is_success() { - anyhow::bail!("Request failed with status code: {}", response.status()); - } - - // Read the X-Worker-Count header - let workers_count = response - .headers() - .get("X-Worker-Count") - .context("Missing X-Worker-Count header")? 
- .to_str()?; - - println!("New number of workers: {workers_count}"); - - Ok(()) - } -} - -// CONVERSIONS -// ================================================================================================ - -impl From for UpdateWorkers { - fn from(remove_workers: RemoveWorkers) -> Self { - UpdateWorkers { - action: Action::Remove, - workers: remove_workers.workers, - control_port: remove_workers.control_port, - } - } -} - -impl From for UpdateWorkers { - fn from(add_workers: AddWorkers) -> Self { - UpdateWorkers { - action: Action::Add, - workers: add_workers.workers, - control_port: add_workers.control_port, - } - } -} diff --git a/bin/remote-prover/src/commands/worker.rs b/bin/remote-prover/src/commands/worker.rs deleted file mode 100644 index 1417e5baa4..0000000000 --- a/bin/remote-prover/src/commands/worker.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::time::Duration; - -use clap::Parser; -use miden_node_utils::cors::cors_for_grpc_web_layer; -use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; -use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::{ProofType, RpcListener}; -use miden_remote_prover::generated::api_server::ApiServer; -use tokio::net::TcpListener; -use tokio_stream::wrappers::TcpListenerStream; -use tonic_health::server::health_reporter; -use tonic_web::GrpcWebLayer; -use tower_http::trace::TraceLayer; -use tracing::{info, instrument}; - -/// Starts a worker. -#[derive(Debug, Parser)] -pub struct StartWorker { - /// Use localhost (127.0.0.1) instead of 0.0.0.0 - #[arg(long, env = "MRP_WORKER_LOCALHOST")] - localhost: bool, - /// The port of the worker - #[arg(long, default_value = "50051", env = "MRP_WORKER_PORT")] - port: u16, - /// The type of proof that the worker will be handling - #[arg(long, env = "MRP_WORKER_PROOF_TYPE")] - proof_type: ProofType, - /// Maximum time allowed for a request to complete. Once exceeded, the request is - /// aborted. 
- #[arg(long, default_value = "60s", env = "MRP_TIMEOUT", value_parser = humantime::parse_duration)] - pub(crate) timeout: Duration, -} - -impl StartWorker { - /// Starts a worker. - /// - /// This method receives the port from the CLI and starts a worker on that port. - /// The host will be 127.0.0.1 if --localhost is specified, otherwise 0.0.0.0. - /// In case that the port is not provided, it will default to `50051`. - /// - /// The worker includes a health reporter that will mark the service as serving, following the - /// [gRPC health checking protocol]( - /// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto). - #[instrument(target = COMPONENT, name = "worker.execute")] - pub async fn execute(&self) -> anyhow::Result<()> { - let host = if self.localhost { "127.0.0.1" } else { "0.0.0.0" }; - let worker_addr = format!("{}:{}", host, self.port); - let rpc = RpcListener::new(TcpListener::bind(&worker_addr).await?, self.proof_type); - - let server_addr = rpc.listener.local_addr()?; - info!(target: COMPONENT, - endpoint = %server_addr, - proof_type = ?self.proof_type, - host = %host, - port = %self.port, - "Worker server initialized and listening" - ); - - // Create a health reporter - let (health_reporter, health_service) = health_reporter(); - - // Mark the service as serving - health_reporter.set_serving::>().await; - - tonic::transport::Server::builder() - .accept_http1(true) - .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) - .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .layer(cors_for_grpc_web_layer()) - .layer(GrpcWebLayer::new()) - .timeout(self.timeout) - .add_service(rpc.api_service) - .add_service(rpc.status_service) - .add_service(health_service) - .serve_with_incoming(TcpListenerStream::new(rpc.listener)) - .await?; - - Ok(()) - } -} diff --git a/bin/remote-prover/src/error.rs b/bin/remote-prover/src/error.rs deleted file mode 100644 index 16638c04ce..0000000000 --- a/bin/remote-prover/src/error.rs 
+++ /dev/null @@ -1,27 +0,0 @@ -use axum::http::uri::InvalidUri; -use thiserror::Error; - -// TX PROVER SERVICE ERROR -// ================================================================================================ - -#[derive(Debug, Error)] -pub enum RemoteProverError { - #[error("invalid uri {1}")] - InvalidURI(#[source] InvalidUri, String), - #[error("failed to connect to worker {1}")] - ConnectionFailed(#[source] tonic::transport::Error, String), - #[error("failed to create backend for worker")] - BackendCreationFailed(#[source] Box), - #[error("failed to setup pingora: {0}")] - PingoraConfigFailed(String), - #[error("failed to parse int: {0}")] - ParseError(#[from] std::num::ParseIntError), - #[error("port {1} is already in use: {0}")] - PortAlreadyInUse(#[source] std::io::Error, u16), -} - -impl From for String { - fn from(err: RemoteProverError) -> Self { - err.to_string() - } -} diff --git a/bin/remote-prover/src/generated/conversions.rs b/bin/remote-prover/src/generated/conversions.rs deleted file mode 100644 index e1bdc64069..0000000000 --- a/bin/remote-prover/src/generated/conversions.rs +++ /dev/null @@ -1,90 +0,0 @@ -// CONVERSIONS -// ================================================================================================ - -use miden_node_proto::BlockProofRequest; -use miden_protocol::batch::ProposedBatch; -use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; -use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; - -use crate::api::ProofType; -use crate::generated as proto; - -impl From for proto::Proof { - fn from(value: ProvenTransaction) -> Self { - proto::Proof { payload: value.to_bytes() } - } -} - -impl TryFrom for ProvenTransaction { - type Error = DeserializationError; - - fn try_from(response: proto::Proof) -> Result { - ProvenTransaction::read_from_bytes(&response.payload) - } -} - -impl TryFrom for TransactionInputs { - type Error = DeserializationError; - - fn try_from(request: 
proto::ProofRequest) -> Result { - TransactionInputs::read_from_bytes(&request.payload) - } -} - -impl TryFrom for ProposedBatch { - type Error = DeserializationError; - - fn try_from(request: proto::ProofRequest) -> Result { - ProposedBatch::read_from_bytes(&request.payload) - } -} - -impl TryFrom for BlockProofRequest { - type Error = DeserializationError; - - fn try_from(request: proto::ProofRequest) -> Result { - BlockProofRequest::read_from_bytes(&request.payload) - } -} - -impl From for proto::ProofType { - fn from(value: ProofType) -> Self { - match value { - ProofType::Transaction => proto::ProofType::Transaction, - ProofType::Batch => proto::ProofType::Batch, - ProofType::Block => proto::ProofType::Block, - } - } -} - -impl From for ProofType { - fn from(value: proto::ProofType) -> Self { - match value { - proto::ProofType::Transaction => ProofType::Transaction, - proto::ProofType::Batch => ProofType::Batch, - proto::ProofType::Block => ProofType::Block, - } - } -} - -impl TryFrom for ProofType { - type Error = String; - fn try_from(value: i32) -> Result { - match value { - 0 => Ok(ProofType::Transaction), - 1 => Ok(ProofType::Batch), - 2 => Ok(ProofType::Block), - _ => Err(format!("unknown ProverType value: {value}")), - } - } -} - -impl From for i32 { - fn from(value: ProofType) -> Self { - match value { - ProofType::Transaction => 0, - ProofType::Batch => 1, - ProofType::Block => 2, - } - } -} diff --git a/bin/remote-prover/src/generated/mod.rs b/bin/remote-prover/src/generated/mod.rs index eb7d89309e..f2af602746 100644 --- a/bin/remote-prover/src/generated/mod.rs +++ b/bin/remote-prover/src/generated/mod.rs @@ -1,7 +1,6 @@ #![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[rustfmt::skip] -pub mod remote_prover; -mod conversions; - +mod remote_prover; pub use remote_prover::*; diff --git a/bin/remote-prover/src/lib.rs b/bin/remote-prover/src/lib.rs 
deleted file mode 100644 index 0388ae685e..0000000000 --- a/bin/remote-prover/src/lib.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod api; -pub mod error; -pub mod generated; - -/// Component identifier for structured logging and tracing -pub const COMPONENT: &str = "miden-remote-prover"; diff --git a/bin/remote-prover/src/main.rs b/bin/remote-prover/src/main.rs index d4fc42f6da..e445d80f14 100644 --- a/bin/remote-prover/src/main.rs +++ b/bin/remote-prover/src/main.rs @@ -1,22 +1,20 @@ +use anyhow::Context; use clap::Parser; use miden_node_utils::logging::{OpenTelemetry, setup_tracing}; -use miden_remote_prover::COMPONENT; use tracing::info; -use crate::commands::Cli; +mod generated; +mod server; -pub(crate) mod commands; -pub(crate) mod proxy; -pub(crate) mod utils; +const COMPONENT: &str = "miden-prover"; #[tokio::main] async fn main() -> anyhow::Result<()> { let _otel_guard = setup_tracing(OpenTelemetry::Enabled)?; info!(target: COMPONENT, "Tracing initialized"); - // read command-line args - let cli = Cli::parse(); + let (handle, _port) = + server::Server::parse().spawn().await.context("failed to spawn server")?; - // execute cli action - cli.execute().await + handle.await.context("proof server panicked").flatten() } diff --git a/bin/remote-prover/src/proxy/health_check.rs b/bin/remote-prover/src/proxy/health_check.rs deleted file mode 100644 index b583c09827..0000000000 --- a/bin/remote-prover/src/proxy/health_check.rs +++ /dev/null @@ -1,70 +0,0 @@ -use miden_remote_prover::COMPONENT; -use pingora::prelude::sleep; -use pingora::server::ShutdownWatch; -use pingora::services::background::BackgroundService; -use tonic::async_trait; -use tracing::{debug_span, error}; - -use super::LoadBalancerState; - -/// Implement the [`BackgroundService`] trait for the [`LoadBalancerState`]. -/// -/// A [`BackgroundService`] can be run as part of a Pingora application to add supporting logic that -/// exists outside of the request/response lifecycle. 
-/// -/// We use this implementation to periodically check the health of the workers and update the list -/// of available workers. -#[async_trait] -impl BackgroundService for LoadBalancerState { - /// Starts the health check background service. - /// - /// This function is called when the Pingora server tries to start all the services. The - /// background service can return at anytime or wait for the `shutdown` signal. - /// - /// The health check background service will periodically check the health of the workers - /// using the gRPC status endpoint. If a worker is not healthy, it will be removed from - /// the list of available workers. - /// - /// # Errors - /// - If the worker has an invalid URI. - async fn start(&self, shutdown: ShutdownWatch) { - Box::pin(async move { - loop { - // Check if the shutdown signal has been received - { - if *shutdown.borrow() { - break; - } - } - - // Create a new spawn to perform the health check - let span = debug_span!(target: COMPONENT, "proxy.health_check"); - let _guard = span.enter(); - { - let mut workers = self.workers.write().await; - - for worker in workers.iter_mut() { - let status_result = worker.check_status(self.supported_proof_type).await; - - if let Err(ref reason) = status_result { - error!( - err = %reason, - worker.name = worker.name(), - "Worker failed health check" - ); - } - - worker.update_status(status_result); - } - } - - // Update the status cache with current worker status - self.update_status_cache().await; - - // Sleep for the defined interval before the next health check - sleep(self.health_check_interval).await; - } - }) - .await; - } -} diff --git a/bin/remote-prover/src/proxy/metrics.rs b/bin/remote-prover/src/proxy/metrics.rs deleted file mode 100644 index 9b5c579d9c..0000000000 --- a/bin/remote-prover/src/proxy/metrics.rs +++ /dev/null @@ -1,97 +0,0 @@ -use std::sync::LazyLock; - -use prometheus::{ - Histogram, - IntCounter, - IntCounterVec, - IntGauge, - register_histogram, - 
register_int_counter, - register_int_counter_vec, - register_int_gauge, -}; - -// SAFETY: The `unwrap` calls here are safe because: -// 1. The metrics being registered (gauges, counters, histograms) use hardcoded names and -// descriptions, which are guaranteed not to conflict within the application. -// 2. Registration errors occur only if there is a naming conflict, which is not possible in this -// context due to controlled metric definitions. -// 3. Any changes to metric names or types should be carefully reviewed to avoid conflicts. - -// QUEUE METRICS -// ================================================================================================ - -pub static QUEUE_SIZE: LazyLock = - LazyLock::new(|| register_int_gauge!("queue_size", "Number of requests in the queue").unwrap()); -pub static QUEUE_LATENCY: LazyLock = LazyLock::new(|| { - register_histogram!( - "queue_latency", - "Time (in seconds) requests spend in the queue", - vec![0.1, 0.5, 1.0, 2.0, 5.0, 10.0] - ) - .unwrap() -}); -pub static QUEUE_DROP_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("queue_drop_count", "Number of requests dropped due to a full queue") - .unwrap() -}); - -// WORKER METRICS -// ================================================================================================ - -pub static WORKER_COUNT: LazyLock = - LazyLock::new(|| register_int_gauge!("worker_count", "Total number of workers").unwrap()); -pub static WORKER_UNHEALTHY: LazyLock = LazyLock::new(|| { - register_int_counter_vec!( - "worker_unhealthy", - "Number of times that each worker was registered as unhealthy", - &["worker_id"] - ) - .unwrap() -}); -pub static WORKER_BUSY: LazyLock = - LazyLock::new(|| register_int_gauge!("worker_busy", "Number of busy workers").unwrap()); -pub static WORKER_REQUEST_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter_vec!( - "worker_request_count", - "Number of requests processed by each worker", - &["worker_id"] - ) - .unwrap() -}); - -// REQUEST 
METRICS -// ================================================================================================ - -pub static REQUEST_FAILURE_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("request_failure_count", "Number of failed requests").unwrap() -}); -pub static REQUEST_RETRIES: LazyLock = LazyLock::new(|| { - register_int_counter!("request_retries", "Number of request retries").unwrap() -}); -pub static REQUEST_COUNT: LazyLock = LazyLock::new(|| { - register_int_counter!("request_count", "Number of requests processed").unwrap() -}); -pub static REQUEST_LATENCY: LazyLock = LazyLock::new(|| { - register_histogram!( - "request_latency", - "Time (in seconds) requests take to process", - vec![0.1, 0.5, 1.0, 2.0, 5.0, 10.0] - ) - .unwrap() -}); - -// RATE LIMITING METRICS -// ================================================================================================ - -pub static RATE_LIMITED_REQUESTS: LazyLock = LazyLock::new(|| { - register_int_counter!( - "rate_limited_requests", - "Number of requests blocked due to rate limiting" - ) - .unwrap() -}); -pub static RATE_LIMIT_VIOLATIONS: LazyLock = LazyLock::new(|| { - register_int_counter!("rate_limit_violations", "Number of rate limit violations by clients") - .unwrap() -}); diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs deleted file mode 100644 index 81290d73a9..0000000000 --- a/bin/remote-prover/src/proxy/mod.rs +++ /dev/null @@ -1,774 +0,0 @@ -use std::collections::VecDeque; -use std::sync::{Arc, LazyLock}; -use std::time::{Duration, Instant}; - -use async_trait::async_trait; -use bytes::Bytes; -use metrics::{ - QUEUE_LATENCY, - QUEUE_SIZE, - RATE_LIMIT_VIOLATIONS, - RATE_LIMITED_REQUESTS, - REQUEST_COUNT, - REQUEST_FAILURE_COUNT, - REQUEST_LATENCY, - REQUEST_RETRIES, - WORKER_BUSY, - WORKER_COUNT, - WORKER_REQUEST_COUNT, -}; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use miden_remote_prover::error::RemoteProverError; 
-use miden_remote_prover::generated::remote_prover::{ProxyStatus, ProxyWorkerStatus}; -use pingora::http::ResponseHeader; -use pingora::prelude::*; -use pingora::protocols::Digest; -use pingora::upstreams::peer::{ALPN, Peer}; -use pingora_core::Result; -use pingora_core::upstreams::peer::HttpPeer; -use pingora_limits::rate::Rate; -use pingora_proxy::{FailToProxy, ProxyHttp, Session}; -use tokio::sync::RwLock; -use tracing::{Span, debug, error, info, info_span, warn}; -use uuid::Uuid; -use worker::Worker; - -use crate::commands::ProxyConfig; -use crate::commands::update_workers::{Action, UpdateWorkers}; -use crate::utils::{ - create_queue_full_response, - create_response_with_error_message, - create_too_many_requests_response, - write_grpc_response_to_session, -}; - -mod health_check; -pub mod metrics; -pub(crate) mod update_workers; -pub(crate) mod worker; - -// CONSTANTS -// ================================================================================================ - -const PROXY_STATUS_PATH: &str = "/remote_prover.ProxyStatusApi/Status"; - -// LOAD BALANCER STATE -// ================================================================================================ - -/// Load balancer that uses a round robin strategy -#[derive(Debug)] -pub struct LoadBalancerState { - workers: Arc>>, - timeout: Duration, - connection_timeout: Duration, - max_queue_items: usize, - max_retries_per_request: usize, - max_req_per_sec: isize, - available_workers_polling_interval: Duration, - health_check_interval: Duration, - supported_proof_type: ProofType, - status_cache_sender: tokio::sync::watch::Sender, - status_cache_receiver: tokio::sync::watch::Receiver, -} - -impl LoadBalancerState { - /// Create a new load balancer - /// - /// # Errors - /// Returns an error if: - /// - The worker cannot be created. 
- #[tracing::instrument(target = COMPONENT, name = "proxy.new_load_balancer", skip(initial_workers))] - pub(crate) async fn new( - initial_workers: Vec, - config: &ProxyConfig, - ) -> core::result::Result { - let mut workers: Vec = Vec::with_capacity(initial_workers.len()); - - let connection_timeout = config.connection_timeout; - let total_timeout = config.timeout; - - for worker_addr in initial_workers { - match Worker::new(worker_addr, connection_timeout, total_timeout).await { - Ok(w) => workers.push(w), - Err(e) => { - error!("Failed to create worker: {}", e); - }, - } - } - - info!("Workers created: {:?}", workers); - - WORKER_COUNT.set(i64::try_from(workers.len()).expect("worker count greater than i64::MAX")); - RATE_LIMIT_VIOLATIONS.reset(); - RATE_LIMITED_REQUESTS.reset(); - REQUEST_RETRIES.reset(); - - let workers = Arc::new(RwLock::new(workers)); - let supported_proof_type = config.proof_type; - - // Build initial status for the cache - let initial_status = { - let workers_guard = workers.read().await; - build_proxy_status_response(&workers_guard, supported_proof_type) - }; - - // Create the status cache channel - let (status_cache_sender, status_cache_receiver) = - tokio::sync::watch::channel(initial_status); - - Ok(Self { - workers, - timeout: total_timeout, - connection_timeout, - max_queue_items: config.max_queue_items, - max_retries_per_request: config.max_retries_per_request, - max_req_per_sec: config.max_req_per_sec, - available_workers_polling_interval: config.available_workers_polling_interval, - health_check_interval: config.health_check_interval, - supported_proof_type, - status_cache_sender, - status_cache_receiver, - }) - } - - /// Gets an available worker and marks it as unavailable. - /// - /// If no worker is available, it will return None. 
- pub async fn pop_available_worker(&self) -> Option { - let mut available_workers = self.workers.write().await; - available_workers.iter_mut().find(|w| w.is_available()).map(|w| { - w.set_availability(false); - WORKER_BUSY.inc(); - w.clone() - }) - } - - /// Marks the given worker as available and moves it to the end of the list. - /// - /// If the worker is not in the list, it won't be added. - /// The worker is moved to the end of the list to avoid overloading since the selection of the - /// worker is done in order, causing the workers at the beginning of the list to be selected - /// more often. - pub async fn add_available_worker(&self, worker: Worker) { - let mut workers = self.workers.write().await; - if let Some(pos) = workers.iter().position(|w| *w == worker) { - // Remove the worker from its current position - let mut w = workers.remove(pos); - // Mark it as available - w.set_availability(true); - // Add it to the end of the list - workers.push(w); - } - } - - /// Updates the list of available workers based on the given action ("add" or "remove"). - /// - /// # Behavior - /// - /// ## Add Action - /// - If the worker exists in the current workers list, do nothing. - /// - Otherwise, add it and mark it as available. - /// - /// ## Remove Action - /// - If the worker exists in the current workers list, remove it. - /// - Otherwise, do nothing. - /// - /// # Errors - /// - If the worker cannot be created. 
- pub async fn update_workers( - &self, - update_workers: UpdateWorkers, - ) -> std::result::Result<(), RemoteProverError> { - let mut workers = self.workers.write().await; - info!("Current workers: {:?}", workers); - - let mut native_workers = Vec::new(); - - for worker_addr in update_workers.workers { - native_workers - .push(Worker::new(worker_addr, self.connection_timeout, self.timeout).await?); - } - - match update_workers.action { - Action::Add => { - for worker in native_workers { - if !workers.iter().any(|w| w == &worker) { - workers.push(worker); - } - } - }, - Action::Remove => { - for worker in native_workers { - workers.retain(|w| w != &worker); - } - }, - } - - info!("Workers updated: {:?}", workers); - WORKER_COUNT.set(i64::try_from(workers.len()).expect("worker count greater than i64::MAX")); - - Ok(()) - } - - /// Get the total number of current workers. - pub async fn num_workers(&self) -> usize { - self.workers.read().await.len() - } - - /// Get the number of busy workers. 
- pub async fn num_busy_workers(&self) -> usize { - self.workers.read().await.iter().filter(|w| !w.is_available()).count() - } - - /// Get the cached status response - pub fn get_cached_status(&self) -> ProxyStatus { - self.status_cache_receiver.borrow().clone() - } - - /// Update the status cache with current worker status - pub async fn update_status_cache(&self) { - let workers = self.workers.read().await; - let new_status = build_proxy_status_response(&workers, self.supported_proof_type); - self.status_cache_sender.send(new_status).expect("Failed to send new status"); - } -} - -// UTILS -// ================================================================================================ - -/// Rate limiter -static RATE_LIMITER: LazyLock = LazyLock::new(|| Rate::new(Duration::from_secs(1))); - -// REQUEST QUEUE -// ================================================================================================ - -/// Request queue holds the list of requests that are waiting to be processed by the workers and -/// the time they were enqueued. -/// It is used to keep track of the order of the requests to then assign them to the workers. 
-pub struct RequestQueue { - queue: RwLock>, -} - -impl RequestQueue { - /// Create a new empty request queue - #[allow(clippy::new_without_default)] - pub fn new() -> Self { - QUEUE_SIZE.set(0); - Self { queue: RwLock::new(VecDeque::new()) } - } - - /// Get the length of the queue - #[allow(clippy::len_without_is_empty)] - pub async fn len(&self) -> usize { - self.queue.read().await.len() - } - - /// Enqueue a request - pub async fn enqueue(&self, request_id: Uuid) { - QUEUE_SIZE.inc(); - let mut queue = self.queue.write().await; - queue.push_back((request_id, Instant::now())); - } - - /// Dequeue a request - pub async fn dequeue(&self) -> Option { - let mut queue = self.queue.write().await; - // If the queue was empty, the queue size does not change - if let Some((request_id, queued_time)) = queue.pop_front() { - QUEUE_SIZE.dec(); - QUEUE_LATENCY.observe(queued_time.elapsed().as_secs_f64()); - Some(request_id) - } else { - None - } - } - - /// Peek at the first request in the queue - pub async fn peek(&self) -> Option { - let queue = self.queue.read().await; - queue.front().copied().map(|(request_id, _)| request_id) - } -} - -/// Shared state. It keeps track of the order of the requests to then assign them to the workers. -static QUEUE: LazyLock = LazyLock::new(RequestQueue::new); - -// OPENTELEMETRY CONTEXT INJECTION -// ================================================================================================ - -/// Pingora `RequestHeader` injector for OpenTelemetry trace context propagation. -/// -/// This allows the proxy to inject trace context into headers that will be forwarded -/// to worker nodes, enabling proper parent-child trace relationships. 
-struct PingoraHeaderInjector<'a>(&'a mut pingora::http::RequestHeader); - -impl opentelemetry::propagation::Injector for PingoraHeaderInjector<'_> { - /// Set a key and value in the `RequestHeader` using pingora's API - fn set(&mut self, key: &str, value: String) { - // Use pingora's insert_header method which handles the proper header insertion - // Convert key to owned string to satisfy lifetime requirements - if let Err(e) = self.0.insert_header(key.to_string(), value) { - // Log error but don't fail the request if header injection fails - tracing::warn!(target: COMPONENT, header = %key, err = %e, "Failed to inject OpenTelemetry header"); - } - } -} - -// REQUEST CONTEXT -// ================================================================================================ - -/// Custom context for the request/response lifecycle -/// -/// We use this context to keep track of the number of tries for a request, the unique ID for the -/// request, the worker that will process the request, a span that will be used for traces along -/// the transaction execution, and a timer to track how long the request took. 
-#[derive(Debug)] -pub struct RequestContext { - /// Number of tries for the request - tries: usize, - /// Unique ID for the request - request_id: Uuid, - /// Worker that will process the request - worker: Option, - /// Parent span for the request - parent_span: Span, - /// Time when the request was created - created_at: Instant, -} - -impl RequestContext { - /// Create a new request context - fn new() -> Self { - let request_id = Uuid::new_v4(); - Self { - tries: 0, - request_id, - worker: None, - parent_span: info_span!(target: COMPONENT, "proxy.new_request", request_id = request_id.to_string()), - created_at: Instant::now(), - } - } - - /// Set the worker that will process the request - fn set_worker(&mut self, worker: Worker) { - WORKER_REQUEST_COUNT.with_label_values(&[&worker.name()]).inc(); - self.worker = Some(worker); - } -} - -// LOAD BALANCER -// ================================================================================================ - -/// Wrapper around the load balancer that implements the [`ProxyHttp`] trait -/// -/// This wrapper is used to implement the [`ProxyHttp`] trait for [`Arc`]. -/// This is necessary because we want to share the load balancer between the proxy server and the -/// health check background service. -#[derive(Debug)] -pub struct LoadBalancer(pub Arc); - -/// Implements load-balancing of incoming requests across a pool of workers. -/// -/// At the backend-level, a request lifecycle works as follows: -/// - When a new requests arrives, [`LoadBalancer::request_filter()`] method is called. In this -/// method we apply IP-based rate-limiting to the request and check if the request queue is full. -/// In this method we also handle the special case update workers request. -/// - Next, the [`Self::upstream_peer()`] method is called. We use it to figure out which worker -/// will process the request. Inside `upstream_peer()`, we add the request to the queue of -/// requests. 
Once the request gets to the front of the queue, we forward it to an available -/// worker. This step is also in charge of setting the SNI, timeouts, and enabling HTTP/2. -/// Finally, we establish a connection with the worker. -/// - Before sending the request to the upstream server and if the connection succeed, the -/// [`Self::upstream_request_filter()`] method is called. In this method, we ensure that the -/// correct headers are forwarded for gRPC requests. -/// - If the connection fails, the [`Self::fail_to_connect()`] method is called. In this method, we -/// retry the request [`self.max_retries_per_request`] times. -/// - Once the worker processes the request (either successfully or with a failure), -/// [`Self::logging()`] method is called. In this method, we log the request lifecycle and set the -/// worker as available. -#[async_trait] -impl ProxyHttp for LoadBalancer { - type CTX = RequestContext; - fn new_ctx(&self) -> Self::CTX { - RequestContext::new() - } - - /// Decide whether to filter the request or not. Also, handle the special case of the update - /// workers request or the proxy status request. - /// - /// The proxy status request is handled separately because it is used by the health check - /// service to check the status of the proxy and returns immediate response. - /// - /// Here we apply IP-based rate-limiting to the request. We also check if the queue is full. - /// - /// If the request is rate-limited, we return a 429 response. Otherwise, we return false. 
- #[tracing::instrument(name = "proxy.request_filter", parent = &ctx.parent_span, skip(session))] - async fn request_filter(&self, session: &mut Session, ctx: &mut Self::CTX) -> Result - where - Self::CTX: Send + Sync, - { - // Extract the client address early - let client_addr = match session.client_addr() { - Some(addr) => addr.to_string(), - None => { - return create_response_with_error_message( - session.as_downstream_mut(), - "No socket address".to_string(), - ) - .await - .map(|_| true); - }, - }; - - Span::current().record("client_addr", client_addr.clone()); - - let path = session.downstream_session.req_header().uri.path(); - Span::current().record("path", path); - - // Check if the request is a grpc proxy status request by checking the path - if path == PROXY_STATUS_PATH { - let status = self.0.get_cached_status(); - return write_grpc_response_to_session(session, status).await.map(|_| true); - } - - // Increment the request count - REQUEST_COUNT.inc(); - - let user_id = Some(client_addr); - - // Retrieve the current window requests - let curr_window_requests = RATE_LIMITER.observe(&user_id, 1); - - // Rate limit the request - if curr_window_requests > self.0.max_req_per_sec { - RATE_LIMITED_REQUESTS.inc(); - - // Only count a violation the first time in a given window - if curr_window_requests == self.0.max_req_per_sec + 1 { - RATE_LIMIT_VIOLATIONS.inc(); - } - - return create_too_many_requests_response(session, self.0.max_req_per_sec) - .await - .map(|_| true); - } - - let queue_len = QUEUE.len().await; - - info!("New request with ID: {}", ctx.request_id); - info!("Queue length: {}", queue_len); - - // Check if the queue is full - if queue_len >= self.0.max_queue_items { - return create_queue_full_response(session).await.map(|_| true); - } - - Ok(false) - } - - /// Returns [`HttpPeer`] corresponding to the worker that will handle the current request. 
- /// - /// Here we enqueue the request and wait for it to be at the front of the queue and a worker - /// becomes available, then we dequeue the request and process it. We then set the SNI, - /// timeouts, and enable HTTP/2. - /// - /// Note that the request will be assigned a worker here, and the worker will be removed from - /// the list of available workers once it reaches the [`Self::logging`] method. - #[tracing::instrument(name = "proxy.upstream_peer", parent = &ctx.parent_span, skip(_session))] - async fn upstream_peer( - &self, - _session: &mut Session, - ctx: &mut Self::CTX, - ) -> Result> { - let request_id = ctx.request_id; - - // Add the request to the queue. - QUEUE.enqueue(request_id).await; - - // Wait for the request to be at the front of the queue - loop { - // The request is at the front of the queue. - if QUEUE.peek().await.expect("Queue should not be empty") != request_id { - continue; - } - - // Check if there is an available worker - if let Some(worker) = self.0.pop_available_worker().await { - debug!("Worker {} picked up the request with ID: {}", worker.name(), request_id); - ctx.set_worker(worker); - break; - } - debug!("All workers are busy"); - tokio::time::sleep(self.0.available_workers_polling_interval).await; - } - - // Remove the request from the queue - QUEUE.dequeue().await; - - // Set SNI - let mut http_peer = HttpPeer::new( - ctx.worker.clone().expect("Failed to get worker").name(), - false, - String::new(), - ); - let peer_opts = - http_peer.get_mut_peer_options().ok_or(Error::new(ErrorType::InternalError))?; - - // Timeout settings - peer_opts.total_connection_timeout = Some(self.0.timeout); - peer_opts.connection_timeout = Some(self.0.connection_timeout); - - // Enable HTTP/2 - peer_opts.alpn = ALPN::H2; - - let peer = Box::new(http_peer); - Ok(peer) - } - - /// Applies the necessary filters to the request before sending it to the upstream server. 
- /// - /// Here we ensure that the correct headers are forwarded for gRPC requests and inject - /// the X-Request-ID header and OpenTelemetry trace context for trace correlation between proxy - /// and worker. - /// - /// This method is called right after [`Self::upstream_peer()`] returns a [`HttpPeer`] and a - /// connection is established with the worker. - #[tracing::instrument(name = "proxy.upstream_request_filter", parent = &_ctx.parent_span, skip(_session))] - async fn upstream_request_filter( - &self, - _session: &mut Session, - upstream_request: &mut RequestHeader, - _ctx: &mut Self::CTX, - ) -> Result<()> - where - Self::CTX: Send + Sync, - { - // Check if it's a gRPC request - if let Some(content_type) = upstream_request.headers.get("content-type") - && content_type == "application/grpc" - { - // Ensure the correct host and gRPC headers are forwarded - upstream_request.insert_header("content-type", "application/grpc")?; - } - - // Always inject X-Request-ID header for trace correlation - // This allows the worker traces to be correlated with the proxy traces - upstream_request.insert_header("x-request-id", _ctx.request_id.to_string())?; - - // Inject OpenTelemetry trace context for proper trace propagation - // This allows the worker trace to be a child of the proxy trace - { - use tracing_opentelemetry::OpenTelemetrySpanExt; - let ctx = tracing::Span::current().context(); - opentelemetry::global::get_text_map_propagator(|propagator| { - propagator.inject_context(&ctx, &mut PingoraHeaderInjector(upstream_request)); - }); - } - - Ok(()) - } - - /// Retry the request if the connection fails. 
- #[tracing::instrument(name = "proxy.fail_to_connect", parent = &ctx.parent_span, skip(_session))] - fn fail_to_connect( - &self, - _session: &mut Session, - peer: &HttpPeer, - ctx: &mut Self::CTX, - mut e: Box, - ) -> Box { - if ctx.tries > self.0.max_retries_per_request { - return e; - } - REQUEST_RETRIES.inc(); - ctx.tries += 1; - e.set_retry(true); - e - } - - /// Logs the request lifecycle in case that an error happened and sets the worker as available. - /// - /// This method is the last one in the request lifecycle, no matter if the request was - /// processed or not. - #[tracing::instrument(name = "proxy.logging", parent = &ctx.parent_span, skip(_session))] - async fn logging(&self, _session: &mut Session, e: Option<&Error>, ctx: &mut Self::CTX) - where - Self::CTX: Send + Sync, - { - if let Some(e) = e { - REQUEST_FAILURE_COUNT.inc(); - error!("Error: {:?}", e); - } - - // Mark the worker as available - if let Some(worker) = ctx.worker.take() { - self.0.add_available_worker(worker).await; - } - - REQUEST_LATENCY.observe(ctx.created_at.elapsed().as_secs_f64()); - - // Update the number of busy workers - WORKER_BUSY.set( - i64::try_from(self.0.num_busy_workers().await) - .expect("busy worker count greater than i64::MAX"), - ); - } - - // The following methods are a copy of the default implementation defined in the trait, but - // with tracing instrumentation. - // Pingora calls these methods to handle the request/response lifecycle internally and since - // the trait is defined in a different crate, we cannot add the tracing instrumentation there. - // We use the default implementation by implementing the method for our specific type, adding - // the tracing instrumentation and internally calling `ProxyHttp` methods. 
- // ============================================================================================ - #[tracing::instrument(name = "proxy.early_request_filter", parent = &ctx.parent_span, skip(_session))] - async fn early_request_filter( - &self, - _session: &mut Session, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.early_request_filter(_session, &mut ()).await - } - - #[tracing::instrument(name = "proxy.connected_to_upstream", parent = &ctx.parent_span, skip(_session, _sock, _reused, _peer, _fd, _digest))] - async fn connected_to_upstream( - &self, - _session: &mut Session, - _reused: bool, - _peer: &HttpPeer, - #[cfg(unix)] _fd: std::os::unix::io::RawFd, - #[cfg(windows)] _sock: std::os::windows::io::RawSocket, - _digest: Option<&Digest>, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl - .connected_to_upstream(_session, _reused, _peer, _fd, _digest, &mut ()) - .await - } - - #[tracing::instrument(name = "proxy.request_body_filter", parent = &ctx.parent_span, skip(session, body))] - async fn request_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl - .request_body_filter(session, body, end_of_stream, &mut ()) - .await - } - - #[tracing::instrument(name = "proxy.upstream_response_filter", parent = &ctx.parent_span, skip(session, upstream_response))] - fn upstream_response_filter( - &self, - session: &mut Session, - upstream_response: &mut ResponseHeader, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.upstream_response_filter(session, upstream_response, &mut ()) - } - - #[tracing::instrument(name = "proxy.response_filter", parent = &ctx.parent_span, skip(session, upstream_response))] - async fn response_filter( - &self, - session: &mut Session, - upstream_response: &mut ResponseHeader, - ctx: &mut Self::CTX, - ) -> Result<()> - where - Self::CTX: Send + Sync, - { - 
ProxyHttpDefaultImpl.response_filter(session, upstream_response, &mut ()).await - } - - #[tracing::instrument(name = "proxy.upstream_response_body_filter", parent = &ctx.parent_span, skip(session, body))] - fn upstream_response_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result<()> { - ProxyHttpDefaultImpl.upstream_response_body_filter(session, body, end_of_stream, &mut ()) - } - - #[tracing::instrument(name = "proxy.response_body_filter", parent = &ctx.parent_span, skip(session, body))] - fn response_body_filter( - &self, - session: &mut Session, - body: &mut Option, - end_of_stream: bool, - ctx: &mut Self::CTX, - ) -> Result> - where - Self::CTX: Send + Sync, - { - ProxyHttpDefaultImpl.response_body_filter(session, body, end_of_stream, &mut ()) - } - - #[tracing::instrument(name = "proxy.fail_to_proxy", parent = &ctx.parent_span, skip(session))] - async fn fail_to_proxy( - &self, - session: &mut Session, - e: &Error, - ctx: &mut Self::CTX, - ) -> FailToProxy - where - Self::CTX: Send + Sync, - { - ProxyHttpDefaultImpl.fail_to_proxy(session, e, &mut ()).await - } - - #[tracing::instrument(name = "proxy.error_while_proxy", parent = &ctx.parent_span, skip(session))] - fn error_while_proxy( - &self, - peer: &HttpPeer, - session: &mut Session, - e: Box, - ctx: &mut Self::CTX, - client_reused: bool, - ) -> Box { - ProxyHttpDefaultImpl.error_while_proxy(peer, session, e, &mut (), client_reused) - } -} - -// PROXY HTTP DEFAULT IMPLEMENTATION -// ================================================================================================ - -/// Default implementation of the [`ProxyHttp`] trait. -/// -/// It is used to provide the default methods of the trait in order for the [`LoadBalancer`] to -/// implement the trait adding tracing instrumentation but without having to copy all default -/// implementations. 
-struct ProxyHttpDefaultImpl; - -#[async_trait] -impl ProxyHttp for ProxyHttpDefaultImpl { - type CTX = (); - fn new_ctx(&self) {} - - /// This method is the only one that does not have a default implementation in the trait. - async fn upstream_peer( - &self, - _session: &mut Session, - _ctx: &mut Self::CTX, - ) -> Result> { - unimplemented!("This is a dummy implementation, should not be called") - } -} - -// HELPERS -// ================================================================================================ - -/// Builds a `ProxyStatusResponse` from a list of workers and a supported proof type. -fn build_proxy_status_response(workers: &[Worker], supported_proof_type: ProofType) -> ProxyStatus { - let worker_statuses: Vec = - workers.iter().map(ProxyWorkerStatus::from).collect(); - ProxyStatus { - version: env!("CARGO_PKG_VERSION").to_string(), - supported_proof_type: supported_proof_type.into(), - workers: worker_statuses, - } -} diff --git a/bin/remote-prover/src/proxy/update_workers.rs b/bin/remote-prover/src/proxy/update_workers.rs deleted file mode 100644 index 320ac5a676..0000000000 --- a/bin/remote-prover/src/proxy/update_workers.rs +++ /dev/null @@ -1,152 +0,0 @@ -use core::fmt; -use std::sync::Arc; - -use miden_node_utils::ErrorReport; -use miden_remote_prover::COMPONENT; -use pingora::apps::{HttpServerApp, HttpServerOptions, ReusedHttpStream}; -use pingora::http::ResponseHeader; -use pingora::protocols::http::ServerSession; -use pingora::server::ShutdownWatch; -use tonic::async_trait; -use tracing::{error, info}; - -use super::LoadBalancerState; -use crate::commands::update_workers::UpdateWorkers; -use crate::utils::create_response_with_error_message; - -/// The Load Balancer Updater Service. -/// -/// This service is responsible for updating the list of workers in the load balancer. -pub(crate) struct LoadBalancerUpdateService { - lb_state: Arc, - server_opts: HttpServerOptions, -} - -/// Manually implement Debug for `LoadBalancerUpdateService`. 
-/// [`HttpServerOptions`] does not implement Debug, so we cannot derive Debug for -/// [`LoadBalancerUpdateService`], which is needed for the tracing instrumentation. -impl fmt::Debug for LoadBalancerUpdateService { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LBUpdaterService") - .field("lb_state", &self.lb_state) - .finish_non_exhaustive() - } -} - -impl LoadBalancerUpdateService { - pub(crate) fn new(lb_state: Arc) -> Self { - let mut server_opts = HttpServerOptions::default(); - server_opts.h2c = true; - - Self { lb_state, server_opts } - } -} - -#[async_trait] -impl HttpServerApp for LoadBalancerUpdateService { - /// Handles the update workers request. - /// - /// # Behavior - /// - Reads the HTTP request from the session. - /// - If query parameters are present, attempts to parse them as an `UpdateWorkers` object. - /// - If the parsing fails, returns an error response. - /// - If successful, updates the list of workers by calling `update_workers`. - /// - If the update is successful, returns the count of available workers. - /// - /// # Errors - /// - If the HTTP request cannot be read. - /// - If the query parameters cannot be parsed. - /// - If the workers cannot be updated. - /// - If the response cannot be created. 
- #[tracing::instrument(target = COMPONENT, name = "lb_updater_service.process_new_http", skip(http))] - async fn process_new_http( - self: &Arc, - mut http: ServerSession, - _shutdown: &ShutdownWatch, - ) -> Option { - match http.read_request().await { - Ok(res) => { - if !res { - error!("Failed to read request header"); - create_response_with_error_message( - &mut http, - "Failed to read request header".to_string(), - ) - .await - .ok(); - return None; - } - }, - Err(e) => { - error!("HTTP server fails to read from downstream: {e}"); - create_response_with_error_message( - &mut http, - format!("HTTP server fails to read from downstream: {e}"), - ) - .await - .ok(); - return None; - }, - } - - info!("Successfully get a new request to update workers"); - - // Extract and parse query parameters, if there are not any, return early. - let Some(query_params) = http.req_header().as_ref().uri.query() else { - let error_message = "No query parameters provided".to_string(); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - }; - - let update_workers: Result = serde_qs::from_str(query_params); - let update_workers = match update_workers { - Ok(workers) => workers, - Err(err) => { - let error_message = err.as_report_context("failed to parse query parameters"); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - }, - }; - - // Update workers and handle potential errors. 
- if let Err(err) = self.lb_state.update_workers(update_workers).await { - let error_message = err.as_report_context("failed to update workers"); - error!("{}", error_message); - create_response_with_error_message(&mut http, error_message).await.ok(); - return None; - } - - create_workers_updated_response(&mut http, self.lb_state.num_workers().await) - .await - .ok(); - - info!("Successfully updated workers"); - - None - } - - /// Provide HTTP server options used to override default behavior. This function will be called - /// every time a new connection is processed. - fn server_options(&self) -> Option<&HttpServerOptions> { - Some(&self.server_opts) - } -} - -// HELPERS -// ================================================================================================ - -/// Create a 200 response for updated workers -/// -/// It will set the X-Worker-Count header to the number of workers. -async fn create_workers_updated_response( - session: &mut ServerSession, - workers: usize, -) -> pingora_core::Result { - let mut header = ResponseHeader::build(200, None)?; - header.insert_header("X-Worker-Count", workers.to_string())?; - session.set_keepalive(None); - session.write_response_header(Box::new(header)).await?; - Ok(true) -} diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs deleted file mode 100644 index aa418e8cb3..0000000000 --- a/bin/remote-prover/src/proxy/worker.rs +++ /dev/null @@ -1,420 +0,0 @@ -use std::sync::LazyLock; -use std::time::{Duration, Instant}; - -use anyhow::Context; -use miden_node_utils::ErrorReport; -use miden_remote_prover::COMPONENT; -use miden_remote_prover::api::ProofType; -use miden_remote_prover::error::RemoteProverError; -use miden_remote_prover::generated::ProxyWorkerStatus; -use miden_remote_prover::generated::remote_prover::worker_status_api_client::WorkerStatusApiClient; -use pingora::lb::Backend; -use semver::{Version, VersionReq}; -use serde::Serialize; -use tonic::transport::Channel; 
-use tracing::{error, info}; - -use super::metrics::WORKER_UNHEALTHY; - -/// The maximum exponent for the backoff. -/// -/// The maximum backoff is 2^[`MAX_BACKOFF_EXPONENT`] seconds. -const MAX_BACKOFF_EXPONENT: usize = 9; - -/// The version of the proxy. -/// -/// This is the version of the proxy that is used to check the version of the worker. -const MRP_PROXY_VERSION: &str = env!("CARGO_PKG_VERSION"); - -/// The version requirement for the worker. -/// -/// This is the version requirement for the worker that is used to check the version of the worker. -static WORKER_VERSION_REQUIREMENT: LazyLock = LazyLock::new(|| { - let current = - Version::parse(MRP_PROXY_VERSION).expect("Proxy version should be valid at this point"); - VersionReq::parse(&format!("~{}.{}", current.major, current.minor)) - .expect("Version should be valid at this point") -}); - -// WORKER -// ================================================================================================ - -/// A worker used for processing of requests. -/// -/// The worker is used to process requests. -/// It has a backend, a status client, a health status, and a version. -/// The backend is used to send requests to the worker. -/// The status client is used to check the status of the worker. -/// The health status is used to determine if the worker is healthy or unhealthy. -/// The version is used to check if the worker is compatible with the proxy. -/// The `is_available` is used to determine if the worker is available to process requests. -/// The `connection_timeout` is used to set the timeout for the connection to the worker. -/// The `total_timeout` is used to set the timeout for the total request. -#[derive(Debug, Clone)] -pub struct Worker { - backend: Backend, - status_client: Option>, - is_available: bool, - health_status: WorkerHealthStatus, - version: String, - connection_timeout: Duration, - total_timeout: Duration, -} - -/// The health status of a worker. 
-/// -/// A worker can be either healthy or unhealthy. -/// If the worker is unhealthy, it will have a number of failed attempts. -/// The number of failed attempts is incremented each time the worker is unhealthy. -#[derive(Debug, Clone, PartialEq, Serialize)] -pub enum WorkerHealthStatus { - /// The worker is healthy. - Healthy, - /// The worker is unhealthy. - Unhealthy { - /// The number of failed attempts. - num_failed_attempts: usize, - /// The timestamp of the first failure. - #[serde(skip_serializing)] - first_fail_timestamp: Instant, - /// The reason for the failure. - reason: String, - }, - /// The worker status is unknown. - Unknown, -} - -impl Worker { - // CONSTRUCTOR - // -------------------------------------------------------------------------------------------- - - /// Creates a new worker and a gRPC status client for the given worker address. - /// - /// # Errors - /// - Returns [`RemoteProverError::BackendCreationFailed`] if the worker address is invalid. - pub async fn new( - worker_addr: String, - connection_timeout: Duration, - total_timeout: Duration, - ) -> Result { - let backend = - Backend::new(&worker_addr).map_err(RemoteProverError::BackendCreationFailed)?; - - let (status_client, health_status) = - match create_status_client(&worker_addr, connection_timeout, total_timeout).await { - Ok(client) => (Some(client), WorkerHealthStatus::Unknown), - Err(err) => { - error!("Failed to create status client for worker {}: {}", worker_addr, err); - ( - None, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: 1, - first_fail_timestamp: Instant::now(), - reason: err.as_report_context("failed to create status client"), - }, - ) - }, - }; - - Ok(Self { - backend, - is_available: health_status == WorkerHealthStatus::Unknown, - status_client, - health_status, - version: String::new(), - connection_timeout, - total_timeout, - }) - } - - // MUTATORS - // -------------------------------------------------------------------------------------------- - - 
/// Attempts to recreate the status client for this worker. - /// - /// This method will try to create a new gRPC status client using the worker's address - /// and timeout configurations. If successful, it will update the worker's `status_client` - /// field. - /// - /// # Returns - /// - `Ok(())` if the client was successfully created - /// - `Err(RemoteProverError)` if the client creation failed - async fn recreate_status_client(&mut self) -> Result<(), RemoteProverError> { - let name = self.name(); - match create_status_client(&name, self.connection_timeout, self.total_timeout).await { - Ok(client) => { - self.status_client = Some(client); - Ok(()) - }, - Err(err) => { - error!("Failed to recreate status client for worker {}: {}", name, err); - Err(err) - }, - } - } - - /// Checks the current status of the worker and returns the result without updating worker - /// state. - /// - /// Returns `Ok(())` if the worker is healthy and compatible, or `Err(reason)` if there's an - /// issue. The caller should use `update_status` to apply the result to the worker's health - /// status. 
- #[allow(clippy::too_many_lines)] - #[tracing::instrument(target = COMPONENT, name = "worker.check_status")] - pub async fn check_status(&mut self, supported_proof_type: ProofType) -> Result<(), String> { - if !self.should_do_health_check() { - return Ok(()); - } - - // If we don't have a status client, try to recreate it - if self.status_client.is_none() { - match self.recreate_status_client().await { - Ok(()) => { - info!("Successfully recreated status client for worker {}", self.name()); - }, - Err(err) => { - return Err(err.as_report_context("failed to recreate status client")); - }, - } - } - - let worker_status = match self.status_client.as_mut().unwrap().status(()).await { - Ok(response) => response.into_inner(), - Err(e) => { - error!("Failed to check worker status ({}): {}", self.name(), e); - return Err(e.message().to_string()); - }, - }; - - if worker_status.version.is_empty() { - return Err("Worker version is empty".to_string()); - } - - if !is_valid_version(&WORKER_VERSION_REQUIREMENT, &worker_status.version).unwrap_or(false) { - return Err(format!("Worker version is invalid ({})", worker_status.version)); - } - - self.version = worker_status.version; - - let worker_supported_proof_type = ProofType::try_from(worker_status.supported_proof_type) - .inspect_err(|err| { - error!(%err, name=%self.name(), "Failed to convert worker supported proof type"); - })?; - - if supported_proof_type != worker_supported_proof_type { - return Err(format!("Unsupported proof type: {supported_proof_type}")); - } - - Ok(()) - } - - /// Updates the worker's health status based on the result from `check_status`. - /// - /// If the result is `Ok(())`, the worker is marked as healthy. - /// If the result is `Err(reason)`, the worker is marked as unhealthy with the failure reason. 
- #[tracing::instrument(target = COMPONENT, name = "worker.update_status")] - pub fn update_status(&mut self, check_result: Result<(), String>) { - match check_result { - Ok(()) => { - self.set_health_status(WorkerHealthStatus::Healthy); - }, - Err(reason) => { - let failed_attempts = self.num_failures(); - self.set_health_status(WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts + 1, - first_fail_timestamp: match &self.health_status { - WorkerHealthStatus::Unhealthy { first_fail_timestamp, .. } => { - *first_fail_timestamp - }, - _ => Instant::now(), - }, - reason, - }); - }, - } - } - - /// Sets the worker availability. - pub fn set_availability(&mut self, is_available: bool) { - self.is_available = is_available; - } - - // PUBLIC ACCESSORS - // -------------------------------------------------------------------------------------------- - - /// Returns the number of failures the worker has had. - pub fn num_failures(&self) -> usize { - match &self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => 0, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts, - first_fail_timestamp: _, - reason: _, - } => *failed_attempts, - } - } - - /// Returns the health status of the worker. - pub fn health_status(&self) -> &WorkerHealthStatus { - &self.health_status - } - - /// Returns the version of the worker. - pub fn version(&self) -> &str { - &self.version - } - - /// Returns the worker availability. - /// - /// A worker is available if it is healthy and ready to process requests. - pub fn is_available(&self) -> bool { - self.is_available - } - - /// Returns the worker name. - pub fn name(&self) -> String { - self.backend.addr.to_string() - } - - /// Returns whether the worker is healthy. - /// - /// This function will return `true` if the worker is healthy or the health status is unknown. - /// Otherwise, it will return `false`. 
- pub fn is_healthy(&self) -> bool { - !matches!(self.health_status, WorkerHealthStatus::Unhealthy { .. }) - } - - // PRIVATE HELPERS - // -------------------------------------------------------------------------------------------- - - /// Returns whether the worker should do a health check. - /// - /// A worker should do a health check if it is healthy or if the time since the first failure - /// is greater than the time since the first failure power of 2. - /// - /// The maximum exponent is [`MAX_BACKOFF_EXPONENT`], which corresponds to a backoff of - /// 2^[`MAX_BACKOFF_EXPONENT`] seconds. - fn should_do_health_check(&self) -> bool { - match self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => true, - WorkerHealthStatus::Unhealthy { - num_failed_attempts: failed_attempts, - first_fail_timestamp, - reason: _, - } => { - let time_since_first_failure = first_fail_timestamp.elapsed(); - time_since_first_failure - > Duration::from_secs( - 2u64.pow(failed_attempts.min(MAX_BACKOFF_EXPONENT) as u32), - ) - }, - } - } - - /// Sets the health status of the worker. - /// - /// This function will update the health status of the worker and update the worker availability - /// based on the new health status. - fn set_health_status(&mut self, health_status: WorkerHealthStatus) { - let was_healthy = self.is_healthy(); - self.health_status = health_status; - match &self.health_status { - WorkerHealthStatus::Healthy | WorkerHealthStatus::Unknown => { - if !was_healthy { - self.is_available = true; - } - }, - WorkerHealthStatus::Unhealthy { .. 
} => { - WORKER_UNHEALTHY.with_label_values(&[&self.name()]).inc(); - self.is_available = false; - }, - } - } -} - -// PARTIAL EQUALITY -// ================================================================================================ - -impl PartialEq for Worker { - fn eq(&self, other: &Self) -> bool { - self.backend == other.backend - } -} - -// CONVERSIONS -// ================================================================================================ - -/// Conversion from a Worker reference to a `WorkerStatus` proto message. -impl From<&Worker> for ProxyWorkerStatus { - fn from(worker: &Worker) -> Self { - use miden_remote_prover::generated::remote_prover::WorkerHealthStatus as ProtoWorkerHealthStatus; - Self { - name: worker.name(), - version: worker.version().to_string(), - status: match worker.health_status() { - WorkerHealthStatus::Healthy => ProtoWorkerHealthStatus::Healthy, - WorkerHealthStatus::Unhealthy { .. } => ProtoWorkerHealthStatus::Unhealthy, - WorkerHealthStatus::Unknown => ProtoWorkerHealthStatus::Unknown, - } as i32, - } - } -} - -// HELPER FUNCTIONS -// ================================================================================================ - -/// Create a gRPC [`StatusApiClient`] for the given worker address. -/// -/// # Errors -/// - [`RemoteProverError::InvalidURI`] if the worker address is invalid. -/// - [`RemoteProverError::ConnectionFailed`] if the connection to the worker fails. -async fn create_status_client( - address: &str, - connection_timeout: Duration, - total_timeout: Duration, -) -> Result, RemoteProverError> { - let channel = Channel::from_shared(format!("http://{address}")) - .map_err(|err| RemoteProverError::InvalidURI(err, address.to_string()))? 
- .connect_timeout(connection_timeout) - .timeout(total_timeout) - .connect() - .await - .map_err(|err| RemoteProverError::ConnectionFailed(err, address.to_string()))?; - - Ok(WorkerStatusApiClient::new(channel)) -} - -/// Returns true if the version has major and minor versions match that of the required version. -/// Returns false otherwise. -/// -/// # Errors -/// Returns an error if either of the versions is malformed. -fn is_valid_version(version_req: &VersionReq, version: &str) -> anyhow::Result { - let received = Version::parse(version).context("Invalid worker version: {err}")?; - - Ok(version_req.matches(&received)) -} - -// TESTS -// ================================================================================================ - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_is_valid_version() { - let version_req = VersionReq::parse("~1.0").unwrap(); - assert!(is_valid_version(&version_req, "1.0.0").unwrap()); - assert!(is_valid_version(&version_req, "1.0.1").unwrap()); - assert!(is_valid_version(&version_req, "1.0.12").unwrap()); - assert!(is_valid_version(&version_req, "1.0").is_err()); - assert!(!is_valid_version(&version_req, "2.0.0").unwrap()); - assert!(!is_valid_version(&version_req, "1.1.0").unwrap()); - assert!(!is_valid_version(&version_req, "0.9.0").unwrap()); - assert!(!is_valid_version(&version_req, "0.9.1").unwrap()); - assert!(!is_valid_version(&version_req, "0.10.0").unwrap()); - assert!(is_valid_version(&version_req, "miden").is_err()); - assert!(is_valid_version(&version_req, "1.miden.12").is_err()); - } -} diff --git a/bin/remote-prover/src/server/mod.rs b/bin/remote-prover/src/server/mod.rs new file mode 100644 index 0000000000..2ca74f5398 --- /dev/null +++ b/bin/remote-prover/src/server/mod.rs @@ -0,0 +1,103 @@ +use std::num::NonZeroUsize; + +use anyhow::Context; +use miden_node_utils::cors::cors_for_grpc_web_layer; +use miden_node_utils::panic::catch_panic_layer_fn; +use 
miden_node_utils::tracing::grpc::grpc_trace_fn; +use proof_kind::ProofKind; +use tokio::net::TcpListener; +use tokio::task::JoinHandle; +use tokio_stream::wrappers::TcpListenerStream; +use tonic_web::GrpcWebLayer; +use tower_http::catch_panic::CatchPanicLayer; +use tower_http::trace::TraceLayer; + +use crate::generated::api_server::ApiServer; +use crate::server::service::ProverService; + +mod proof_kind; +mod prover; +mod service; +mod status; + +#[cfg(test)] +mod tests; + +/// A gRPC server providing a proving service for the Miden blockchain. +#[derive(clap::Parser)] +pub struct Server { + /// The port the gRPC server will be hosted on. + #[arg(long, default_value = "50051", env = "MIDEN_PROVER_PORT")] + port: u16, + /// The proof type that the prover will be handling. + #[arg(long, value_enum, env = "MIDEN_PROVER_KIND")] + kind: ProofKind, + /// Maximum time allowed for a proof request to complete. Once exceeded, the request is + /// aborted. + #[arg(long, default_value = "60s", env = "MIDEN_PROVER_TIMEOUT", value_parser = humantime::parse_duration)] + timeout: std::time::Duration, + /// Maximum number of concurrent proof requests that the prover will allow. + /// + /// Note that the prover only proves one request at a time; the rest are queued. This capacity + /// is used to limit the number of requests that can be queued at any given time, and includes + /// the one request that is currently being processed. + #[arg(long, default_value_t = NonZeroUsize::new(1).unwrap(), env = "MIDEN_PROVER_CAPACITY")] + capacity: NonZeroUsize, +} + +impl Server { + /// Spawns the prover server, returning its handle and the port it is listening on. + pub async fn spawn(&self) -> anyhow::Result<(JoinHandle>, u16)> { + let listener = TcpListener::bind(format!("0.0.0.0:{}", self.port)) + .await + .context("failed to bind to gRPC port")?; + + // We do this to get the actual port if configured with `self.port=0`. 
+ let port = listener + .local_addr() + .expect("local address should exist for a tcp listener") + .port(); + + tracing::info!( + server.timeout=%humantime::Duration::from(self.timeout), + server.capacity=self.capacity, + proof.kind = %self.kind, + server.port = port, + "proof server listening" + ); + + let status_service = status::StatusService::new(self.kind); + let prover_service = ProverService::with_capacity(self.kind, self.capacity); + let prover_service = ApiServer::new(prover_service); + + let reflection_service = tonic_reflection::server::Builder::configure() + .register_file_descriptor_set(miden_node_proto_build::remote_prover_api_descriptor()) + .register_encoded_file_descriptor_set(tonic_health::pb::FILE_DESCRIPTOR_SET) + .build_v1() + .context("failed to build reflection service")?; + + // Create a gRPC health reporter. + let (health_reporter, health_service) = tonic_health::server::health_reporter(); + + // Mark the service as serving + health_reporter.set_serving::>().await; + + let server = tonic::transport::Server::builder() + .accept_http1(true) + .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) + .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) + .layer(cors_for_grpc_web_layer()) + .layer(GrpcWebLayer::new()) + .timeout(self.timeout) + .add_service(prover_service) + .add_service(status_service) + .add_service(health_service) + .add_service(reflection_service) + .serve_with_incoming(TcpListenerStream::new(listener)); + + let server = + tokio::spawn(async move { server.await.context("failed while serving proof server") }); + + Ok((server, port)) + } +} diff --git a/bin/remote-prover/src/server/proof_kind.rs b/bin/remote-prover/src/server/proof_kind.rs new file mode 100644 index 0000000000..ccd72ca305 --- /dev/null +++ b/bin/remote-prover/src/server/proof_kind.rs @@ -0,0 +1,35 @@ +use crate::generated as proto; + +/// Specifies the type of proof supported by the remote prover. 
+#[derive(Debug, Clone, Copy, PartialEq, clap::ValueEnum)] +pub enum ProofKind { + Transaction, + Batch, + Block, +} + +impl From for ProofKind { + fn from(value: proto::ProofType) -> Self { + match value { + proto::ProofType::Transaction => ProofKind::Transaction, + proto::ProofType::Batch => ProofKind::Batch, + proto::ProofType::Block => ProofKind::Block, + } + } +} + +impl std::fmt::Display for ProofKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ProofKind::Transaction => write!(f, "transaction"), + ProofKind::Batch => write!(f, "batch"), + ProofKind::Block => write!(f, "block"), + } + } +} + +impl miden_node_utils::tracing::ToValue for ProofKind { + fn to_value(&self) -> opentelemetry::Value { + self.to_string().into() + } +} diff --git a/bin/remote-prover/src/server/prover.rs b/bin/remote-prover/src/server/prover.rs new file mode 100644 index 0000000000..6ca76794e5 --- /dev/null +++ b/bin/remote-prover/src/server/prover.rs @@ -0,0 +1,122 @@ +use miden_block_prover::LocalBlockProver; +use miden_node_proto::BlockProofRequest; +use miden_node_utils::ErrorReport; +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::block::BlockProof; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; +use miden_tx::LocalTransactionProver; +use miden_tx_batch_prover::LocalBatchProver; +use tracing::instrument; + +use crate::COMPONENT; +use crate::generated::{self as proto}; +use crate::server::proof_kind::ProofKind; + +/// An enum representing the different types of provers available. +pub enum Prover { + Transaction(LocalTransactionProver), + Batch(LocalBatchProver), + Block(LocalBlockProver), +} + +impl Prover { + /// Constructs a [`Prover`] of the specified [`ProofKind`]. 
+ pub fn new(proof_type: ProofKind) -> Self { + match proof_type { + ProofKind::Transaction => Self::Transaction(LocalTransactionProver::default()), + ProofKind::Batch => Self::Batch(LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL)), + ProofKind::Block => Self::Block(LocalBlockProver::new(MIN_PROOF_SECURITY_LEVEL)), + } + } + + /// Proves a [`proto::ProofRequest`] using the appropriate prover implementation as specified + /// during construction. + pub fn prove(&self, request: proto::ProofRequest) -> Result { + match self { + Prover::Transaction(prover) => prover.prove_request(request), + Prover::Batch(prover) => prover.prove_request(request), + Prover::Block(prover) => prover.prove_request(request), + } + } +} + +/// This trait abstracts over proof request handling by providing a common interface for our +/// different provers. +/// +/// It standardizes the proving process by providing default implementations for the decoding of +/// requests, and encoding of response. Notably it also standardizes the instrumentation, though +/// implementations should still add attributes that can only be known post-decoding of the request. +/// +/// Implementations of this trait only need to provide the input and outputs types, as well as the +/// proof implementation. +trait ProveRequest { + type Input: miden_protocol::utils::Deserializable; + type Output: miden_protocol::utils::Serializable; + + fn prove(&self, input: Self::Input) -> Result; + + /// Entry-point to the proof request handling. + /// + /// Decodes the request, proves it, and encodes the response. + fn prove_request(&self, request: proto::ProofRequest) -> Result { + Self::decode_request(request) + .and_then(|input| { + // We cannot #[instrument] the trait's prove method because it lacks an + // implementation, so we do it manually. 
+ tracing::info_span!("prove", target = COMPONENT).in_scope(|| { + self.prove(input).inspect_err(|e| tracing::Span::current().set_error(e)) + }) + }) + .map(|output| Self::encode_response(output)) + } + + #[instrument(target=COMPONENT, skip_all, err)] + fn decode_request(request: proto::ProofRequest) -> Result { + use miden_protocol::utils::Deserializable; + + Self::Input::read_from_bytes(&request.payload).map_err(|e| { + tonic::Status::invalid_argument(e.as_report_context("failed to decode request")) + }) + } + + #[instrument(target=COMPONENT, skip_all)] + fn encode_response(output: Self::Output) -> proto::Proof { + use miden_protocol::utils::Serializable; + + proto::Proof { payload: output.to_bytes() } + } +} + +impl ProveRequest for LocalTransactionProver { + type Input = TransactionInputs; + type Output = ProvenTransaction; + + fn prove(&self, input: Self::Input) -> Result { + self.prove(input).map_err(|e| { + tonic::Status::internal(e.as_report_context("failed to prove transaction")) + }) + } +} + +impl ProveRequest for LocalBatchProver { + type Input = ProposedBatch; + type Output = ProvenBatch; + + fn prove(&self, input: Self::Input) -> Result { + self.prove(input) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove batch"))) + } +} + +impl ProveRequest for LocalBlockProver { + type Input = BlockProofRequest; + type Output = BlockProof; + + fn prove(&self, input: Self::Input) -> Result { + let BlockProofRequest { tx_batches, block_header, block_inputs } = input; + self.prove(tx_batches, &block_header, block_inputs) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove block"))) + } +} diff --git a/bin/remote-prover/src/server/service.rs b/bin/remote-prover/src/server/service.rs new file mode 100644 index 0000000000..4a72147a65 --- /dev/null +++ b/bin/remote-prover/src/server/service.rs @@ -0,0 +1,88 @@ +use std::num::NonZeroUsize; + +use miden_node_utils::tracing::OpenTelemetrySpanExt; +use tokio::sync::{Mutex, 
MutexGuard, SemaphorePermit}; +use tracing::instrument; + +use crate::server::proof_kind::ProofKind; +use crate::server::prover::Prover; +use crate::{COMPONENT, generated as proto}; + +pub struct ProverService { + permits: tokio::sync::Semaphore, + prover: tokio::sync::Mutex, + kind: ProofKind, +} + +impl ProverService { + pub fn with_capacity(kind: ProofKind, capacity: NonZeroUsize) -> Self { + let permits = tokio::sync::Semaphore::new(capacity.get()); + let prover = Mutex::new(Prover::new(kind)); + Self { permits, prover, kind } + } + + fn is_supported(&self, kind: ProofKind) -> bool { + self.kind == kind + } + + #[instrument(target=COMPONENT, skip_all, err)] + fn acquire_permit(&self) -> Result, tonic::Status> { + self.permits + .try_acquire() + .map_err(|_| tonic::Status::resource_exhausted("proof queue is full")) + } + + #[instrument(target=COMPONENT, skip_all)] + async fn acquire_prover(&self) -> MutexGuard<'_, Prover> { + self.prover.lock().await + } +} + +#[async_trait::async_trait] +impl proto::api_server::Api for ProverService { + async fn prove( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + // Record X-Request-ID header for trace correlation + let request_id = request + .metadata() + .get("x-request-id") + .and_then(|v| v.to_str().ok()) + .unwrap_or("unknown"); + tracing::Span::current().set_attribute("request.id", request_id); + + // Check that the proof type is supported. + let request = request.into_inner(); + // Protobuf enums return a default value if the enum is set to an unknown value. + // This round trip checks that the value is valid. + if request.proof_type() as i32 != request.proof_type { + return Err(tonic::Status::invalid_argument("unknown proof_type value")); + } + let proof_kind = ProofKind::from(request.proof_type()); + tracing::Span::current().set_attribute("request.kind", proof_kind); + + // Reject unsupported proof types early so they don't clog the queue. 
+ if !self.is_supported(proof_kind) { + return Err(tonic::Status::invalid_argument("unsupported proof type")); + } + + // This semaphore acts like a queue, but with a fixed capacity. + // + // We need to hold this until our request is processed to ensure that the queue capacity is + // not exceeded. + let _permit = self.acquire_permit()?; + + // This mutex is fair and uses FIFO ordering. + let prover = self.acquire_prover().await; + + // Blocking in place is fairly safe since we guarantee that only a single request is + // processed at a time. + // + // This has the downside that requests being proven cannot be cancelled since we are now + // outside the async runtime. This could occur if the server timeout is exceeded, or + // the client cancels the request. A different approach is technically possible, but + // would require more complex logic to handle cancellation in tandem with sync. + tokio::task::block_in_place(|| prover.prove(request)).map(tonic::Response::new) + } +} diff --git a/bin/remote-prover/src/api/status.rs b/bin/remote-prover/src/server/status.rs similarity index 51% rename from bin/remote-prover/src/api/status.rs rename to bin/remote-prover/src/server/status.rs index bb537b804b..6922f76167 100644 --- a/bin/remote-prover/src/api/status.rs +++ b/bin/remote-prover/src/server/status.rs @@ -1,25 +1,26 @@ +use proto::worker_status_api_server::WorkerStatusApiServer; use tonic::{Request, Response, Status}; -use crate::api::prover::ProofType; use crate::generated::worker_status_api_server::WorkerStatusApi; use crate::generated::{self as proto}; +use crate::server::proof_kind::ProofKind; -pub struct StatusRpcApi { - proof_type: ProofType, +pub struct StatusService { + kind: ProofKind, } -impl StatusRpcApi { - pub fn new(proof_type: ProofType) -> Self { - Self { proof_type } +impl StatusService { + pub fn new(kind: ProofKind) -> WorkerStatusApiServer { + WorkerStatusApiServer::new(Self { kind }) } } #[async_trait::async_trait] -impl WorkerStatusApi for 
StatusRpcApi { +impl WorkerStatusApi for StatusService { async fn status(&self, _: Request<()>) -> Result, Status> { Ok(Response::new(proto::WorkerStatus { version: env!("CARGO_PKG_VERSION").to_string(), - supported_proof_type: self.proof_type as i32, + supported_proof_type: self.kind as i32, })) } } diff --git a/bin/remote-prover/src/server/tests.rs b/bin/remote-prover/src/server/tests.rs new file mode 100644 index 0000000000..8172c344be --- /dev/null +++ b/bin/remote-prover/src/server/tests.rs @@ -0,0 +1,372 @@ +use std::collections::BTreeMap; +use std::num::NonZeroUsize; +use std::sync::Arc; +use std::time::Duration; + +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::note::NoteType; +use miden_protocol::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; +use miden_protocol::transaction::{ExecutedTransaction, ProvenTransaction}; +use miden_testing::{Auth, MockChainBuilder}; +use miden_tx::utils::{Deserializable, Serializable}; +use miden_tx::{LocalTransactionProver, TransactionVerifier}; +use miden_tx_batch_prover::LocalBatchProver; + +use crate::generated::api_client::ApiClient; +use crate::generated::{Proof, ProofRequest, ProofType}; +use crate::server::Server; +use crate::server::proof_kind::ProofKind; + +/// A gRPC client with which to interact with the server. +#[derive(Clone)] +struct Client { + inner: ApiClient, +} + +impl Client { + async fn connect(port: u16) -> Self { + let inner = ApiClient::connect(format!("http://127.0.0.1:{port}")) + .await + .expect("client should connect"); + + Self { inner } + } + + async fn submit_request(&mut self, request: ProofRequest) -> Result { + self.inner.prove(request).await.map(tonic::Response::into_inner) + } +} + +impl ProofRequest { + /// Generates a proof request for a transaction using [`MockChain`]. 
+ fn from_tx(tx: &ExecutedTransaction) -> Self { + let tx_inputs = tx.tx_inputs().clone(); + + Self { + proof_type: ProofType::Transaction as i32, + payload: tx_inputs.to_bytes(), + } + } + + fn from_batch(batch: &ProposedBatch) -> Self { + Self { + proof_type: ProofType::Batch as i32, + payload: batch.to_bytes(), + } + } + + async fn mock_tx() -> ExecutedTransaction { + // Create a mock transaction to send to the server + let mut mock_chain_builder = MockChainBuilder::new(); + let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); + + let fungible_asset_1: Asset = + FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) + .unwrap() + .into(); + let note_1 = mock_chain_builder + .add_p2id_note( + ACCOUNT_ID_SENDER.try_into().unwrap(), + account.id(), + &[fungible_asset_1], + NoteType::Private, + ) + .unwrap(); + + let mock_chain = mock_chain_builder.build().unwrap(); + + let tx_context = mock_chain + .build_tx_context(account.id(), &[note_1.id()], &[]) + .unwrap() + .disable_debug_mode() + .build() + .unwrap(); + + Box::pin(tx_context.execute()).await.unwrap() + } + + async fn mock_batch() -> ProposedBatch { + // Create a mock transaction to send to the server + let mut mock_chain_builder = MockChainBuilder::new(); + let account = mock_chain_builder.add_existing_wallet(Auth::BasicAuth).unwrap(); + + let fungible_asset_1: Asset = + FungibleAsset::new(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET.try_into().unwrap(), 100) + .unwrap() + .into(); + let note_1 = mock_chain_builder + .add_p2id_note( + ACCOUNT_ID_SENDER.try_into().unwrap(), + account.id(), + &[fungible_asset_1], + NoteType::Private, + ) + .unwrap(); + + let mock_chain = mock_chain_builder.build().unwrap(); + + let tx = mock_chain + .build_tx_context(account.id(), &[note_1.id()], &[]) + .unwrap() + .disable_debug_mode() + .build() + .unwrap(); + + let tx = Box::pin(tx.execute()).await.unwrap(); + let tx = tokio::task::block_in_place(|| { + 
LocalTransactionProver::default().prove(tx.tx_inputs().clone()).unwrap() + }); + + ProposedBatch::new( + vec![Arc::new(tx)], + mock_chain.latest_block_header(), + mock_chain.latest_partial_blockchain(), + BTreeMap::new(), + ) + .unwrap() + } +} + +// Test helpers for the server. +// +// Note: This is implemented under `#[cfg(test)]`. +impl Server { + /// A server configured with an arbitrary port (i.e. `port=0`) and the given kind. + /// + /// Capacity is set to 10 with a timeout of 60 seconds. + fn with_arbitrary_port(kind: ProofKind) -> Self { + Self { + port: 0, + kind, + timeout: Duration::from_secs(60), + capacity: NonZeroUsize::new(10).unwrap(), + } + } + + /// Overrides the capacity of the server. + /// + /// # Panics + /// + /// Panics if the given capacity is zero. + fn with_capacity(mut self, capacity: usize) -> Self { + self.capacity = NonZeroUsize::new(capacity).unwrap(); + self + } + + /// Overrides the timeout of the server. + fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } +} + +/// This test ensures that the legacy behaviour can still be configured. +/// +/// The original prover worker refused to process multiple requests concurrently. +/// This test ensures that the redesign behaves the same when limited to a capacity of 1. +/// +/// Create a server with a capacity of one and submit two requests. Ensure +/// that one succeeds and one fails with a resource exhaustion error. 
+#[tokio::test(flavor = "multi_thread")] +async fn legacy_behaviour_with_capacity_1() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_capacity(1) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + + let mut client_a = Client::connect(port).await; + let mut client_b = client_a.clone(); + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request); + + let (first, second) = tokio::join!(a, b); + + // We cannot know which got served and which got rejected. + // We can only assert that one of them is Ok and the other is Err. + assert!(first.is_ok() || second.is_ok()); + assert!(first.is_err() || second.is_err()); + // We also expect that the error is a resource exhaustion error. + let err = first.err().or(second.err()).unwrap(); + assert_eq!(err.code(), tonic::Code::ResourceExhausted); + + server.abort(); +} + +/// Test that multiple requests can be queued and capacity is respected. +/// +/// Create a server with a capacity of two and submit three requests. Ensure +/// that two succeed and one fails with a resource exhaustion error. +#[tokio::test(flavor = "multi_thread")] +async fn capacity_is_respected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_capacity(2) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + let mut client_a = Client::connect(port).await; + let mut client_b = client_a.clone(); + let mut client_c = client_a.clone(); + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request.clone()); + let c = client_c.submit_request(request); + + let (first, second, third) = tokio::join!(a, b, c); + + // We cannot know which got served and which got rejected. + // We can only assert that two succeeded and one failed. 
+ let mut expected = [true, true, false]; + let mut result = [first.is_ok(), second.is_ok(), third.is_ok()]; + expected.sort_unstable(); + result.sort_unstable(); + assert_eq!(expected, result); + + // We also expect that the error is a resource exhaustion error. + let err = first.err().or(second.err()).or(third.err()).unwrap(); + assert_eq!(err.code(), tonic::Code::ResourceExhausted); + + server.abort(); +} + +/// Ensures that the server request timeout is adhered to. +/// +/// We cannot actually enforce this for a request that is already being proven as the proof +/// is done in a blocking sync task. We can however check that a second queued request is rejected. +/// +/// This is tricky to test properly because we can't easily control the server's response time. +/// Instead we configure the server to have a ridiculously short timeout which should hopefully +/// always timeout. +#[tokio::test(flavor = "multi_thread")] +async fn timeout_is_respected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .with_timeout(Duration::from_nanos(10)) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + + let mut client_a = Client::connect(port).await; + let mut client_b = Client::connect(port).await; + + let a = client_a.submit_request(request.clone()); + let b = client_b.submit_request(request); + + let (a, b) = tokio::join!(a, b); + + // At least one of the requests should timeout. + let err = a.err().or(b.err()).unwrap(); + + assert_eq!(err.code(), tonic::Code::Cancelled); + assert!(err.message().contains("Timeout expired")); + + server.abort(); +} + +/// Ensures that an invalid proof kind is rejected. +/// +/// The error should be an invalid argument error, but since that is fairly broad we also inspect +/// the error message for mention of the invalid proof kind.
This is technically an implementation +/// detail, but it's the best we have without adding multiple abstraction layers. +#[tokio::test(flavor = "multi_thread")] +async fn invalid_proof_kind_is_rejected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .spawn() + .await + .expect("server should spawn"); + + let mut request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + request.proof_type = i32::MAX; + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await; + let err = response.unwrap_err(); + + assert_eq!(err.code(), tonic::Code::InvalidArgument); + assert!(err.message().contains("unknown proof_type value")); + + server.abort(); +} + +/// Ensures that a valid but unsupported proof kind is rejected. +/// +/// Aka submit a transaction proof request to a batch proving server. +/// +/// The error should be an invalid argument error, but since that is fairly broad we also inspect +/// the error message for mention of the unsupported proof kind. This is technically an +/// implementation detail, but it's the best we have without adding multiple abstraction layers. +#[tokio::test(flavor = "multi_thread")] +async fn unsupported_proof_kind_is_rejected() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Batch) + .spawn() + .await + .expect("server should spawn"); + + let request = ProofRequest::from_tx(&ProofRequest::mock_tx().await); + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await; + let err = response.unwrap_err(); + + assert_eq!(err.code(), tonic::Code::InvalidArgument); + assert!(err.message().contains("unsupported proof type")); + + server.abort(); +} + +/// Checks that a transaction request results in a correct proof. +/// +/// The proof is verified and the transaction IDs of request and response must correspond.
+#[tokio::test(flavor = "multi_thread")] +async fn transaction_proof_is_correct() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Transaction) + .spawn() + .await + .expect("server should spawn"); + + let tx = ProofRequest::mock_tx().await; + let request = ProofRequest::from_tx(&tx); + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await.unwrap(); + let response = ProvenTransaction::read_from_bytes(&response.payload).unwrap(); + + assert_eq!(response.id(), tx.id()); + TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL).verify(&response).unwrap(); + + server.abort(); +} + +/// Checks that a batch request results in a correct proof. +/// +/// The proof is replicated locally, which ensures that the gRPC codec and server code do the +/// correct thing. +#[tokio::test(flavor = "multi_thread")] +async fn batch_proof_is_correct() { + let (server, port) = Server::with_arbitrary_port(ProofKind::Batch) + .spawn() + .await + .expect("server should spawn"); + + let batch = ProofRequest::mock_batch().await; + let request = ProofRequest::from_batch(&batch); + + let mut client = Client::connect(port).await; + let response = client.submit_request(request).await.unwrap(); + let response = ProvenBatch::read_from_bytes(&response.payload).unwrap(); + + let expected = tokio::task::block_in_place(|| { + LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL).prove(batch).unwrap() + }); + assert_eq!(response, expected); + + server.abort(); +} diff --git a/bin/remote-prover/src/utils.rs b/bin/remote-prover/src/utils.rs deleted file mode 100644 index 1214911364..0000000000 --- a/bin/remote-prover/src/utils.rs +++ /dev/null @@ -1,178 +0,0 @@ -use std::net::TcpListener; - -use http::{HeaderMap, HeaderName, HeaderValue}; -use miden_remote_prover::error::RemoteProverError; -use pingora::http::ResponseHeader; -use pingora::protocols::http::ServerSession; -use pingora::{Error, ErrorType}; -use pingora_proxy::Session; -use
prost::Message; -use tonic::Code; -use tracing::debug; - -use crate::COMPONENT; -use crate::commands::PROXY_HOST; -use crate::proxy::metrics::QUEUE_DROP_COUNT; - -// CONSTANTS -// ================================================================================================ -const GRPC_CONTENT_TYPE: HeaderValue = HeaderValue::from_static("application/grpc"); -const GRPC_STATUS_HEADER: HeaderName = HeaderName::from_static("grpc-status"); -const GRPC_MESSAGE_HEADER: HeaderName = HeaderName::from_static("grpc-message"); - -/// Build gRPC trailers with status and optional message -fn build_grpc_trailers( - grpc_status: Code, - error_message: Option<&str>, -) -> pingora_core::Result { - let mut trailers = HeaderMap::new(); - - // Set gRPC status - let status_code = (grpc_status as i32).to_string(); - trailers.insert( - GRPC_STATUS_HEADER, - status_code.parse().map_err(|e| { - Error::because(ErrorType::InternalError, format!("Failed to parse grpc-status: {e}"), e) - })?, - ); - - // Set gRPC message if provided - if let Some(message) = error_message { - trailers.insert( - GRPC_MESSAGE_HEADER, - message.parse().map_err(|e| { - Error::because( - ErrorType::InternalError, - format!("Failed to parse grpc-message: {e}"), - e, - ) - })?, - ); - } - - Ok(trailers) -} - -/// Write a protobuf message as a gRPC response to a Pingora session -/// -/// This helper function takes a protobuf message and writes it to a Pingora session -/// in the proper gRPC format, handling message encoding, headers, and trailers. 
-pub async fn write_grpc_response_to_session( - session: &mut Session, - message: T, -) -> pingora_core::Result<()> -where - T: Message, -{ - // Serialize the protobuf message - let mut response_body = Vec::new(); - message.encode(&mut response_body).map_err(|e| { - Error::because(ErrorType::InternalError, format!("Failed to encode proto response: {e}"), e) - })?; - - let mut grpc_message = Vec::new(); - - // Add compression flag (1 byte, 0 = no compression) - grpc_message.push(0u8); - - // Add message length (4 bytes, big-endian) - let msg_len = response_body.len() as u32; - grpc_message.extend_from_slice(&msg_len.to_be_bytes()); - - // Add the actual message - grpc_message.extend_from_slice(&response_body); - - // Create gRPC response headers WITHOUT grpc-status (that goes in trailers) - let mut header = ResponseHeader::build(200, None)?; - header.insert_header(http::header::CONTENT_TYPE, GRPC_CONTENT_TYPE)?; - - session.set_keepalive(None); - session.write_response_header(Box::new(header), false).await?; - session.write_response_body(Some(grpc_message.into()), false).await?; - - // Send trailers with gRPC status - let trailers = build_grpc_trailers(Code::Ok, None)?; - session.write_response_trailers(trailers).await?; - - Ok(()) -} - -/// Write a gRPC error response to a Pingora session -/// -/// This helper function creates a proper gRPC error response with the specified -/// status code and error message. 
-pub async fn write_grpc_error_to_session( - session: &mut Session, - grpc_status: Code, - error_message: &str, -) -> pingora_core::Result<()> { - // Create gRPC response headers (always HTTP 200 for gRPC) - let mut header = ResponseHeader::build(200, None)?; - header.insert_header(http::header::CONTENT_TYPE, GRPC_CONTENT_TYPE)?; - - session.set_keepalive(None); - session.write_response_header(Box::new(header), false).await?; - - // gRPC errors don't have a body, just headers and trailers - session.write_response_body(None, false).await?; - - // Send trailers with gRPC status and error message - let trailers = build_grpc_trailers(grpc_status, Some(error_message))?; - session.write_response_trailers(trailers).await?; - - Ok(()) -} - -/// Create a gRPC `RESOURCE_EXHAUSTED` response for a full queue -pub(crate) async fn create_queue_full_response(session: &mut Session) -> pingora_core::Result<()> { - // Increment the queue drop count metric - QUEUE_DROP_COUNT.inc(); - - // Use our helper function to create a proper gRPC error response - write_grpc_error_to_session(session, Code::ResourceExhausted, "Too many requests in the queue") - .await -} - -/// Create a gRPC `RESOURCE_EXHAUSTED` response for rate limiting -pub async fn create_too_many_requests_response( - session: &mut Session, - max_request_per_second: isize, -) -> pingora_core::Result<()> { - // Use our helper function to create a proper gRPC error response - let error_message = - format!("Rate limit exceeded: {max_request_per_second} requests per second"); - write_grpc_error_to_session(session, Code::ResourceExhausted, &error_message).await -} - -/// Create a 400 response with an error message -/// -/// It will set the X-Error-Message header to the error message. 
-pub async fn create_response_with_error_message( - session: &mut ServerSession, - error_msg: String, -) -> pingora_core::Result<()> { - let mut header = ResponseHeader::build(400, None)?; - header.insert_header("X-Error-Message", error_msg)?; - session.set_keepalive(None); - session.write_response_header(Box::new(header)).await?; - Ok(()) -} - -/// Checks if a port is available for use. -/// -/// # Arguments -/// * `port` - The port to check. -/// * `service` - A descriptive name for the service (for logging purposes). -/// -/// # Returns -/// * `Ok(TcpListener)` if the port is available. -/// * `Err(RemoteProverError::PortAlreadyInUse)` if the port is already in use. -pub fn check_port_availability( - port: u16, - service: &str, -) -> Result { - let addr = format!("{PROXY_HOST}:{port}"); - TcpListener::bind(&addr) - .inspect(|_| debug!(target: COMPONENT, %service, %port, %addr, "Port is available")) - .map_err(|err| RemoteProverError::PortAlreadyInUse(err, port)) -} diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index b9df84d41d..9c3fe9387d 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -21,7 +21,6 @@ clap = { features = ["derive"], version = "4.5" } fs-err = { workspace = true } futures = { workspace = true } miden-air = { features = ["testing"], workspace = true } -miden-block-prover = { features = ["testing"], workspace = true } miden-node-block-producer = { workspace = true } miden-node-proto = { workspace = true } miden-node-store = { workspace = true } @@ -33,3 +32,6 @@ rayon = { version = "1.10" } tokio = { workspace = true } tonic = { default-features = true, workspace = true } url = { workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/stress-test/README.md b/bin/stress-test/README.md index 4d8c283c6e..d60a611907 100644 --- a/bin/stress-test/README.md +++ b/bin/stress-test/README.md @@ -20,14 +20,14 @@ This command allows to run stress 
tests against the Store component. These tests The endpoints that you can test are: - `load_state` -- `sync_state` - `sync_notes` - `sync_nullifiers` - `sync_transactions` +- `sync-chain-mmr` Most benchmarks accept options to control the number of iterations and concurrency level. The `load_state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. -**Note on Concurrency**: For the endpoints that support it (`sync_state`, `sync_notes`, `sync_nullifiers`), the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. +**Note on Concurrency**: For the endpoints that support it (`sync_notes`, `sync_nullifiers`), the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. Example usage: @@ -119,18 +119,6 @@ Database contains 99961 accounts and 99960 nullifiers **Performance Note**: The load-state benchmark shows that account tree loading (~21.3s) and nullifier tree loading (~21.5s) are the primary bottlenecks, while MMR loading and database connection are negligible (<3ms each). 
-- sync-state -``` bash -$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-state - -Average request latency: 1.120061ms -P50 request latency: 1.106042ms -P95 request latency: 1.530708ms -P99 request latency: 1.919209ms -P99.9 request latency: 5.795125ms -Average notes per response: 1.3159 -``` - - sync-notes ``` bash $ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-notes @@ -171,5 +159,21 @@ Pagination statistics: Average pages per run: 2.00 ``` +- sync-chain-mmr +``` bash +$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-chain-mmr --block-range 1000 + +Average request latency: 1.021ms +P50 request latency: 0.981ms +P95 request latency: 1.412ms +P99 request latency: 1.822ms +P99.9 request latency: 3.174ms +Pagination statistics: + Total runs: 10000 + Runs triggering pagination: 1 + Pagination rate: 0.01% + Average pages per run: 1.00 +``` + ## License This project is [MIT licensed](../../LICENSE). 
diff --git a/bin/stress-test/build.rs b/bin/stress-test/build.rs new file mode 100644 index 0000000000..ed4038d06e --- /dev/null +++ b/bin/stress-test/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/stress-test/src/main.rs b/bin/stress-test/src/main.rs index 095b04caf1..a5cc82f9f4 100644 --- a/bin/stress-test/src/main.rs +++ b/bin/stress-test/src/main.rs @@ -4,9 +4,9 @@ use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; use seeding::seed_store; use store::{ + bench_sync_chain_mmr, bench_sync_notes, bench_sync_nullifiers, - bench_sync_state, bench_sync_transactions, load_state, }; @@ -70,8 +70,6 @@ pub enum Endpoint { #[arg(short, long, value_name = "PREFIXES", default_value = "10")] prefixes: usize, }, - #[command(name = "sync-state")] - SyncState, #[command(name = "sync-notes")] SyncNotes, #[command(name = "sync-transactions")] @@ -83,6 +81,12 @@ pub enum Endpoint { #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "100")] block_range: u32, }, + #[command(name = "sync-chain-mmr")] + SyncChainMmr { + /// Block range size for each request (number of blocks to query). 
+ #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "1000")] + block_range: u32, + }, #[command(name = "load-state")] LoadState, } @@ -111,9 +115,6 @@ async fn main() { Endpoint::SyncNullifiers { prefixes } => { bench_sync_nullifiers(data_directory, iterations, concurrency, prefixes).await; }, - Endpoint::SyncState => { - bench_sync_state(data_directory, iterations, concurrency).await; - }, Endpoint::SyncNotes => { bench_sync_notes(data_directory, iterations, concurrency).await; }, @@ -127,6 +128,9 @@ async fn main() { ) .await; }, + Endpoint::SyncChainMmr { block_range } => { + bench_sync_chain_mmr(data_directory, iterations, concurrency, block_range).await; + }, Endpoint::LoadState => { load_state(&data_directory).await; }, diff --git a/bin/stress-test/src/seeding/metrics.rs b/bin/stress-test/src/seeding/metrics.rs index cdf32965ab..56e89e4a95 100644 --- a/bin/stress-test/src/seeding/metrics.rs +++ b/bin/stress-test/src/seeding/metrics.rs @@ -76,7 +76,7 @@ impl SeedingMetrics { } /// Prints the block metrics table. 
- #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn print_block_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "\nBlock metrics:")?; writeln!(f, "Note: Each block contains 256 transactions (16 batches * 16 transactions).")?; @@ -189,7 +189,7 @@ impl SeedingMetrics { } impl Display for SeedingMetrics { - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!( f, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index e0fe79338f..3b80481bbd 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -5,7 +5,6 @@ use std::time::{Duration, Instant}; use metrics::SeedingMetrics; use miden_air::ExecutionProof; -use miden_block_prover::LocalBlockProver; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::generated::store::rpc_client::RpcClient; @@ -30,6 +29,7 @@ use miden_protocol::block::{ FeeParameters, ProposedBlock, ProvenBlock, + SignedBlock, }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; use miden_protocol::crypto::dsa::falcon512_rpo::{PublicKey, SecretKey}; @@ -145,7 +145,7 @@ async fn generate_blocks( let mut consume_notes_txs = vec![]; let consumes_per_block = TRANSACTIONS_PER_BATCH * BATCHES_PER_BLOCK - 1; - #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] + #[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] let num_public_accounts = (consumes_per_block as f64 * (f64::from(public_accounts_percentage) / 100.0)) .round() as usize; @@ -161,7 +161,7 @@ async fn generate_blocks( SecretKey::with_rng(&mut *rng) }; - let mut prev_block = genesis_block.clone(); + let mut prev_block_header = genesis_block.header().clone(); let mut current_anchor_header = genesis_block.header().clone(); for i in 0..total_blocks 
{ @@ -193,7 +193,7 @@ async fn generate_blocks( note_nullifiers.extend(notes.iter().map(|n| n.nullifier().prefix())); // create the tx that creates the notes - let emit_note_tx = create_emit_note_tx(prev_block.header(), &mut faucet, notes.clone()); + let emit_note_tx = create_emit_note_tx(&prev_block_header, &mut faucet, notes.clone()); // collect all the txs block_txs.push(emit_note_tx); @@ -202,27 +202,23 @@ async fn generate_blocks( // create the batches with [TRANSACTIONS_PER_BATCH] txs each let batches: Vec = block_txs .par_chunks(TRANSACTIONS_PER_BATCH) - .map(|txs| create_batch(txs, prev_block.header())) + .map(|txs| create_batch(txs, &prev_block_header)) .collect(); // create the block and send it to the store let block_inputs = get_block_inputs(store_client, &batches, &mut metrics).await; // update blocks - prev_block = apply_block(batches, block_inputs, store_client, &mut metrics).await; - if current_anchor_header.block_epoch() != prev_block.header().block_epoch() { - current_anchor_header = prev_block.header().clone(); + prev_block_header = apply_block(batches, block_inputs, store_client, &mut metrics).await; + if current_anchor_header.block_epoch() != prev_block_header.block_epoch() { + current_anchor_header = prev_block_header.clone(); } // create the consume notes txs to be used in the next block let batch_inputs = - get_batch_inputs(store_client, prev_block.header(), ¬es, &mut metrics).await; - consume_notes_txs = create_consume_note_txs( - prev_block.header(), - accounts, - notes, - &batch_inputs.note_proofs, - ); + get_batch_inputs(store_client, &prev_block_header, ¬es, &mut metrics).await; + consume_notes_txs = + create_consume_note_txs(&prev_block_header, accounts, notes, &batch_inputs.note_proofs); // track store size every 50 blocks if i % 50 == 0 { @@ -248,21 +244,21 @@ async fn apply_block( block_inputs: BlockInputs, store_client: &StoreClient, metrics: &mut SeedingMetrics, -) -> ProvenBlock { - let proposed_block = 
ProposedBlock::new(block_inputs.clone(), batches).unwrap(); +) -> BlockHeader { + let proposed_block = ProposedBlock::new(block_inputs, batches).unwrap(); let (header, body) = proposed_block.clone().into_header_and_body().unwrap(); - let block_proof = LocalBlockProver::new(0) - .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) - .unwrap(); + let block_size: usize = header.to_bytes().len() + body.to_bytes().len(); let signature = EcdsaSecretKey::new().sign(header.commitment()); - let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - let block_size: usize = proven_block.to_bytes().len(); + // SAFETY: The header, body, and signature are known to correspond to each other. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + let ordered_batches = proposed_block.batches().clone(); let start = Instant::now(); - store_client.apply_block(&proven_block).await.unwrap(); + store_client.apply_block(&ordered_batches, &signed_block).await.unwrap(); metrics.track_block_insertion(start.elapsed(), block_size); - proven_block + let (header, ..) = signed_block.into_parts(); + header } // HELPER FUNCTIONS @@ -366,7 +362,7 @@ fn create_batch(txs: &[ProvenTransaction], block_ref: &BlockHeader) -> ProvenBat account_updates, InputNotes::new(input_notes).unwrap(), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(txs.iter().map(TransactionHeader::from).collect()), ) .unwrap() @@ -522,6 +518,8 @@ async fn get_block_inputs( /// Runs the store with the given data directory. Returns a tuple with: /// - a gRPC client to access the store /// - the URL of the store +/// +/// The store uses a local prover. 
pub async fn start_store( data_directory: PathBuf, ) -> (RpcClient>, Url) { @@ -543,6 +541,7 @@ pub async fn start_store( task::spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, diff --git a/bin/stress-test/src/store/metrics.rs b/bin/stress-test/src/store/metrics.rs index 95f8ce0ffd..b56f362643 100644 --- a/bin/stress-test/src/store/metrics.rs +++ b/bin/stress-test/src/store/metrics.rs @@ -18,7 +18,7 @@ pub fn print_summary(timers_accumulator: &[Duration]) { } /// Computes a percentile from a list of durations. -#[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] +#[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] fn compute_percentile(times: &[Duration], percentile: f64) -> Duration { if times.is_empty() { return Duration::ZERO; diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index fa39303aed..314a5e95d0 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -24,9 +24,6 @@ mod metrics; // CONSTANTS // ================================================================================================ -/// Number of accounts used in each `sync_state` call. -const ACCOUNTS_PER_SYNC_STATE: usize = 5; - /// Number of accounts used in each `sync_notes` call. const ACCOUNTS_PER_SYNC_NOTES: usize = 15; @@ -36,77 +33,6 @@ const NOTE_IDS_PER_NULLIFIERS_CHECK: usize = 20; /// Number of attempts the benchmark will make to reach the store before proceeding. const STORE_STATUS_RETRIES: usize = 10; -// SYNC STATE -// ================================================================================================ - -/// Sends multiple `sync_state` requests to the store and prints the performance. -/// -/// Arguments: -/// - `data_directory`: directory that contains the database dump file and the accounts ids dump -/// file. -/// - `iterations`: number of requests to send. 
-/// - `concurrency`: number of requests to send in parallel. -pub async fn bench_sync_state(data_directory: PathBuf, iterations: usize, concurrency: usize) { - // load accounts from the dump file - let accounts_file = data_directory.join(ACCOUNTS_FILENAME); - let accounts = fs::read_to_string(&accounts_file) - .await - .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); - let mut account_ids = accounts.lines().map(|a| AccountId::from_hex(a).unwrap()).cycle(); - - let (store_client, _) = start_store(data_directory).await; - - wait_for_store(&store_client).await.unwrap(); - - // each request will have 5 account ids, 5 note tags and will be sent with block number 0 - let request = |_| { - let mut client = store_client.clone(); - let account_batch: Vec = - account_ids.by_ref().take(ACCOUNTS_PER_SYNC_STATE).collect(); - tokio::spawn(async move { sync_state(&mut client, account_batch, 0).await }) - }; - - // create a stream of tasks to send sync_notes requests - let (timers_accumulator, responses) = stream::iter(0..iterations) - .map(request) - .buffer_unordered(concurrency) - .map(|res| res.unwrap()) - .collect::<(Vec<_>, Vec<_>)>() - .await; - - print_summary(&timers_accumulator); - - #[allow(clippy::cast_precision_loss)] - let average_notes_per_response = - responses.iter().map(|r| r.notes.len()).sum::() as f64 / responses.len() as f64; - println!("Average notes per response: {average_notes_per_response}"); -} - -/// Sends a single `sync_state` request to the store and returns a tuple with: -/// - the elapsed time. -/// - the response. 
-pub async fn sync_state( - api_client: &mut RpcClient>, - account_ids: Vec, - block_num: u32, -) -> (Duration, proto::rpc::SyncStateResponse) { - let note_tags = account_ids - .iter() - .map(|id| u32::from(NoteTag::with_account_target(*id))) - .collect::>(); - - let account_ids = account_ids - .iter() - .map(|id| proto::account::AccountId { id: id.to_bytes() }) - .collect::>(); - - let sync_request = proto::rpc::SyncStateRequest { block_num, note_tags, account_ids }; - - let start = Instant::now(); - let response = api_client.sync_state(sync_request).await.unwrap(); - (start.elapsed(), response.into_inner()) -} - // SYNC NOTES // ================================================================================================ @@ -197,61 +123,68 @@ pub async fn bench_sync_nullifiers( .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); let account_ids: Vec = accounts .lines() - .take(ACCOUNTS_PER_SYNC_STATE) + .take(ACCOUNTS_PER_SYNC_NOTES) .map(|a| AccountId::from_hex(a).unwrap()) .collect(); - // get all nullifier prefixes from the store + // Get all nullifier prefixes from the store using sync_notes let mut nullifier_prefixes: Vec = vec![]; let mut current_block_num = 0; loop { - // get the accounts notes - let (_, response) = - sync_state(&mut store_client, account_ids.clone(), current_block_num).await; + // Get the accounts notes using sync_notes + let note_tags: Vec = account_ids + .iter() + .map(|id| u32::from(NoteTag::with_account_target(*id))) + .collect(); + let sync_request = proto::rpc::SyncNotesRequest { + block_range: Some(proto::rpc::BlockRange { + block_from: current_block_num, + block_to: None, + }), + note_tags, + }; + let response = store_client.sync_notes(sync_request).await.unwrap().into_inner(); + let note_ids = response .notes .iter() .map(|n| n.note_id.unwrap()) .collect::>(); - // get the notes nullifiers, limiting to 20 notes maximum + // Get the notes nullifiers, limiting to 20 notes maximum let note_ids_to_fetch 
= note_ids.iter().take(NOTE_IDS_PER_NULLIFIERS_CHECK).copied().collect::>(); - let notes = store_client - .get_notes_by_id(proto::note::NoteIdList { ids: note_ids_to_fetch }) - .await - .unwrap() - .into_inner() - .notes; - - nullifier_prefixes.extend( - notes - .iter() - .filter_map(|n| { - // private notes are filtered out because `n.details` is None - let details_bytes = n.note.as_ref()?.details.as_ref()?; - let details = NoteDetails::read_from_bytes(details_bytes).unwrap(); - Some(u32::from(details.nullifier().prefix())) - }) - .collect::>(), - ); + if !note_ids_to_fetch.is_empty() { + let notes = store_client + .get_notes_by_id(proto::note::NoteIdList { ids: note_ids_to_fetch }) + .await + .unwrap() + .into_inner() + .notes; + + nullifier_prefixes.extend( + notes + .iter() + .filter_map(|n| { + // Private notes are filtered out because `n.details` is None + let details_bytes = n.note.as_ref()?.details.as_ref()?; + let details = NoteDetails::read_from_bytes(details_bytes).unwrap(); + Some(u32::from(details.nullifier().prefix())) + }) + .collect::>(), + ); + } - // Use the response from the first chunk to update block number - // (all chunks should return the same block header for the same block_num) - let (_, first_response) = sync_state( - &mut store_client, - account_ids[..1000.min(account_ids.len())].to_vec(), - current_block_num, - ) - .await; - current_block_num = first_response.block_header.unwrap().block_num; - if first_response.chain_tip == current_block_num { + // Update block number from pagination info + let pagination_info = response.pagination_info.expect("pagination_info should exist"); + current_block_num = pagination_info.block_num; + if pagination_info.chain_tip == current_block_num { break; } } let mut nullifiers = nullifier_prefixes.into_iter().cycle(); - // each request will have `prefixes_per_request` prefixes and block number 0 + // Each request will have `prefixes_per_request` prefixes and block number 0 let request = |_| { let mut client 
= store_client.clone(); @@ -260,7 +193,7 @@ pub async fn bench_sync_nullifiers( tokio::spawn(async move { sync_nullifiers(&mut client, nullifiers_batch).await }) }; - // create a stream of tasks to send the requests + // Create a stream of tasks to send the requests let (timers_accumulator, responses) = stream::iter(0..iterations) .map(request) .buffer_unordered(concurrency) @@ -270,7 +203,7 @@ pub async fn bench_sync_nullifiers( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_nullifiers_per_response = responses.iter().map(|r| r.nullifiers.len()).sum::() as f64 / responses.len() as f64; println!("Average nullifiers per response: {average_nullifiers_per_response}"); @@ -364,7 +297,7 @@ pub async fn bench_sync_transactions( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_transactions_per_response = if responses.is_empty() { 0.0 } else { @@ -376,13 +309,13 @@ pub async fn bench_sync_transactions( // Calculate pagination statistics let total_runs = results.len(); let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let pagination_rate = if total_runs > 0 { (paginated_runs as f64 / total_runs as f64) * 100.0 } else { 0.0 }; - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let avg_pages = if total_runs > 0 { results.iter().map(|r| r.pages as f64).sum::() / total_runs as f64 } else { @@ -481,6 +414,76 @@ async fn sync_transactions_paginated( } } +// SYNC CHAIN MMR +// ================================================================================================ + +/// Sends multiple `sync_chain_mmr` requests to the store and prints the performance. +/// +/// Arguments: +/// - `data_directory`: directory that contains the database dump file. 
+/// - `iterations`: number of requests to send. +/// - `concurrency`: number of requests to send in parallel. +/// - `block_range_size`: number of blocks to include per request. +pub async fn bench_sync_chain_mmr( + data_directory: PathBuf, + iterations: usize, + concurrency: usize, + block_range_size: u32, +) { + let (store_client, _) = start_store(data_directory).await; + + wait_for_store(&store_client).await.unwrap(); + + let chain_tip = store_client.clone().status(()).await.unwrap().into_inner().chain_tip; + let block_range_size = block_range_size.max(1); + + let request = |_| { + let mut client = store_client.clone(); + tokio::spawn(async move { sync_chain_mmr(&mut client, chain_tip, block_range_size).await }) + }; + + let results = stream::iter(0..iterations) + .map(request) + .buffer_unordered(concurrency) + .map(|res| res.unwrap()) + .collect::>() + .await; + + let timers_accumulator: Vec = results.iter().map(|r| r.duration).collect(); + + print_summary(&timers_accumulator); + + let total_runs = results.len(); + + println!("Pagination statistics:"); + println!(" Total runs: {total_runs}"); +} + +/// Sends a single `sync_chain_mmr` request to the store and returns a tuple with: +/// - the elapsed time. +/// - the response. 
+async fn sync_chain_mmr( + api_client: &mut RpcClient>, + block_from: u32, + block_to: u32, +) -> SyncChainMmrRun { + let sync_request = proto::rpc::SyncChainMmrRequest { + block_range: Some(proto::rpc::BlockRange { block_from, block_to: Some(block_to) }), + }; + + let start = Instant::now(); + let response = api_client.sync_chain_mmr(sync_request).await.unwrap(); + let elapsed = start.elapsed(); + let response = response.into_inner(); + let _mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); + SyncChainMmrRun { duration: elapsed } +} + +#[derive(Clone)] +struct SyncChainMmrRun { + duration: Duration, +} + // LOAD STATE // ================================================================================================ diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index e5e5511ad1..6ca345217a 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -22,16 +22,14 @@ tracing-forest = ["miden-node-utils/tracing-forest"] anyhow = { workspace = true } futures = { workspace = true } itertools = { workspace = true } -miden-block-prover = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["batch-prover", "block-prover"], workspace = true } miden-standards = { workspace = true } -miden-tx = { default-features = true, workspace = true } miden-tx-batch-prover = { workspace = true } -rand = { version = "0.9" } +rand = { workspace = true } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } @@ -46,13 +44,14 @@ assert_matches = { workspace = true } miden-node-store = { workspace = true } miden-node-test-macro = { workspace = true } 
miden-node-utils = { features = ["testing"], workspace = true } +miden-node-validator = { workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } miden-tx = { features = ["testing"], workspace = true } pretty_assertions = "1.4" -rand_chacha = { default-features = false, version = "0.9" } +rand_chacha = { default-features = false, workspace = true } rstest = { workspace = true } serial_test = "3.2" -tempfile = { version = "3.20" } +tempfile = { workspace = true } tokio = { features = ["test-util"], workspace = true } winterfell = { version = "0.13" } diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index e3cc714c2a..34dab83a3f 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -9,7 +9,7 @@ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{BatchId, ProposedBatch, ProvenBatch}; -use miden_remote_prover_client::remote_prover::batch_prover::RemoteBatchProver; +use miden_remote_prover_client::RemoteBatchProver; use miden_tx_batch_prover::LocalBatchProver; use rand::Rng; use tokio::task::JoinSet; diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index a3a36ec4f0..56b5a3666f 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,29 +1,15 @@ -use std::ops::{Deref, Range}; +use std::ops::Deref; use std::sync::Arc; use anyhow::Context; use futures::FutureExt; -use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{OrderedBatches, ProvenBatch}; -use miden_protocol::block::{ - 
BlockBody, - BlockHeader, - BlockInputs, - BlockNumber, - BlockProof, - ProposedBlock, - ProvenBlock, -}; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock, SignedBlock}; use miden_protocol::note::NoteHeader; -use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; -use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; -use rand::Rng; +use miden_protocol::transaction::TransactionHeader; use tokio::time::Duration; -use tracing::{Span, info, instrument}; -use url::Url; +use tracing::{Span, instrument}; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; @@ -35,21 +21,19 @@ use crate::{COMPONENT, TelemetryInjectorExt}; // ================================================================================================= pub struct BlockBuilder { + /// The frequency at which blocks are produced. pub block_interval: Duration, - /// Used to simulate block proving by sleeping for a random duration selected from this range. - pub simulated_proof_time: Range, /// Simulated block failure rate as a percentage. /// /// Note: this _must_ be sign positive and less than 1.0. pub failure_rate: f64, + /// The store RPC client for committing blocks. pub store: StoreClient, + /// The validator RPC client for validating blocks. pub validator: BlockProducerValidatorClient, - - /// The prover used to prove a proposed block into a proven block. - pub block_prover: BlockProver, } impl BlockBuilder { @@ -59,20 +43,12 @@ impl BlockBuilder { pub fn new( store: StoreClient, validator: BlockProducerValidatorClient, - block_prover_url: Option, block_interval: Duration, ) -> Self { - let block_prover = match block_prover_url { - Some(url) => BlockProver::new_remote(url), - None => BlockProver::new_local(MIN_PROOF_SECURITY_LEVEL), - }; - Self { block_interval, // Note: The range cannot be empty. 
- simulated_proof_time: Duration::ZERO..Duration::from_millis(1), failure_rate: 0.0, - block_prover, store, validator, } @@ -136,16 +112,11 @@ impl BlockBuilder { self.get_block_inputs(selected) .inspect_ok(BlockBatchesAndInputs::inject_telemetry) .and_then(|inputs| self.propose_block(inputs)) - .inspect_ok(|(proposed_block, _)| { + .inspect_ok(|proposed_block| { ProposedBlock::inject_telemetry(proposed_block); }) - .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) - .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) - .inspect_ok(ProvenBlock::inject_telemetry) - // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot - // handle errors after it considers the process complete (which makes sense). - .and_then(|proven_block| async { self.inject_failure(proven_block) }) - .and_then(|proven_block| self.commit_block(mempool, proven_block)) + .and_then(|proposed_block| self.build_and_validate_block(proposed_block)) + .and_then(|(ordered_batches, signed_block)| self.commit_block(mempool, ordered_batches, signed_block)) // Handle errors by propagating the error to the root span and rolling back the block. 
.inspect_err(|err| Span::current().set_error(err)) .or_else(|err| async { @@ -239,23 +210,21 @@ impl BlockBuilder { async fn propose_block( &self, batches_inputs: BlockBatchesAndInputs, - ) -> Result<(ProposedBlock, BlockInputs), BuildBlockError> { + ) -> Result { let BlockBatchesAndInputs { batches, inputs } = batches_inputs; let batches = batches.into_iter().map(Arc::unwrap_or_clone).collect(); - let proposed_block = ProposedBlock::new(inputs.clone(), batches) - .map_err(BuildBlockError::ProposeBlockFailed)?; + let proposed_block = + ProposedBlock::new(inputs, batches).map_err(BuildBlockError::ProposeBlockFailed)?; - Ok((proposed_block, inputs)) + Ok(proposed_block) } #[instrument(target = COMPONENT, name = "block_builder.validate_block", skip_all, err)] - async fn validate_block( + async fn build_and_validate_block( &self, proposed_block: ProposedBlock, - block_inputs: BlockInputs, - ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> - { + ) -> Result<(OrderedBatches, SignedBlock), BuildBlockError> { // Concurrently build the block and validate it via the validator. let build_result = tokio::task::spawn_blocking({ let proposed_block = proposed_block.clone(); @@ -278,53 +247,27 @@ impl BlockBuilder { } let (ordered_batches, ..) = proposed_block.into_parts(); - Ok((ordered_batches, block_inputs, header, signature, body)) - } - - #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] - async fn prove_block( - &self, - ordered_batches: OrderedBatches, - block_inputs: BlockInputs, - header: BlockHeader, - signature: Signature, - body: BlockBody, - ) -> Result { - // Prove block using header and body from validator. - let block_proof = self - .block_prover - .prove(ordered_batches.clone(), header.clone(), block_inputs) - .await?; - self.simulate_proving().await; - - // SAFETY: The header and body are assumed valid and consistent with the proof. 
- let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { - return Err(BuildBlockError::SecurityLevelTooLow( - proven_block.proof_security_level(), - MIN_PROOF_SECURITY_LEVEL, - )); - } - // TODO(sergerad): Consider removing this validation. Once block proving is implemented, - // this would be replaced with verifying the proof returned from the prover against - // the block header. - validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; - - Ok(proven_block) + // SAFETY: The header, body, and signature are known to correspond to each other because the + // header and body are derived from the proposed block and the signature is verified + // against the corresponding commitment. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + Ok((ordered_batches, signed_block)) } #[instrument(target = COMPONENT, name = "block_builder.commit_block", skip_all, err)] async fn commit_block( &self, mempool: &SharedMempool, - built_block: ProvenBlock, + ordered_batches: OrderedBatches, + signed_block: SignedBlock, ) -> Result<(), BuildBlockError> { self.store - .apply_block(&built_block) + .apply_block(&ordered_batches, &signed_block) .await .map_err(BuildBlockError::StoreApplyBlockFailed)?; - mempool.lock().await.commit_block(built_block.header().clone()); + let (header, ..) 
= signed_block.into_parts(); + mempool.lock().await.commit_block(header); Ok(()) } @@ -333,31 +276,6 @@ impl BlockBuilder { async fn rollback_block(&self, mempool: &SharedMempool, block: BlockNumber) { mempool.lock().await.rollback_block(block); } - - #[instrument(target = COMPONENT, name = "block_builder.simulate_proving", skip_all)] - async fn simulate_proving(&self) { - let proving_duration = rand::rng().random_range(self.simulated_proof_time.clone()); - - Span::current().set_attribute("range.min_s", self.simulated_proof_time.start); - Span::current().set_attribute("range.max_s", self.simulated_proof_time.end); - Span::current().set_attribute("dice_roll_s", proving_duration); - - tokio::time::sleep(proving_duration).await; - } - - #[instrument(target = COMPONENT, name = "block_builder.inject_failure", skip_all, err)] - fn inject_failure(&self, value: T) -> Result { - let roll = rand::rng().random::(); - - Span::current().set_attribute("failure_rate", self.failure_rate); - Span::current().set_attribute("dice_roll", roll); - - if roll < self.failure_rate { - Err(BuildBlockError::InjectedFailure) - } else { - Ok(value) - } - } } /// A wrapper around batches selected for inlucion in a block, primarily used to be able to inject @@ -454,76 +372,3 @@ impl TelemetryInjectorExt for ProvenBlock { span.set_attribute("block.commitments.transaction", header.tx_commitment()); } } - -// BLOCK PROVER -// ================================================================================================ - -pub enum BlockProver { - Local(LocalBlockProver), - Remote(RemoteBlockProver), -} - -impl BlockProver { - pub fn new_local(security_level: u32) -> Self { - info!(target: COMPONENT, "Using local block prover"); - Self::Local(LocalBlockProver::new(security_level)) - } - - pub fn new_remote(endpoint: impl Into) -> Self { - info!(target: COMPONENT, "Using remote block prover"); - Self::Remote(RemoteBlockProver::new(endpoint)) - } - - #[instrument(target = COMPONENT, skip_all, err)] - 
async fn prove( - &self, - tx_batches: OrderedBatches, - block_header: BlockHeader, - block_inputs: BlockInputs, - ) -> Result { - match self { - Self::Local(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .map_err(BuildBlockError::ProveBlockFailed), - Self::Remote(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .await - .map_err(BuildBlockError::RemoteProverClientError), - } - } -} - -/// Validates that the proven block's transaction headers are consistent with the transactions -/// passed in the proposed block. -/// -/// This expects that transactions from the proposed block and proven block are in the same -/// order, as defined by [`OrderedTransactionHeaders`]. -fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, -) -> Result<(), BuildBlockError> { - if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { - return Err(BuildBlockError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.body().transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. 
- for (proposed_header, proven_header) in proposed_txs - .as_slice() - .iter() - .zip(proven_block.body().transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(BuildBlockError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) -} diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 5b2ab30b32..f581ca95e8 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -1,5 +1,3 @@ -#![allow(dead_code, reason = "WIP: mempoool refactor")] - use std::collections::HashSet; use std::sync::Arc; @@ -127,10 +125,6 @@ impl AuthenticatedTransaction { Arc::clone(&self.inner) } - pub fn raw_proven_transaction(&self) -> &ProvenTransaction { - &self.inner - } - pub fn expires_at(&self) -> BlockNumber { self.inner.expiration_block_num() } @@ -177,4 +171,8 @@ impl AuthenticatedTransaction { self.store_account_state = None; self } + + pub fn raw_proven_transaction(&self) -> &ProvenTransaction { + &self.inner + } } diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 40c74c99f5..b610b0534a 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,6 +1,5 @@ use core::error::Error as CoreError; -use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -223,16 +222,10 @@ pub enum BuildBlockError { ValidateBlockFailed(#[source] Box), #[error("block signature is invalid")] InvalidSignature, - #[error("failed to prove block")] - ProveBlockFailed(#[source] BlockProverError), + /// We sometimes randomly inject errors into the batch building process to test our failure /// responses. 
- #[error("nothing actually went wrong, failure was injected on purpose")] - InjectedFailure, - #[error("failed to prove block with remote prover")] - RemoteProverClientError(#[source] RemoteProverClientError), - #[error("block proof security level is too low: {0} < {1}")] - SecurityLevelTooLow(u32, u32), + /// Custom error variant for errors not covered by the other variants. #[error("{error_msg}")] Other { diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 36ab9b53d1..955aa23565 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -60,7 +60,7 @@ pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(1); /// /// The value is selected such that all transactions should approximately be processed within one /// minutes with a block time of 5s. -#[allow(clippy::cast_sign_loss, reason = "Both durations are positive")] +#[expect(clippy::cast_sign_loss, reason = "Both durations are positive")] pub const DEFAULT_MEMPOOL_TX_CAPACITY: NonZeroUsize = NonZeroUsize::new( DEFAULT_MAX_BATCHES_PER_BLOCK * DEFAULT_MAX_TXS_PER_BATCH diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index 461a836c25..c41e305fab 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -416,7 +416,7 @@ mod tests { BTreeMap::from([(account_update.account_id(), account_update)]), InputNotes::default(), Vec::default(), - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(vec![tx_header]), ) .unwrap(); diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 8245c1ee6b..d7ea49db07 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -40,6 +40,9 @@ use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, 
SERVER_NUM_BATCH_BUILDERS}; +#[cfg(test)] +mod tests; + /// The block producer server. /// /// Specifies how to connect to the store, batch prover, and block prover components. @@ -55,8 +58,6 @@ pub struct BlockProducer { pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option, - /// The address of the block prover component. - pub block_prover_url: Option, /// The interval at which to produce batches. pub batch_interval: Duration, /// The interval at which to produce blocks. @@ -82,7 +83,6 @@ impl BlockProducer { /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is /// encountered. - #[allow(clippy::too_many_lines)] pub async fn serve(self) -> anyhow::Result<()> { info!(target: COMPONENT, endpoint=?self.block_producer_address, store=%self.store_url, "Initializing server"); let store = StoreClient::new(self.store_url.clone()); @@ -123,8 +123,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); - let block_builder = - BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); + let block_builder = BlockBuilder::new(store.clone(), validator, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 453512597b..8c98e9da4d 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -1,27 +1,25 @@ +use std::num::NonZeroUsize; use std::time::Duration; -use miden_air::{ExecutionProof, HashFunction}; -use miden_node_proto::generated::{ - self as proto, block_producer::api_client as block_producer_client, -}; +use miden_node_proto::generated::block_producer::api_client as block_producer_client; use miden_node_store::{GenesisState, Store}; -use miden_protocol::{ - Digest, - account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}, - 
transaction::ProvenTransactionBuilder, -}; -use miden_tx::utils::Serializable; -use tokio::{net::TcpListener, runtime, task, time::sleep}; +use miden_node_utils::fee::test_fee_params; +use miden_node_validator::Validator; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::testing::random_signer::RandomBlockSigner as _; +use tokio::net::TcpListener; +use tokio::time::sleep; +use tokio::{runtime, task}; use tonic::transport::{Channel, Endpoint}; -use winterfell::Proof; +use url::Url; -use crate::{BlockProducer, SERVER_MAX_BATCHES_PER_BLOCK, SERVER_MAX_TXS_PER_BATCH}; +use crate::{BlockProducer, DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH}; +/// Tests that the block producer starts up correctly even when the store is not initially +/// available. The block producer should retry with exponential backoff until the store becomes +/// available, then start serving requests. #[tokio::test] async fn block_producer_startup_is_robust_to_network_failures() { - // This test starts the block producer and tests that it starts serving only after the store - // is started. 
- // get the addresses for the store and block producer let store_addr = { let store_listener = @@ -36,113 +34,106 @@ async fn block_producer_startup_is_robust_to_network_failures() { .expect("Failed to get block-producer address") }; - let ntx_builder_addr = { - let ntx_builder_address = TcpListener::bind("127.0.0.1:0") - .await - .expect("failed to bind the ntx builder address"); - ntx_builder_address.local_addr().expect("failed to get ntx builder address") + let validator_addr = { + let validator_listener = + TcpListener::bind("127.0.0.1:0").await.expect("failed to bind validator"); + validator_listener.local_addr().expect("failed to get validator address") }; - // start the block producer + let grpc_timeout = Duration::from_secs(30); + + // start the validator + task::spawn(async move { + let temp_dir = tempfile::tempdir().expect("tempdir should be created"); + let data_directory = temp_dir.path().to_path_buf(); + Validator { + address: validator_addr, + grpc_timeout, + signer: SecretKey::random(), + data_directory, + } + .serve() + .await + .unwrap(); + }); + + // start the block producer BEFORE the store is available + // this tests the exponential backoff behavior + let store_url = Url::parse(&format!("http://{store_addr}")).expect("Failed to parse store URL"); + let validator_url = + Url::parse(&format!("http://{validator_addr}")).expect("Failed to parse validator URL"); task::spawn(async move { BlockProducer { block_producer_address: block_producer_addr, - store_address: store_addr, - ntx_builder_address: Some(ntx_builder_addr), + store_url, + validator_url, batch_prover_url: None, - block_prover_url: None, batch_interval: Duration::from_millis(500), block_interval: Duration::from_millis(500), - max_txs_per_batch: SERVER_MAX_TXS_PER_BATCH, - max_batches_per_block: SERVER_MAX_BATCHES_PER_BLOCK, + max_txs_per_batch: DEFAULT_MAX_TXS_PER_BATCH, + max_batches_per_block: DEFAULT_MAX_BATCHES_PER_BLOCK, + grpc_timeout, + mempool_tx_capacity: 
NonZeroUsize::new(100).unwrap(), } .serve() .await .unwrap(); }); - // test: connecting to the block producer should fail until the store is started + // test: connecting to the block producer should fail because the store is not yet started + // (and therefore the block producer is not yet listening) let block_producer_endpoint = Endpoint::try_from(format!("http://{block_producer_addr}")).expect("valid url"); let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await; - assert!(block_producer_client.is_err()); + assert!( + block_producer_client.is_err(), + "Block producer should not be available before store is started" + ); // start the store let data_directory = tempfile::tempdir().expect("tempdir should be created"); - let store_runtime = { - let genesis_state = GenesisState::new(vec![], 1, 1); - Store::bootstrap(genesis_state.clone(), data_directory.path()) - .expect("store should bootstrap"); - let dir = data_directory.path().to_path_buf(); - let rpc_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); - let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind store ntx-builder gRPC endpoint"); - let block_producer_listener = TcpListener::bind(store_addr) - .await - .expect("store should bind the block-producer port"); - // in order to later kill the store, we need to spawn a new runtime and run the store on - // it. That allows us to kill all the tasks spawned by the store when we - // kill the runtime. 
- let store_runtime = - runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); - store_runtime.spawn(async move { - Store { - rpc_listener, - ntx_builder_listener, - block_producer_listener, - data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + let store_runtime = start_store(store_addr, data_directory.path()).await; + + // wait for the block producer's exponential backoff to connect to the store + // use a retry loop since CI environments may be slower + let block_producer_client = { + let mut attempts = 0; + loop { + attempts += 1; + match block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await { + Ok(client) => break client, + Err(_) if attempts < 30 => { + sleep(Duration::from_millis(200)).await; + }, + Err(e) => panic!( + "block producer client should connect after store is started (after {attempts} attempts): {e}" + ), } - .serve() - .await - .expect("store should start serving"); - }); - store_runtime + } }; - // we need to wait for the exponential backoff of the block producer to connect to the store - sleep(Duration::from_secs(1)).await; + // test: status request against block-producer should succeed + let response = send_status_request(block_producer_client).await; + assert!(response.is_ok(), "Status request should succeed, got: {:?}", response.err()); - let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint) - .await - .expect("block producer client should connect"); + // verify the response contains expected data + let status = response.unwrap().into_inner(); + assert_eq!(status.status, "connected"); - // test: request against block-producer api should succeed - let response = send_request(block_producer_client.clone(), 0).await; - assert!(response.is_ok()); - - // kill the store - shutdown_store(store_runtime).await; - - // test: request against block-producer api should fail immediately - let response = 
send_request(block_producer_client.clone(), 1).await; - assert!(response.is_err()); - - // test: restart the store and request should succeed - let store_runtime = restart_store(store_addr, data_directory.path()).await; - let response = send_request(block_producer_client.clone(), 2).await; - assert!(response.is_ok()); - - // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly + // Shutdown the store before data_directory is dropped to allow the database to flush properly shutdown_store(store_runtime).await; } -/// Shuts down the store runtime properly to allow RocksDB to flush before the temp directory is -/// deleted. -async fn shutdown_store(store_runtime: runtime::Runtime) { - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) - .await - .expect("shutdown should complete"); -} - -/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. -async fn restart_store( +/// Starts the store with a fresh genesis state and returns the runtime handle. 
+async fn start_store( store_addr: std::net::SocketAddr, data_directory: &std::path::Path, ) -> runtime::Runtime { + let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, SecretKey::random()); + Store::bootstrap(genesis_state.clone(), data_directory).expect("store should bootstrap"); + + let dir = data_directory.to_path_buf(); let rpc_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") @@ -151,7 +142,8 @@ async fn restart_store( let block_producer_listener = TcpListener::bind(store_addr) .await .expect("store should bind the block-producer port"); - let dir = data_directory.to_path_buf(); + + // Use a separate runtime so we can kill all store tasks later let store_runtime = runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); store_runtime.spawn(async move { @@ -159,8 +151,9 @@ async fn restart_store( rpc_listener, ntx_builder_listener, block_producer_listener, + block_prover_url: None, data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + grpc_timeout: Duration::from_secs(30), } .serve() .await @@ -169,32 +162,17 @@ async fn restart_store( store_runtime } -/// Creates a dummy transaction and submits it to the block producer. -async fn send_request( +/// Shuts down the store runtime properly to allow the database to flush before the temp directory +/// is deleted. +async fn shutdown_store(store_runtime: runtime::Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); +} + +/// Sends a status request to the block producer to verify connectivity. 
+async fn send_status_request( mut client: block_producer_client::ApiClient, - i: u8, -) -> Result, tonic::Status> -{ - let tx = ProvenTransactionBuilder::new( - AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Private, - ), - Digest::default(), - [i; 32].try_into().unwrap(), - Digest::default(), - 0.into(), - Digest::default(), - u32::MAX.into(), - ExecutionProof::new(Proof::new_dummy(), HashFunction::default()), - ) - .build() - .unwrap(); - let request = proto::transaction::ProvenTransaction { - transaction: tx.to_bytes(), - transaction_replay: None, - }; - client.submit_proven_transaction(request).await +) -> Result, tonic::Status> { + client.status(()).await } diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index a82a60582d..fb20bc160e 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -10,7 +10,8 @@ use miden_node_proto::{AccountState, generated as proto}; use miden_node_utils::formatting::format_opt; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, SignedBlock}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::Serializable; @@ -238,8 +239,15 @@ impl StoreClient { } #[instrument(target = COMPONENT, name = "store.client.apply_block", skip_all, err)] - pub async fn apply_block(&self, block: &ProvenBlock) -> Result<(), StoreError> { - let request = tonic::Request::new(proto::blockchain::Block { block: block.to_bytes() }); + pub async fn apply_block( + &self, + ordered_batches: &OrderedBatches, + signed_block: &SignedBlock, + ) -> Result<(), StoreError> { + let request = tonic::Request::new(proto::store::ApplyBlockRequest { 
+ ordered_batches: ordered_batches.to_bytes(), + block: Some(signed_block.into()), + }); self.client.clone().apply_block(request).await.map(|_| ()).map_err(Into::into) } diff --git a/crates/block-producer/src/test_utils/batch.rs b/crates/block-producer/src/test_utils/batch.rs index ecbd215863..ca705e241e 100644 --- a/crates/block-producer/src/test_utils/batch.rs +++ b/crates/block-producer/src/test_utils/batch.rs @@ -66,7 +66,7 @@ impl TransactionBatchConstructor for ProvenBatch { account_updates, InputNotes::new_unchecked(input_notes), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked( txs.into_iter().map(TransactionHeader::from).collect(), ), diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml new file mode 100644 index 0000000000..2a42af4305 --- /dev/null +++ b/crates/db/Cargo.toml @@ -0,0 +1,23 @@ +[package] +authors.workspace = true +description = "Shared database capabilities for Miden node" +edition.workspace = true +homepage.workspace = true +keywords = ["database", "miden", "node"] +license.workspace = true +name = "miden-node-db" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +deadpool = { default-features = false, workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +deadpool-sync = { default-features = false, workspace = true } +diesel = { features = ["sqlite"], workspace = true } +miden-protocol = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } diff --git a/crates/db/src/conv.rs b/crates/db/src/conv.rs new file mode 100644 index 0000000000..64c853c73d --- /dev/null +++ b/crates/db/src/conv.rs @@ -0,0 +1,183 @@ +//! Central place to define conversion from and to database primitive types +//! +//! Eventually, all of them should have types and we can implement a trait for them +//! rather than function pairs. +//! +//! 
Notice: All of them are infallible. The invariant is a sane content of the database +//! and humans ensure the sanity of casts. +//! +//! Notice: Keep in mind if you _need_ to expand the datatype, only if you require sorting this is +//! mandatory! +//! +//! Notice: Ensure you understand what casting does at the bit-level before changing any. +//! +//! Notice: Changing any of these are _backwards-incompatible_ changes that are not caught/covered +//! by migrations! + +#![expect( + clippy::inline_always, + reason = "Just unification helpers of 1-2 lines of casting types" +)] +#![expect( + dead_code, + reason = "Not all converters are used bidirectionally, however, keeping them is a good thing" +)] +#![expect( + clippy::cast_sign_loss, + reason = "This is the one file where we map the signed database types to the working types" +)] +#![expect( + clippy::cast_possible_wrap, + reason = "We will not approach the item count where i64 and usize casting will cause issues + on relevant platforms" +)] + +use miden_protocol::Felt; +use miden_protocol::account::{StorageSlotName, StorageSlotType}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::NoteTag; + +#[derive(Debug, thiserror::Error)] +#[error("failed to convert from database type {from_type} into {into_type}")] +pub struct DatabaseTypeConversionError { + source: Box, + from_type: &'static str, + into_type: &'static str, +} + +/// Convert from and to it's database representation and back +/// +/// We do not assume sanity of DB types. 
+pub trait SqlTypeConvert: Sized { + type Raw: Sized; + + fn to_raw_sql(self) -> Self::Raw; + fn from_raw_sql(_raw: Self::Raw) -> Result; + + fn map_err( + source: E, + ) -> DatabaseTypeConversionError { + DatabaseTypeConversionError { + source: Box::new(source), + from_type: std::any::type_name::(), + into_type: std::any::type_name::(), + } + } +} + +impl SqlTypeConvert for BlockNumber { + type Raw = i64; + + fn from_raw_sql(raw: Self::Raw) -> Result { + u32::try_from(raw).map(BlockNumber::from).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + i64::from(self.as_u32()) + } +} + +impl SqlTypeConvert for NoteTag { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[expect(clippy::cast_sign_loss)] + Ok(NoteTag::new(raw as u32)) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + self.as_u32() as i32 + } +} + +impl SqlTypeConvert for StorageSlotType { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("invalid storage slot type value {0}")] + struct ValueError(i32); + + Ok(match raw { + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, + }) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + match self { + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, + } + } +} + +impl SqlTypeConvert for StorageSlotName { + type Raw = String; + + fn from_raw_sql(raw: Self::Raw) -> Result { + StorageSlotName::new(raw).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + String::from(self) + } +} + +// Raw type conversions - eventually introduce wrapper types +// =========================================================== + +#[inline(always)] +pub(crate) fn raw_sql_to_nullifier_prefix(raw: i32) -> u16 { + debug_assert!(raw >= 0); + raw as u16 +} +#[inline(always)] +pub(crate) fn nullifier_prefix_to_raw_sql(prefix: u16) -> 
i32 { + i32::from(prefix) +} + +#[inline(always)] +pub(crate) fn raw_sql_to_nonce(raw: i64) -> Felt { + debug_assert!(raw >= 0); + Felt::new(raw as u64) +} +#[inline(always)] +pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { + nonce.as_int() as i64 +} + +#[inline(always)] +pub(crate) fn raw_sql_to_fungible_delta(raw: i64) -> i64 { + raw +} +#[inline(always)] +pub(crate) fn fungible_delta_to_raw_sql(delta: i64) -> i64 { + delta +} + +#[inline(always)] +#[expect(clippy::cast_sign_loss)] +pub(crate) fn raw_sql_to_note_type(raw: i32) -> u8 { + raw as u8 +} +#[inline(always)] +pub(crate) fn note_type_to_raw_sql(note_type: u8) -> i32 { + i32::from(note_type) +} + +#[inline(always)] +pub(crate) fn raw_sql_to_idx(raw: i32) -> usize { + raw as usize +} +#[inline(always)] +pub(crate) fn idx_to_raw_sql(idx: usize) -> i32 { + idx as i32 +} diff --git a/crates/db/src/errors.rs b/crates/db/src/errors.rs new file mode 100644 index 0000000000..5e59ff4b9a --- /dev/null +++ b/crates/db/src/errors.rs @@ -0,0 +1,98 @@ +use std::any::type_name; +use std::io; + +use deadpool_sync::InteractError; +use thiserror::Error; + +// SCHEMA VERIFICATION ERROR +// ================================================================================================= + +/// Errors that can occur during schema verification. 
+#[derive(Debug, Error)] +pub enum SchemaVerificationError { + #[error("failed to create in-memory reference database")] + InMemoryDbCreation(#[source] diesel::ConnectionError), + #[error("failed to apply migrations to reference database")] + MigrationApplication(#[source] Box), + #[error("failed to extract schema from database")] + SchemaExtraction(#[source] diesel::result::Error), + #[error( + "schema mismatch: expected {expected_count} objects, found {actual_count} \ + ({missing_count} missing, {extra_count} unexpected)" + )] + Mismatch { + expected_count: usize, + actual_count: usize, + missing_count: usize, + extra_count: usize, + }, +} + +// DATABASE ERROR +// ================================================================================================= + +#[derive(Debug, Error)] +pub enum DatabaseError { + #[error("SQLite pool interaction failed: {0}")] + InteractError(String), + #[error("setup deadpool connection pool failed")] + ConnectionPoolObtainError(#[from] Box), + #[error("conversion from SQL to rust type {to} failed")] + ConversionSqlToRust { + #[source] + inner: Option>, + to: &'static str, + }, + #[error(transparent)] + Diesel(#[from] diesel::result::Error), + #[error("schema verification failed")] + SchemaVerification(#[from] SchemaVerificationError), + #[error("I/O error")] + Io(#[from] io::Error), + #[error("pool build error")] + PoolBuild(#[from] deadpool::managed::BuildError), + #[error("Setup deadpool connection pool failed")] + Pool(#[from] deadpool::managed::PoolError), +} + +impl DatabaseError { + /// Converts from `InteractError` + /// + /// Note: Required since `InteractError` has at least one enum + /// variant that is _not_ `Send + Sync` and hence prevents the + /// `Sync` auto implementation. + /// This does an internal conversion to string while maintaining + /// convenience. 
+ /// + /// Using `MSG` as const so it can be called as + /// `.map_err(DatabaseError::interact::<"Your message">)` + pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { + let msg = msg.to_string(); + Self::InteractError(format!("{msg} failed: {e:?}")) + } + + /// Failed to convert an SQL entry to a rust representation + pub fn conversiont_from_sql(err: MaybeE) -> DatabaseError + where + MaybeE: Into>, + E: std::error::Error + Send + Sync + 'static, + { + DatabaseError::ConversionSqlToRust { + inner: err.into().map(|err| Box::new(err) as Box), + to: type_name::(), + } + } + + /// Creates a deserialization error with a static context string and the original error. + /// + /// This is a convenience wrapper around [`ConversionSqlToRust`](Self::ConversionSqlToRust). + pub fn deserialization( + context: &'static str, + source: impl std::error::Error + Send + Sync + 'static, + ) -> Self { + Self::ConversionSqlToRust { + inner: Some(Box::new(source)), + to: context, + } + } +} diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs new file mode 100644 index 0000000000..7000f131d1 --- /dev/null +++ b/crates/db/src/lib.rs @@ -0,0 +1,77 @@ +mod conv; +mod errors; +mod manager; + +use std::path::Path; + +pub use conv::{DatabaseTypeConversionError, SqlTypeConvert}; +use diesel::{RunQueryDsl, SqliteConnection}; +pub use errors::{DatabaseError, SchemaVerificationError}; +pub use manager::{ConnectionManager, ConnectionManagerError, configure_connection_on_creation}; +use tracing::Instrument; + +pub type Result = std::result::Result; + +/// Database handle that provides fundamental operations that various components of Miden Node can +/// utililze for their storage needs. +#[derive(Clone)] +pub struct Db { + pool: deadpool_diesel::Pool>, +} + +impl Db { + /// Creates a new database instance with the provided connection pool. 
+ pub fn new(database_filepath: &Path) -> Result { + let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); + let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; + Ok(Self { pool }) + } + + /// Create and commit a transaction with the queries added in the provided closure + pub async fn transact(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result + + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .in_current_span() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) + .in_current_span() + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + } + + /// Run the query _without_ a transaction + pub async fn query(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(move |conn| { + let r = query(conn)?; + Ok(r) + }) + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + } +} diff --git a/crates/store/src/db/manager.rs b/crates/db/src/manager.rs similarity index 85% rename from crates/store/src/db/manager.rs rename to crates/db/src/manager.rs index fca9a33db6..c34e7a15e9 100644 --- a/crates/store/src/db/manager.rs +++ b/crates/db/src/manager.rs @@ -36,12 +36,12 @@ impl ConnectionManagerError { /// Create a connection manager with per-connection setup /// /// Particularly, `foreign_key` checks are enabled and using a write-append-log for journaling. 
-pub(crate) struct ConnectionManager { +pub struct ConnectionManager { pub(crate) manager: deadpool_diesel::sqlite::Manager, } impl ConnectionManager { - pub(crate) fn new(database_path: &str) -> Self { + pub fn new(database_path: &str) -> Self { let manager = deadpool_diesel::sqlite::Manager::new( database_path.to_owned(), deadpool_diesel::sqlite::Runtime::Tokio1, @@ -75,9 +75,14 @@ impl deadpool::managed::Manager for ConnectionManager { } } -pub(crate) fn configure_connection_on_creation( +pub fn configure_connection_on_creation( conn: &mut SqliteConnection, ) -> Result<(), ConnectionManagerError> { + // Wait up to 3 seconds for writer locks before erroring. + diesel::sql_query("PRAGMA busy_timeout=3000") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; + // Enable the WAL mode. This allows concurrent reads while the transaction is being written, // this is required for proper synchronization of the servers in-memory and on-disk // representations (see [State::apply_block]) @@ -89,5 +94,10 @@ pub(crate) fn configure_connection_on_creation( diesel::sql_query("PRAGMA foreign_keys=ON") .execute(conn) .map_err(ConnectionManagerError::ConnectionParamSetup)?; + + // Set busy timeout so concurrent writers wait instead of immediately failing. 
+ diesel::sql_query("PRAGMA busy_timeout=5000") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; Ok(()) } diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 4fd7ff0017..0c30970a09 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -15,18 +15,21 @@ workspace = true [dependencies] anyhow = { workspace = true } +diesel = { features = ["numeric", "sqlite"], workspace = true } +diesel_migrations = { features = ["sqlite"], workspace = true } futures = { workspace = true } -indexmap = { workspace = true } libsqlite3-sys = { workspace = true } +miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["tx-prover"], workspace = true } miden-tx = { default-features = true, workspace = true } +prost = { workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } -tokio-util = { version = "0.7" } +tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } url = { workspace = true } @@ -35,8 +38,10 @@ url = { workspace = true } miden-node-test-macro = { path = "../test-macro" } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } -miden-standards = { workspace = true } +miden-standards = { features = ["testing"], workspace = true } +rand_chacha = { workspace = true } rstest = { workspace = true } +tempfile = { version = "3.20" } [package.metadata.cargo-machete] ignored = ["libsqlite3-sys"] diff --git a/crates/ntx-builder/build.rs b/crates/ntx-builder/build.rs new file mode 100644 index 0000000000..881be3168f --- /dev/null +++ b/crates/ntx-builder/build.rs @@ -0,0 +1,11 @@ +// This build.rs is required to trigger 
the `diesel_migrations::embed_migrations!` proc-macro in +// `src/db/migrations.rs` to include the latest version of the migrations into the binary, see +// . + +fn main() { + println!("cargo:rerun-if-changed=./src/db/migrations"); + // If we do one re-write, the default rules are disabled, + // hence we need to trigger explicitly on `Cargo.toml`. + // + println!("cargo:rerun-if-changed=Cargo.toml"); +} diff --git a/crates/ntx-builder/diesel.toml b/crates/ntx-builder/diesel.toml new file mode 100644 index 0000000000..71215dbf76 --- /dev/null +++ b/crates/ntx-builder/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/db/schema.rs" diff --git a/crates/ntx-builder/src/actor/account_effect.rs b/crates/ntx-builder/src/actor/account_effect.rs new file mode 100644 index 0000000000..7a6acf0058 --- /dev/null +++ b/crates/ntx-builder/src/actor/account_effect.rs @@ -0,0 +1,42 @@ +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{Account, AccountDelta, AccountId}; + +// NETWORK ACCOUNT EFFECT +// ================================================================================================ + +/// Represents the effect of a transaction on a network account. 
+#[derive(Clone)] +pub enum NetworkAccountEffect { + Created(Account), + Updated(AccountDelta), +} + +impl NetworkAccountEffect { + pub fn from_protocol(update: &AccountUpdateDetails) -> Option { + let update = match update { + AccountUpdateDetails::Private => return None, + AccountUpdateDetails::Delta(update) if update.is_full_state() => { + NetworkAccountEffect::Created( + Account::try_from(update) + .expect("Account should be derivable by full state AccountDelta"), + ) + }, + AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), + }; + + update.protocol_account_id().is_network().then_some(update) + } + + pub fn network_account_id(&self) -> NetworkAccountId { + // SAFETY: This is a network account by construction. + self.protocol_account_id().try_into().unwrap() + } + + fn protocol_account_id(&self) -> AccountId { + match self { + NetworkAccountEffect::Created(acc) => acc.id(), + NetworkAccountEffect::Updated(delta) => delta.id(), + } + } +} diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index 25020c8b2d..753dfee8a6 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -1,23 +1,10 @@ -use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::num::NonZeroUsize; +use std::sync::Arc; -use miden_node_proto::domain::account::NetworkAccountId; -use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; -use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::account::Account; -use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::block::{BlockHeader, BlockNumber}; -use miden_protocol::note::{Note, Nullifier}; -use miden_protocol::transaction::{PartialBlockchain, TransactionId}; -use tracing::instrument; +use miden_protocol::block::BlockHeader; +use miden_protocol::transaction::PartialBlockchain; -use 
super::ActorShutdownReason; -use super::note_state::{NetworkAccountEffect, NetworkAccountNoteState}; -use crate::COMPONENT; use crate::actor::inflight_note::InflightNetworkNote; -use crate::builder::ChainState; -use crate::store::{StoreClient, StoreError}; // TRANSACTION CANDIDATE // ================================================================================================ @@ -40,656 +27,7 @@ pub struct TransactionCandidate { pub chain_tip_header: BlockHeader, /// The chain MMR, which lags behind the tip by one block. - pub chain_mmr: PartialBlockchain, -} - -// NETWORK ACCOUNT STATE -// ================================================================================================ - -/// The current state of a network account. -#[derive(Clone)] -pub struct NetworkAccountState { - /// The network account ID corresponding to the network account this state represents. - account_id: NetworkAccountId, - - /// Component of this state which Contains the committed and inflight account updates as well - /// as available and nullified notes. - account: NetworkAccountNoteState, - - /// Uncommitted transactions which have some impact on the network state. - /// - /// This is tracked so we can commit or revert such transaction effects. Transactions _without_ - /// an impact are ignored. - inflight_txs: BTreeMap, - - /// Nullifiers of all network notes targeted at this account. - /// - /// Used to filter mempool events: when a `TransactionAdded` event reports consumed nullifiers, - /// only those present in this set are processed (moved from `available_notes` to - /// `nullified_notes`). Nullifiers are added when notes are loaded or created, and removed - /// when the consuming transaction is committed. - known_nullifiers: HashSet, -} - -impl NetworkAccountState { - /// Maximum number of attempts to execute a network note. - const MAX_NOTE_ATTEMPTS: usize = 30; - - /// Load's all available network notes from the store, along with the required account states. 
- #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] - pub async fn load( - account: Account, - account_id: NetworkAccountId, - store: &StoreClient, - block_num: BlockNumber, - ) -> Result { - let notes = store.get_unconsumed_network_notes(account_id, block_num.as_u32()).await?; - let notes = notes - .into_iter() - .map(|note| { - let NetworkNote::SingleTarget(note) = note; - note - }) - .collect::>(); - - let known_nullifiers: HashSet = - notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - - let account = NetworkAccountNoteState::new(account, notes); - - let state = Self { - account, - account_id, - inflight_txs: BTreeMap::default(), - known_nullifiers, - }; - - state.inject_telemetry(); - - Ok(state) - } - - /// Selects the next candidate network transaction. - #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] - pub fn select_candidate( - &mut self, - limit: NonZeroUsize, - chain_state: ChainState, - ) -> Option { - // Remove notes that have failed too many times. - self.account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); - - // Skip empty accounts, and prune them. - // This is how we keep the number of accounts bounded. - if self.account.is_empty() { - return None; - } - - // Select notes from the account that can be consumed or are ready for a retry. - let notes = self - .account - .available_notes(&chain_state.chain_tip_header.block_num()) - .take(limit.get()) - .cloned() - .collect::>(); - - // Skip accounts with no available notes. - if notes.is_empty() { - return None; - } - - let (chain_tip_header, chain_mmr) = chain_state.into_parts(); - TransactionCandidate { - account: self.account.latest_account(), - notes, - chain_tip_header, - chain_mmr, - } - .into() - } - - /// Marks notes of a previously selected candidate as failed. /// - /// Does not remove the candidate from the in-progress pool. 
- #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] - pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) { - let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - self.account.fail_notes(nullifiers.as_slice(), block_num); - } - - /// Updates state with the mempool event. - #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] - pub fn mempool_update(&mut self, update: &MempoolEvent) -> Option { - let span = tracing::Span::current(); - span.set_attribute("mempool_event.kind", update.kind()); - - match update { - MempoolEvent::TransactionAdded { - id, - nullifiers, - network_notes, - account_delta, - } => { - // Filter network notes relevant to this account. - let network_notes = filter_by_account_id_and_map_to_single_target( - self.account_id, - network_notes.clone(), - ); - self.add_transaction(*id, nullifiers, &network_notes, account_delta.as_ref()); - }, - MempoolEvent::TransactionsReverted(txs) => { - for tx in txs { - let shutdown_reason = self.revert_transaction(*tx); - if shutdown_reason.is_some() { - return shutdown_reason; - } - } - }, - MempoolEvent::BlockCommitted { txs, .. } => { - for tx in txs { - self.commit_transaction(*tx); - } - }, - } - self.inject_telemetry(); - - // No shutdown, continue running actor. - None - } - - /// Handles a [`MempoolEvent::TransactionAdded`] event. - fn add_transaction( - &mut self, - id: TransactionId, - nullifiers: &[Nullifier], - network_notes: &[SingleTargetNetworkNote], - account_delta: Option<&AccountUpdateDetails>, - ) { - // Skip transactions we already know about. - // - // This can occur since both ntx builder and the mempool might inform us of the same - // transaction. Once when it was submitted to the mempool, and once by the mempool event. 
- if self.inflight_txs.contains_key(&id) { - return; - } - - let mut tx_impact = TransactionImpact::default(); - if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { - let account_id = update.network_account_id(); - if account_id == self.account_id { - match update { - NetworkAccountEffect::Updated(account_delta) => { - self.account.add_delta(&account_delta); - tx_impact.account_delta = Some(account_id); - }, - NetworkAccountEffect::Created(_) => {}, - } - } - } - for note in network_notes { - assert_eq!( - note.account_id(), - self.account_id, - "note's account ID does not match network account actor's account ID" - ); - tx_impact.notes.insert(note.nullifier()); - self.known_nullifiers.insert(note.nullifier()); - self.account.add_note(note.clone()); - } - for nullifier in nullifiers { - // Ignore nullifiers that aren't network note nullifiers. - if !self.known_nullifiers.contains(nullifier) { - continue; - } - tx_impact.nullifiers.insert(*nullifier); - // We don't use the entry wrapper here because the account must already exist. - let _ = self.account.add_nullifier(*nullifier); - } - - if !tx_impact.is_empty() { - self.inflight_txs.insert(id, tx_impact); - } - } - - /// Handles [`MempoolEvent::BlockCommitted`] events. - fn commit_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(delta_account_id) = impact.account_delta { - if delta_account_id == self.account_id { - self.account.commit_delta(); - } - } - - for nullifier in impact.nullifiers { - if self.known_nullifiers.remove(&nullifier) { - // Its possible for the account to no longer exist if the transaction creating it - // was reverted. - self.account.commit_nullifier(nullifier); - } - } - } - - /// Handles [`MempoolEvent::TransactionsReverted`] events. 
- fn revert_transaction(&mut self, tx: TransactionId) -> Option { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - tracing::debug!("transaction {tx} not found in inflight transactions"); - return None; - }; - - // Revert account creation. - if let Some(account_id) = impact.account_delta { - // Account creation reverted, actor must stop. - if account_id == self.account_id && self.account.revert_delta() { - return Some(ActorShutdownReason::AccountReverted(account_id)); - } - } - - // Revert notes. - for note_nullifier in impact.notes { - if self.known_nullifiers.contains(¬e_nullifier) { - self.account.revert_note(note_nullifier); - self.known_nullifiers.remove(¬e_nullifier); - } - } - - // Revert nullifiers. - for nullifier in impact.nullifiers { - if self.known_nullifiers.contains(&nullifier) { - self.account.revert_nullifier(nullifier); - self.known_nullifiers.remove(&nullifier); - } - } - - None - } - - /// Adds stats to the current tracing span. - /// - /// Note that these are only visible in the OpenTelemetry context, as conventional tracing - /// does not track fields added dynamically. - fn inject_telemetry(&self) { - let span = tracing::Span::current(); - - span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); - span.set_attribute("ntx.state.notes.total", self.known_nullifiers.len()); - } -} - -/// The impact a transaction has on the state. -#[derive(Clone, Default)] -struct TransactionImpact { - /// The network account this transaction added an account delta to. - account_delta: Option, - - /// Network notes this transaction created. - notes: BTreeSet, - - /// Network notes this transaction consumed. 
- nullifiers: BTreeSet, -} - -impl TransactionImpact { - fn is_empty(&self) -> bool { - self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() - } -} - -/// Filters network notes by account ID and maps them to single target network notes. -fn filter_by_account_id_and_map_to_single_target( - account_id: NetworkAccountId, - notes: Vec, -) -> Vec { - notes - .into_iter() - .filter_map(|note| match note { - NetworkNote::SingleTarget(note) if note.account_id() == account_id => Some(note), - NetworkNote::SingleTarget(_) => None, - }) - .collect::>() -} - -#[cfg(test)] -mod tests { - use std::collections::HashSet; - use std::sync::{Arc, Mutex}; - - use miden_protocol::account::{AccountBuilder, AccountStorageMode, AccountType}; - use miden_protocol::asset::{Asset, FungibleAsset}; - use miden_protocol::crypto::rand::RpoRandomCoin; - use miden_protocol::note::{Note, NoteAttachment, NoteExecutionHint, NoteType}; - use miden_protocol::testing::account_id::AccountIdBuilder; - use miden_protocol::transaction::TransactionId; - use miden_protocol::{EMPTY_WORD, Felt, Hasher}; - use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; - - use super::*; - - // HELPERS - // ============================================================================================ - - /// Creates a network account for testing. - fn create_network_account(seed: u8) -> Account { - use miden_protocol::testing::noop_auth_component::NoopAuthComponent; - use miden_standards::account::wallets::BasicWallet; - - AccountBuilder::new([seed; 32]) - .account_type(AccountType::RegularAccountUpdatableCode) - .storage_mode(AccountStorageMode::Network) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .expect("should be able to build test account") - } - - /// Creates a faucet account ID for testing. 
- fn create_faucet_id(seed: u8) -> miden_protocol::account::AccountId { - AccountIdBuilder::new() - .account_type(AccountType::FungibleFaucet) - .storage_mode(AccountStorageMode::Public) - .build_with_seed([seed; 32]) - } - - /// Creates a note targeted at the given network account. - fn create_network_note( - target_account_id: miden_protocol::account::AccountId, - seed: u8, - ) -> Note { - let coin_seed: [u64; 4] = - [u64::from(seed), u64::from(seed) + 1, u64::from(seed) + 2, u64::from(seed) + 3]; - let rng = Arc::new(Mutex::new(RpoRandomCoin::new(coin_seed.map(Felt::new).into()))); - let mut rng = rng.lock().unwrap(); - - let faucet_id = create_faucet_id(seed.wrapping_add(100)); - - let target = NetworkAccountTarget::new(target_account_id, NoteExecutionHint::Always) - .expect("NetworkAccountTarget creation should succeed for network account"); - let attachment: NoteAttachment = target.into(); - - create_p2id_note( - target_account_id, - target_account_id, - vec![Asset::Fungible(FungibleAsset::new(faucet_id, 10).unwrap())], - NoteType::Public, - attachment, - &mut *rng, - ) - .expect("note creation should succeed") - } - - /// Creates a `SingleTargetNetworkNote` from a `Note`. - fn to_single_target_note(note: Note) -> SingleTargetNetworkNote { - SingleTargetNetworkNote::try_from(note).expect("should convert to SingleTargetNetworkNote") - } - - /// Creates a mock `TransactionId` for testing. - fn mock_tx_id(seed: u8) -> TransactionId { - TransactionId::new( - Hasher::hash(&[seed; 32]), - Hasher::hash(&[seed.wrapping_add(1); 32]), - EMPTY_WORD, - EMPTY_WORD, - ) - } - - /// Creates a mock `BlockHeader` for testing. 
- fn mock_block_header(block_num: u32) -> miden_protocol::block::BlockHeader { - use miden_node_utils::fee::test_fee_params; - use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; - - miden_protocol::block::BlockHeader::new( - 0, - EMPTY_WORD, - BlockNumber::from(block_num), - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - EMPTY_WORD, - SecretKey::new().public_key(), - test_fee_params(), - 0, - ) - } - - impl NetworkAccountState { - /// Creates a new `NetworkAccountState` for testing. - /// - /// This mirrors the behavior of `load()` but with provided notes instead of - /// fetching from the store. - #[cfg(test)] - pub fn new_for_testing( - account: Account, - account_id: NetworkAccountId, - notes: Vec, - ) -> Self { - let known_nullifiers: HashSet = - notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - - let account = NetworkAccountNoteState::new(account, notes); - - Self { - account, - account_id, - inflight_txs: BTreeMap::default(), - known_nullifiers, - } - } - } - - // TESTS - // ============================================================================================ - - /// Tests that initial notes loaded into `NetworkAccountState` have their nullifiers - /// registered in `known_nullifiers`. 
- #[test] - fn test_initial_notes_have_nullifiers_indexed() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let note2 = to_single_target_note(create_network_note(account_id, 2)); - let nullifier1 = note1.nullifier(); - let nullifier2 = note2.nullifier(); - - let state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - - assert!( - state.known_nullifiers.contains(&nullifier1), - "known_nullifiers should contain first note's nullifier" - ); - assert!( - state.known_nullifiers.contains(&nullifier2), - "known_nullifiers should contain second note's nullifier" - ); - assert_eq!( - state.known_nullifiers.len(), - 2, - "known_nullifiers should have exactly 2 entries" - ); - } - - /// Tests that when a `TransactionAdded` event arrives with nullifiers from initial notes, - /// those notes are properly moved from `available_notes` to `nullified_notes`. 
- #[test] - fn test_mempool_event_nullifies_initial_notes() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let note2 = to_single_target_note(create_network_note(account_id, 2)); - let nullifier1 = note1.nullifier(); - let nullifier2 = note2.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); - assert_eq!(available_count, 2, "both notes should be available initially"); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - - let shutdown = state.mempool_update(&event); - assert!(shutdown.is_none(), "mempool_update should not trigger shutdown"); - - let available_nullifiers: Vec<_> = state - .account - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - !available_nullifiers.contains(&nullifier1), - "note1 should no longer be available" - ); - assert!(available_nullifiers.contains(&nullifier2), "note2 should still be available"); - assert_eq!(available_nullifiers.len(), 1, "only one note should be available"); - - assert!( - state.inflight_txs.contains_key(&tx_id), - "transaction should be tracked in inflight_txs" - ); - } - - /// Tests that after committing a transaction, the nullifier is removed from `known_nullifiers`. 
- #[test] - fn test_commit_removes_nullifier_from_index() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let nullifier1 = note1.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1]); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - state.mempool_update(&event); - - assert!( - state.known_nullifiers.contains(&nullifier1), - "nullifier should still be in index while transaction is inflight" - ); - - let commit_event = MempoolEvent::BlockCommitted { - header: Box::new(mock_block_header(1)), - txs: vec![tx_id], - }; - state.mempool_update(&commit_event); - - assert!( - !state.known_nullifiers.contains(&nullifier1), - "nullifier should be removed from index after commit" - ); - } - - /// Tests that reverting a transaction restores the note to `available_notes`. 
- #[test] - fn test_revert_restores_note_to_available() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let note1 = to_single_target_note(create_network_note(account_id, 1)); - let nullifier1 = note1.nullifier(); - - let mut state = - NetworkAccountState::new_for_testing(account, network_account_id, vec![note1]); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![nullifier1], - network_notes: vec![], - account_delta: None, - }; - state.mempool_update(&event); - - // Verify note is not available - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); - assert_eq!(available_count, 0, "note should not be available after being consumed"); - - // Revert the transaction - let revert_event = - MempoolEvent::TransactionsReverted(HashSet::from_iter(std::iter::once(tx_id))); - state.mempool_update(&revert_event); - - // Verify note is available again - let available_nullifiers: Vec<_> = state - .account - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - available_nullifiers.contains(&nullifier1), - "note should be available again after revert" - ); - } - - /// Tests that nullifiers from dynamically added notes are also indexed. 
- #[test] - fn test_dynamically_added_notes_are_indexed() { - let account = create_network_account(1); - let account_id = account.id(); - let network_account_id = - NetworkAccountId::try_from(account_id).expect("should be a network account"); - - let mut state = NetworkAccountState::new_for_testing(account, network_account_id, vec![]); - - assert!(state.known_nullifiers.is_empty(), "known_nullifiers should be empty initially"); - - let new_note = to_single_target_note(create_network_note(account_id, 1)); - let new_nullifier = new_note.nullifier(); - - let tx_id = mock_tx_id(1); - let event = MempoolEvent::TransactionAdded { - id: tx_id, - nullifiers: vec![], - network_notes: vec![NetworkNote::SingleTarget(new_note)], - account_delta: None, - }; - - state.mempool_update(&event); - - // Verify the new note's nullifier is now indexed - assert!( - state.known_nullifiers.contains(&new_nullifier), - "dynamically added note's nullifier should be indexed" - ); - - // Verify the note is available - let available_nullifiers: Vec<_> = state - .account - .available_notes(&BlockNumber::from(0)) - .map(|n| n.to_inner().nullifier()) - .collect(); - assert!( - available_nullifiers.contains(&new_nullifier), - "dynamically added note should be available" - ); - } + /// Wrapped in `Arc` to avoid expensive clones when reading the chain state. 
+ pub chain_mmr: Arc, } diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index edcf58c07e..09658cd233 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -31,7 +31,7 @@ use miden_protocol::transaction::{ TransactionInputs, }; use miden_protocol::vm::FutureMaybeSend; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use miden_tx::auth::UnreachableAuth; use miden_tx::utils::Serializable; use miden_tx::{ @@ -55,6 +55,7 @@ use tracing::{Instrument, instrument}; use crate::COMPONENT; use crate::actor::account_state::TransactionCandidate; use crate::block_producer::BlockProducerClient; +use crate::db::Db; use crate::store::StoreClient; #[derive(Debug, thiserror::Error)] @@ -77,13 +78,19 @@ pub enum NtxError { type NtxResult = Result; +/// The result of a successful transaction execution. +/// +/// Contains the transaction ID, any notes that failed during filtering, and note scripts fetched +/// from the remote store that should be persisted to the local DB cache. +pub type NtxExecutionResult = (TransactionId, Vec, Vec<(Word, NoteScript)>); + // NETWORK TRANSACTION CONTEXT // ================================================================================================ /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - /// TODO(sergerad): Remove block producer client when block proving moved to store. + /// Client for submitting proven transactions to the Block Producer. block_producer: BlockProducerClient, /// Client for validating transactions via the Validator. @@ -100,6 +107,9 @@ pub struct NtxContext { /// LRU cache for storing retrieved note scripts to avoid repeated store calls. script_cache: LruCache, + + /// Local database for persistent note script caching. 
+ db: Db, } impl NtxContext { @@ -110,6 +120,7 @@ impl NtxContext { prover: Option, store: StoreClient, script_cache: LruCache, + db: Db, ) -> Self { Self { block_producer, @@ -117,6 +128,7 @@ impl NtxContext { prover, store, script_cache, + db, } } @@ -132,8 +144,9 @@ impl NtxContext { /// /// # Returns /// - /// On success, returns the [`TransactionId`] of the executed transaction and a list of - /// [`FailedNote`]s representing notes that were filtered out before execution. + /// On success, returns an [`NtxExecutionResult`] containing the transaction ID, any notes + /// that failed during filtering, and note scripts fetched from the remote store that should + /// be persisted to the local DB cache. /// /// # Errors /// @@ -146,7 +159,7 @@ impl NtxContext { pub fn execute_transaction( self, tx: TransactionCandidate, - ) -> impl FutureMaybeSend)>> { + ) -> impl FutureMaybeSend> { let TransactionCandidate { account, notes, @@ -168,6 +181,7 @@ impl NtxContext { chain_mmr, self.store.clone(), self.script_cache.clone(), + self.db.clone(), ); // Filter notes. @@ -178,6 +192,9 @@ impl NtxContext { // Execute transaction. let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; + // Collect scripts fetched from the remote store during execution. + let scripts_to_cache = data_store.take_fetched_scripts().await; + // Prove transaction. let tx_inputs: TransactionInputs = executed_tx.into(); let proven_tx = Box::pin(self.prove(&tx_inputs)).await?; @@ -188,7 +205,7 @@ impl NtxContext { // Submit transaction to block producer. self.submit(&proven_tx).await?; - Ok((proven_tx.id(), failed_notes)) + Ok((proven_tx.id(), failed_notes, scripts_to_cache)) }) .in_current_span() .await @@ -327,12 +344,18 @@ impl NtxContext { struct NtxDataStore { account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + /// The chain MMR, wrapped in `Arc` to avoid expensive clones when reading the chain state. 
+ chain_mmr: Arc, mast_store: TransactionMastStore, /// Store client for retrieving note scripts. store: StoreClient, /// LRU cache for storing retrieved note scripts to avoid repeated store calls. script_cache: LruCache, + /// Local database for persistent note script. + db: Db, + /// Scripts fetched from the remote store during execution, to be persisted by the + /// coordinator. + fetched_scripts: Arc>>, /// Mapping of storage map roots to storage slot names observed during various calls. /// /// The registered slot names are subsequently used to retrieve storage map witnesses from the @@ -362,9 +385,10 @@ impl NtxDataStore { fn new( account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + chain_mmr: Arc, store: StoreClient, script_cache: LruCache, + db: Db, ) -> Self { let mast_store = TransactionMastStore::new(); mast_store.load_account_code(account.code()); @@ -376,10 +400,17 @@ impl NtxDataStore { mast_store, store, script_cache, + db, + fetched_scripts: Arc::new(Mutex::new(Vec::new())), storage_slots: Arc::new(Mutex::new(BTreeMap::default())), } } + /// Returns the list of note scripts fetched from the remote store during execution. + async fn take_fetched_scripts(&self) -> Vec<(Word, NoteScript)> { + self.fetched_scripts.lock().await.drain(..).collect() + } + /// Registers storage map slot names for the given account ID and storage header. /// /// These slot names are subsequently used to query for storage map witnesses against the store. @@ -421,7 +452,7 @@ impl DataStore for NtxDataStore { .await; let partial_account = PartialAccount::from(&self.account); - Ok((partial_account, self.reference_block.clone(), self.chain_mmr.clone())) + Ok((partial_account, self.reference_block.clone(), (*self.chain_mmr).clone())) } } @@ -506,28 +537,40 @@ impl DataStore for NtxDataStore { /// Retrieves a note script by its root hash. 
/// - /// This implementation uses the configured RPC client to call the `GetNoteScriptByRoot` - /// endpoint on the RPC server. + /// Uses a 3-tier lookup strategy: + /// 1. In-memory LRU cache. + /// 2. Local SQLite database. + /// 3. Remote store via gRPC. fn get_note_script( &self, script_root: Word, ) -> impl FutureMaybeSend, DataStoreError>> { async move { - // Attempt to retrieve the script from the cache. + // 1. In-memory LRU cache. if let Some(cached_script) = self.script_cache.get(&script_root).await { return Ok(Some(cached_script)); } - // Retrieve the script from the store. + // 2. Local DB. + if let Some(script) = self.db.lookup_note_script(script_root).await.map_err(|err| { + DataStoreError::other_with_source("failed to look up note script in local DB", err) + })? { + self.script_cache.put(script_root, script.clone()).await; + return Ok(Some(script)); + } + + // 3. Remote store. let maybe_script = self.store.get_note_script_by_root(script_root).await.map_err(|err| { - DataStoreError::Other { - error_msg: "failed to retrieve note script from store".to_string().into(), - source: Some(err.into()), - } + DataStoreError::other_with_source( + "failed to retrieve note script from store", + err, + ) })?; - // Handle response. + if let Some(script) = maybe_script { + // Collect for later persistence by the coordinator. + self.fetched_scripts.lock().await.push((script_root, script.clone())); self.script_cache.put(script_root, script.clone()).await; Ok(Some(script)) } else { diff --git a/crates/ntx-builder/src/actor/inflight_note.rs b/crates/ntx-builder/src/actor/inflight_note.rs index 23c7d06d72..4cc0808627 100644 --- a/crates/ntx-builder/src/actor/inflight_note.rs +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -29,6 +29,15 @@ impl InflightNetworkNote { } } + /// Reconstructs an inflight network note from its constituent parts (e.g., from DB rows). 
+ pub fn from_parts( + note: SingleTargetNetworkNote, + attempt_count: usize, + last_attempt: Option, + ) -> Self { + Self { note, attempt_count, last_attempt } + } + /// Consumes the inflight network note and returns the inner network note. pub fn into_inner(self) -> SingleTargetNetworkNote { self.note diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index ae8f63629e..ecb72552be 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -1,12 +1,13 @@ +pub(crate) mod account_effect; pub mod account_state; mod execute; -mod inflight_note; -mod note_state; +pub(crate) mod inflight_note; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; -use account_state::{NetworkAccountState, TransactionCandidate}; +use account_state::TransactionCandidate; use futures::FutureExt; use miden_node_proto::clients::{Builder, ValidatorClient}; use miden_node_proto::domain::account::NetworkAccountId; @@ -16,24 +17,38 @@ use miden_node_utils::lru_cache::LruCache; use miden_protocol::Word; use miden_protocol::account::{Account, AccountDelta}; use miden_protocol::block::BlockNumber; -use miden_protocol::note::NoteScript; +use miden_protocol::note::{Note, NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use url::Url; use crate::block_producer::BlockProducerClient; use crate::builder::ChainState; +use crate::db::Db; use crate::store::StoreClient; +// ACTOR NOTIFICATION +// ================================================================================================ + +/// A notification sent from an account actor to the coordinator. 
+pub enum ActorNotification { + /// One or more notes failed during transaction execution and should have their attempt + /// counters incremented. + NotesFailed { + nullifiers: Vec, + block_num: BlockNumber, + }, + /// A note script was fetched from the remote store and should be persisted to the local DB. + CacheNoteScript { script_root: Word, script: NoteScript }, +} + // ACTOR SHUTDOWN REASON // ================================================================================================ /// The reason an actor has shut down. pub enum ActorShutdownReason { - /// Occurs when the transaction that created the actor is reverted. - AccountReverted(NetworkAccountId), /// Occurs when an account actor detects failure in the messaging channel used by the /// coordinator. EventChannelClosed, @@ -66,6 +81,14 @@ pub struct AccountActorContext { /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. /// This cache is shared across all account actors to maximize cache efficiency. pub script_cache: LruCache, + /// Maximum number of notes per transaction. + pub max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. + pub max_note_attempts: usize, + /// Database for persistent state. + pub db: Db, + /// Channel for sending notifications to the coordinator (via the builder event loop). + pub notification_tx: mpsc::Sender, } // ACCOUNT ORIGIN @@ -127,10 +150,10 @@ enum ActorMode { /// /// ## Core Responsibilities /// -/// - **State Management**: Loads and maintains the current state of network accounts, including -/// available notes, pending transactions, and account commitments. +/// - **State Management**: Queries the database for the current state of network accounts, +/// including available notes and the latest account state. /// - **Transaction Selection**: Selects viable notes and constructs a [`TransactionCandidate`] -/// based on current chain state. 
+/// based on current chain state and DB queries. /// - **Transaction Execution**: Executes selected transactions using either local or remote /// proving. /// - **Mempool Integration**: Listens for mempool events to stay synchronized with the network @@ -138,11 +161,12 @@ enum ActorMode { /// /// ## Lifecycle /// -/// 1. **Initialization**: Loads account state from the store or uses provided account data. +/// 1. **Initialization**: Checks DB for available notes to determine initial mode. /// 2. **Event Loop**: Continuously processes mempool events and executes transactions. /// 3. **Transaction Processing**: Selects, executes, and proves transactions, and submits them to /// block producer. -/// 4. **State Updates**: Updates internal state based on mempool events and execution results. +/// 4. **State Updates**: Event effects are persisted to DB by the coordinator before actors are +/// notified. /// 5. **Shutdown**: Terminates gracefully when cancelled or encounters unrecoverable errors. /// /// ## Concurrency @@ -153,15 +177,21 @@ enum ActorMode { pub struct AccountActor { origin: AccountOrigin, store: StoreClient, + db: Db, mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, - // TODO(sergerad): Remove block producer when block proving moved to store. block_producer: BlockProducerClient, validator: ValidatorClient, prover: Option, chain_state: Arc>, script_cache: LruCache, + /// Maximum number of notes per transaction. + max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. + max_note_attempts: usize, + /// Channel for sending notifications to the coordinator. 
+ notification_tx: mpsc::Sender, } impl AccountActor { @@ -185,6 +215,7 @@ impl AccountActor { Self { origin, store: actor_context.store.clone(), + db: actor_context.db.clone(), mode: ActorMode::NoViableNotes, event_rx, cancel_token, @@ -193,29 +224,28 @@ impl AccountActor { prover, chain_state: actor_context.chain_state.clone(), script_cache: actor_context.script_cache.clone(), + max_notes_per_tx: actor_context.max_notes_per_tx, + max_note_attempts: actor_context.max_note_attempts, + notification_tx: actor_context.notification_tx.clone(), } } /// Runs the account actor, processing events and managing state until a reason to shutdown is /// encountered. pub async fn run(mut self, semaphore: Arc) -> ActorShutdownReason { - // Load the account state from the store and set up the account actor state. - let account = { - match self.origin { - AccountOrigin::Store(account_id) => self - .store - .get_network_account(account_id) - .await - .expect("actor should be able to load account") - .expect("actor account should exist"), - AccountOrigin::Transaction(ref account) => *(account.clone()), - } - }; + let account_id = self.origin.id(); + + // Determine initial mode by checking DB for available notes. let block_num = self.chain_state.read().await.chain_tip_header.block_num(); - let mut state = - NetworkAccountState::load(account, self.origin.id(), &self.store, block_num) - .await - .expect("actor should be able to load account state"); + let has_notes = self + .db + .has_available_notes(account_id, block_num, self.max_note_attempts) + .await + .expect("actor should be able to check for available notes"); + + if has_notes { + self.mode = ActorMode::NotesAvailable; + } loop { // Enable or disable transaction execution based on actor mode. @@ -229,28 +259,31 @@ impl AccountActor { }; tokio::select! { _ = self.cancel_token.cancelled() => { - return ActorShutdownReason::Cancelled(self.origin.id()); + return ActorShutdownReason::Cancelled(account_id); } // Handle mempool events. 
event = self.event_rx.recv() => { let Some(event) = event else { return ActorShutdownReason::EventChannelClosed; }; - // Re-enable transaction execution if the transaction being waited on has been - // added to the mempool. + // Re-enable transaction execution if the transaction being waited on has + // been resolved (added to mempool, committed in a block, or reverted). if let ActorMode::TransactionInflight(awaited_id) = self.mode { - if let MempoolEvent::TransactionAdded { id, .. } = *event { - if id == awaited_id { - self.mode = ActorMode::NotesAvailable; - } + let should_wake = match event.as_ref() { + MempoolEvent::TransactionAdded { id, .. } => *id == awaited_id, + MempoolEvent::BlockCommitted { txs, .. } => { + txs.contains(&awaited_id) + }, + MempoolEvent::TransactionsReverted(tx_ids) => { + tx_ids.contains(&awaited_id) + }, + }; + if should_wake { + self.mode = ActorMode::NotesAvailable; } } else { self.mode = ActorMode::NotesAvailable; } - // Update state. - if let Some(shutdown_reason) = state.mempool_update(event.as_ref()) { - return shutdown_reason; - } }, // Execute transactions. permit = tx_permit_acquisition => { @@ -258,9 +291,15 @@ impl AccountActor { Ok(_permit) => { // Read the chain state. let chain_state = self.chain_state.read().await.clone(); - // Find a candidate transaction and execute it. - if let Some(tx_candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX, chain_state) { - self.execute_transactions(&mut state, tx_candidate).await; + + // Query DB for latest account and available notes. + let tx_candidate = self.select_candidate_from_db( + account_id, + chain_state, + ).await; + + if let Some(tx_candidate) = tx_candidate { + self.execute_transactions(account_id, tx_candidate).await; } else { // No transactions to execute, wait for events. self.mode = ActorMode::NoViableNotes; @@ -275,13 +314,44 @@ impl AccountActor { } } + /// Selects a transaction candidate by querying the DB. 
+ async fn select_candidate_from_db( + &self, + account_id: NetworkAccountId, + chain_state: ChainState, + ) -> Option { + let block_num = chain_state.chain_tip_header.block_num(); + let max_notes = self.max_notes_per_tx.get(); + + let (latest_account, notes) = self + .db + .select_candidate(account_id, block_num, self.max_note_attempts) + .await + .expect("actor should be able to query DB for candidate"); + + let account = latest_account?; + + let notes: Vec<_> = notes.into_iter().take(max_notes).collect(); + if notes.is_empty() { + return None; + } + + let (chain_tip_header, chain_mmr) = chain_state.into_parts(); + Some(TransactionCandidate { + account, + notes, + chain_tip_header, + chain_mmr, + }) + } + /// Execute a transaction candidate and mark notes as failed as required. /// /// Updates the state of the actor based on the execution result. - #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, state, tx_candidate))] + #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, tx_candidate))] async fn execute_transactions( &mut self, - state: &mut NetworkAccountState, + account_id: NetworkAccountId, tx_candidate: TransactionCandidate, ) { let block_num = tx_candidate.chain_tip_header.block_num(); @@ -293,31 +363,58 @@ impl AccountActor { self.prover.clone(), self.store.clone(), self.script_cache.clone(), + self.db.clone(), ); let notes = tx_candidate.notes.clone(); let execution_result = context.execute_transaction(tx_candidate).await; match execution_result { // Execution completed without failed notes. - Ok((tx_id, failed)) if failed.is_empty() => { + Ok((tx_id, failed, scripts_to_cache)) if failed.is_empty() => { + self.cache_note_scripts(scripts_to_cache).await; self.mode = ActorMode::TransactionInflight(tx_id); }, // Execution completed with some failed notes. 
- Ok((tx_id, failed)) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(notes.as_slice(), block_num); + Ok((tx_id, failed, scripts_to_cache)) => { + self.cache_note_scripts(scripts_to_cache).await; + let nullifiers: Vec<_> = + failed.into_iter().map(|note| note.note.nullifier()).collect(); + self.mark_notes_failed(&nullifiers, block_num).await; self.mode = ActorMode::TransactionInflight(tx_id); }, // Transaction execution failed. Err(err) => { tracing::error!(err = err.as_report(), "network transaction failed"); self.mode = ActorMode::NoViableNotes; - let notes = - notes.into_iter().map(|note| note.into_inner().into()).collect::>(); - state.notes_failed(notes.as_slice(), block_num); + let nullifiers: Vec<_> = notes + .into_iter() + .map(|note| Note::from(note.into_inner()).nullifier()) + .collect(); + self.mark_notes_failed(&nullifiers, block_num).await; }, } } + + /// Sends notifications to the coordinator to cache note scripts fetched from the remote store. + async fn cache_note_scripts(&self, scripts: Vec<(Word, NoteScript)>) { + for (script_root, script) in scripts { + let _ = self + .notification_tx + .send(ActorNotification::CacheNoteScript { script_root, script }) + .await; + } + } + + /// Sends a notification to the coordinator to mark notes as failed. + async fn mark_notes_failed(&self, nullifiers: &[Nullifier], block_num: BlockNumber) { + let _ = self + .notification_tx + .send(ActorNotification::NotesFailed { + nullifiers: nullifiers.to_vec(), + block_num, + }) + .await; + } } // HELPERS @@ -334,7 +431,7 @@ impl AccountActor { /// - After 10 attempts, the backoff period is 12 blocks. /// - After 20 attempts, the backoff period is 148 blocks. /// - etc... 
-#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] +#[expect(clippy::cast_precision_loss, clippy::cast_sign_loss)] fn has_backoff_passed( chain_tip: BlockNumber, last_attempt: Option, @@ -354,3 +451,34 @@ fn has_backoff_passed( // Check if the backoff period has passed. blocks_passed.as_usize() > backoff_threshold } + +#[cfg(test)] +mod tests { + use miden_protocol::block::BlockNumber; + + use super::has_backoff_passed; + + #[rstest::rstest] + #[test] + #[case::all_zero(Some(BlockNumber::GENESIS), BlockNumber::GENESIS, 0, true)] + #[case::no_attempts(None, BlockNumber::GENESIS, 0, true)] + #[case::one_attempt(Some(BlockNumber::GENESIS), BlockNumber::from(2), 1, true)] + #[case::three_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(3), 3, true)] + #[case::ten_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(13), 10, true)] + #[case::twenty_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(149), 20, true)] + #[case::one_attempt_false(Some(BlockNumber::GENESIS), BlockNumber::from(1), 1, false)] + #[case::three_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(2), 3, false)] + #[case::ten_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(12), 10, false)] + #[case::twenty_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(148), 20, false)] + fn backoff_has_passed( + #[case] last_attempt_block_num: Option, + #[case] current_block_num: BlockNumber, + #[case] attempt_count: usize, + #[case] backoff_should_have_passed: bool, + ) { + assert_eq!( + backoff_should_have_passed, + has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) + ); + } +} diff --git a/crates/ntx-builder/src/actor/note_state.rs b/crates/ntx-builder/src/actor/note_state.rs deleted file mode 100644 index 87b91fc21a..0000000000 --- a/crates/ntx-builder/src/actor/note_state.rs +++ /dev/null @@ -1,258 +0,0 @@ -use std::collections::{HashMap, VecDeque}; - -use miden_node_proto::domain::account::NetworkAccountId; -use 
miden_node_proto::domain::note::SingleTargetNetworkNote; -use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::account::{Account, AccountDelta, AccountId}; -use miden_protocol::block::BlockNumber; -use miden_protocol::note::Nullifier; - -use crate::actor::inflight_note::InflightNetworkNote; - -// ACCOUNT STATE -// ================================================================================================ - -/// Tracks the state of a network account and its notes. -#[derive(Clone)] -pub struct NetworkAccountNoteState { - /// The committed account state, if any. - /// - /// Its possible this is `None` if the account creation transaction is still inflight. - committed: Option, - - /// Inflight account updates in chronological order. - inflight: VecDeque, - - /// Unconsumed notes of this account. - available_notes: HashMap, - - /// Notes which have been consumed by transactions that are still inflight. - nullified_notes: HashMap, -} - -impl NetworkAccountNoteState { - /// Creates a new account state from the supplied account and notes. - pub fn new(account: Account, notes: Vec) -> Self { - let account_id = NetworkAccountId::try_from(account.id()) - .expect("only network accounts are used for account state"); - - let mut state = Self { - committed: Some(account), - inflight: VecDeque::default(), - available_notes: HashMap::default(), - nullified_notes: HashMap::default(), - }; - - for note in notes { - // Currently only support single target network notes in NTB. - assert!( - note.account_id() == account_id, - "Notes supplied into account state must match expected account ID" - ); - state.add_note(note); - } - - state - } - - /// Returns an iterator over inflight notes that are not currently within their respective - /// backoff periods based on block number. 
- pub fn available_notes( - &self, - block_num: &BlockNumber, - ) -> impl Iterator { - self.available_notes.values().filter(|¬e| note.is_available(*block_num)) - } - - /// Appends a delta to the set of inflight account updates. - pub fn add_delta(&mut self, delta: &AccountDelta) { - let mut state = self.latest_account(); - state - .apply_delta(delta) - .expect("network account delta should apply since it was accepted by the mempool"); - - self.inflight.push_back(state); - } - - /// Commits the oldest account state delta. - /// - /// # Panics - /// - /// Panics if there are no deltas to commit. - pub fn commit_delta(&mut self) { - self.committed = self.inflight.pop_front().expect("must have a delta to commit").into(); - } - - /// Reverts the newest account state delta. - /// - /// # Returns - /// - /// Returns `true` if this reverted the account creation delta. The caller _must_ remove this - /// account and associated notes as calls to `account` will panic. - /// - /// # Panics - /// - /// Panics if there are no deltas to revert. - #[must_use = "must remove this account and its notes"] - pub fn revert_delta(&mut self) -> bool { - self.inflight.pop_back().expect("must have a delta to revert"); - self.committed.is_none() && self.inflight.is_empty() - } - - /// Adds a new network note making it available for consumption. - pub fn add_note(&mut self, note: SingleTargetNetworkNote) { - self.available_notes.insert(note.nullifier(), InflightNetworkNote::new(note)); - } - - /// Removes the note completely. - pub fn revert_note(&mut self, note: Nullifier) { - // Transactions can be reverted out of order. - // - // This means the tx which nullified the note might not have been reverted yet, and the note - // might still be in the nullified - self.available_notes.remove(¬e); - self.nullified_notes.remove(¬e); - } - - /// Marks a note as being consumed. - /// - /// The note data is retained until the nullifier is committed. 
- /// - /// Returns `Err(())` if the note does not exist or was already nullified. - pub fn add_nullifier(&mut self, nullifier: Nullifier) -> Result<(), ()> { - if let Some(note) = self.available_notes.remove(&nullifier) { - self.nullified_notes.insert(nullifier, note); - Ok(()) - } else { - tracing::warn!(%nullifier, "note must be available to nullify"); - Err(()) - } - } - - /// Marks a nullifier as being committed, removing the associated note data entirely. - /// - /// Silently ignores the request if the nullifier is not present, which can happen - /// if the note's transaction wasn't available when the nullifier was added. - pub fn commit_nullifier(&mut self, nullifier: Nullifier) { - // we might not have this if we didn't add it with `add_nullifier` - // in case it's transaction wasn't available in the first place. - // It shouldn't happen practically, since we skip them if the - // relevant account cannot be retrieved via `fetch`. - - let _ = self.nullified_notes.remove(&nullifier); - } - - /// Reverts a nullifier, marking the associated note as available again. - pub fn revert_nullifier(&mut self, nullifier: Nullifier) { - // Transactions can be reverted out of order. - // - // The note may already have been fully removed by `revert_note` if the transaction creating - // the note was reverted before the transaction that consumed it. - if let Some(note) = self.nullified_notes.remove(&nullifier) { - self.available_notes.insert(nullifier, note); - } - } - - /// Drops all notes that have failed to be consumed after a certain number of attempts. - pub fn drop_failing_notes(&mut self, max_attempts: usize) { - self.available_notes.retain(|_, note| note.attempt_count() < max_attempts); - } - - /// Returns the latest inflight account state. 
- pub fn latest_account(&self) -> Account { - self.inflight - .back() - .or(self.committed.as_ref()) - .expect("account must have either a committed or inflight state") - .clone() - } - - /// Returns `true` if there is no inflight state being tracked. - /// - /// This implies this state is safe to remove without losing uncommitted data. - pub fn is_empty(&self) -> bool { - self.inflight.is_empty() - && self.available_notes.is_empty() - && self.nullified_notes.is_empty() - } - - /// Marks the specified notes as failed. - pub fn fail_notes(&mut self, nullifiers: &[Nullifier], block_num: BlockNumber) { - for nullifier in nullifiers { - if let Some(note) = self.available_notes.get_mut(nullifier) { - note.fail(block_num); - } else { - tracing::warn!(%nullifier, "failed note is not in account's state"); - } - } - } -} - -// NETWORK ACCOUNT UPDATE -// ================================================================================================ - -#[derive(Clone)] -pub enum NetworkAccountEffect { - Created(Account), - Updated(AccountDelta), -} - -impl NetworkAccountEffect { - pub fn from_protocol(update: &AccountUpdateDetails) -> Option { - let update = match update { - AccountUpdateDetails::Private => return None, - AccountUpdateDetails::Delta(update) if update.is_full_state() => { - NetworkAccountEffect::Created( - Account::try_from(update) - .expect("Account should be derivable by full state AccountDelta"), - ) - }, - AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), - }; - - update.protocol_account_id().is_network().then_some(update) - } - - pub fn network_account_id(&self) -> NetworkAccountId { - // SAFETY: This is a network account by construction. 
- self.protocol_account_id().try_into().unwrap() - } - - fn protocol_account_id(&self) -> AccountId { - match self { - NetworkAccountEffect::Created(acc) => acc.id(), - NetworkAccountEffect::Updated(delta) => delta.id(), - } - } -} - -#[cfg(test)] -mod tests { - use miden_protocol::block::BlockNumber; - - #[rstest::rstest] - #[test] - #[case::all_zero(Some(BlockNumber::from(0)), BlockNumber::from(0), 0, true)] - #[case::no_attempts(None, BlockNumber::from(0), 0, true)] - #[case::one_attempt(Some(BlockNumber::from(0)), BlockNumber::from(2), 1, true)] - #[case::three_attempts(Some(BlockNumber::from(0)), BlockNumber::from(3), 3, true)] - #[case::ten_attempts(Some(BlockNumber::from(0)), BlockNumber::from(13), 10, true)] - #[case::twenty_attempts(Some(BlockNumber::from(0)), BlockNumber::from(149), 20, true)] - #[case::one_attempt_false(Some(BlockNumber::from(0)), BlockNumber::from(1), 1, false)] - #[case::three_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(2), 3, false)] - #[case::ten_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(12), 10, false)] - #[case::twenty_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(148), 20, false)] - fn backoff_has_passed( - #[case] last_attempt_block_num: Option, - #[case] current_block_num: BlockNumber, - #[case] attempt_count: usize, - #[case] backoff_should_have_passed: bool, - ) { - use crate::actor::has_backoff_passed; - - assert_eq!( - backoff_should_have_passed, - has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) - ); - } -} diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index ce4d7b9c6a..53925bdcf8 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -62,7 +62,7 @@ impl BlockProducerClient { pub async fn subscribe_to_mempool_with_retry( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let mut retry_counter = 0; loop 
{ match self.subscribe_to_mempool(chain_tip).await { @@ -90,7 +90,7 @@ impl BlockProducerClient { async fn subscribe_to_mempool( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let request = proto::block_producer::MempoolSubscriptionRequest { chain_tip: chain_tip.as_u32() }; let stream = self.client.clone().mempool_subscription(request).await?; diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 8b789779f7..20090c5b93 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -1,56 +1,64 @@ -use std::num::NonZeroUsize; +use std::pin::Pin; use std::sync::Arc; use anyhow::Context; -use futures::TryStreamExt; +use futures::Stream; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_utils::lru_cache::LruCache; -use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::block::BlockHeader; use miden_protocol::crypto::merkle::mmr::PartialMmr; -use miden_protocol::note::NoteScript; use miden_protocol::transaction::PartialBlockchain; use tokio::sync::{RwLock, mpsc}; -use url::Url; +use tokio_stream::StreamExt; +use tonic::Status; -use crate::MAX_IN_PROGRESS_TXS; -use crate::actor::{AccountActorContext, AccountOrigin}; -use crate::block_producer::BlockProducerClient; +use crate::NtxBuilderConfig; +use crate::actor::{AccountActorContext, AccountOrigin, ActorNotification}; use crate::coordinator::Coordinator; +use crate::db::Db; use crate::store::StoreClient; -// CONSTANTS -// ================================================================================================= - -/// The maximum number of blocks to keep in memory while tracking the chain tip. 
-const MAX_BLOCK_COUNT: usize = 4; - // CHAIN STATE // ================================================================================================ /// Contains information about the chain that is relevant to the [`NetworkTransactionBuilder`] and -/// all account actors managed by the [`Coordinator`] +/// all account actors managed by the [`Coordinator`]. +/// +/// The chain MMR stored here contains: +/// - The MMR peaks. +/// - Block headers and authentication paths for the last [`NtxBuilderConfig::max_block_count`] +/// blocks. +/// +/// Authentication paths for older blocks are pruned because the NTX builder executes all notes as +/// "unauthenticated" (see [`InputNotes::from_unauthenticated_notes`]) and therefore does not need +/// to prove that input notes were created in specific past blocks. #[derive(Debug, Clone)] pub struct ChainState { /// The current tip of the chain. pub chain_tip_header: BlockHeader, - /// A partial representation of the latest state of the chain. - pub chain_mmr: PartialBlockchain, + /// A partial representation of the chain MMR. + /// + /// Contains block headers and authentication paths for the last + /// [`NtxBuilderConfig::max_block_count`] blocks only, since all notes are executed as + /// unauthenticated. + pub chain_mmr: Arc, } impl ChainState { /// Constructs a new instance of [`ChainState`]. - fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { + pub(crate) fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { let chain_mmr = PartialBlockchain::new(chain_mmr, []) .expect("partial blockchain should build from partial mmr"); - Self { chain_tip_header, chain_mmr } + Self { + chain_tip_header, + chain_mmr: Arc::new(chain_mmr), + } } /// Consumes the chain state and returns the chain tip header and the partial blockchain as a /// tuple. 
- pub fn into_parts(self) -> (BlockHeader, PartialBlockchain) { + pub fn into_parts(self) -> (BlockHeader, Arc) { (self.chain_tip_header, self.chain_mmr) } } @@ -58,103 +66,84 @@ impl ChainState { // NETWORK TRANSACTION BUILDER // ================================================================================================ +/// A boxed, pinned stream of mempool events with a `'static` lifetime. +/// +/// Boxing gives the stream a `'static` lifetime by ensuring it owns all its data, avoiding +/// complex lifetime annotations that would otherwise be required when storing `impl TryStream`. +pub(crate) type MempoolEventStream = + Pin> + Send>>; + /// Network transaction builder component. /// -/// The network transaction builder is in in charge of building transactions that consume notes +/// The network transaction builder is in charge of building transactions that consume notes /// against network accounts. These notes are identified and communicated by the block producer. /// The service maintains a list of unconsumed notes and periodically executes and proves /// transactions that consume them (reaching out to the store to retrieve state as necessary). /// /// The builder manages the tasks for every network account on the chain through the coordinator. +/// +/// Create an instance using [`NtxBuilderConfig::build()`]. pub struct NetworkTransactionBuilder { - /// Address of the store gRPC server. - store_url: Url, - /// Address of the block producer gRPC server. - block_producer_url: Url, - /// Address of the Validator server. - validator_url: Url, - /// Address of the remote prover. If `None`, transactions will be proven locally, which is - /// undesirable due to the performance impact. - tx_prover_url: Option, - /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. - /// This cache is shared across all account actors. - script_cache: LruCache, + /// Configuration for the builder. 
+ config: NtxBuilderConfig, /// Coordinator for managing actor tasks. coordinator: Coordinator, + /// Client for the store gRPC API. + store: StoreClient, + /// Database for persistent state. + db: Db, + /// Shared chain state updated by the event loop and read by actors. + chain_state: Arc>, + /// Context shared with all account actors. + actor_context: AccountActorContext, + /// Stream of mempool events from the block producer. + mempool_events: MempoolEventStream, + /// Receiver for notifications from account actors (e.g., note failures). + notification_rx: mpsc::Receiver, } impl NetworkTransactionBuilder { - /// Channel capacity for account loading. - const ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; - - /// Creates a new instance of the network transaction builder. - pub fn new( - store_url: Url, - block_producer_url: Url, - validator_url: Url, - tx_prover_url: Option, - script_cache_size: NonZeroUsize, + #[expect(clippy::too_many_arguments)] + pub(crate) fn new( + config: NtxBuilderConfig, + coordinator: Coordinator, + store: StoreClient, + db: Db, + chain_state: Arc>, + actor_context: AccountActorContext, + mempool_events: MempoolEventStream, + notification_rx: mpsc::Receiver, ) -> Self { - let script_cache = LruCache::new(script_cache_size); - let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); Self { - store_url, - block_producer_url, - validator_url, - tx_prover_url, - script_cache, + config, coordinator, + store, + db, + chain_state, + actor_context, + mempool_events, + notification_rx, } } - /// Runs the network transaction builder until a fatal error occurs. + /// Runs the network transaction builder event loop until a fatal error occurs. + /// + /// This method: + /// 1. Spawns a background task to load existing network accounts from the store + /// 2. 
Runs the main event loop, processing mempool events and managing actors + /// + /// # Errors + /// + /// Returns an error if: + /// - The mempool event stream ends unexpectedly + /// - An actor encounters a fatal error + /// - The account loader task fails pub async fn run(mut self) -> anyhow::Result<()> { - let store = StoreClient::new(self.store_url.clone()); - let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); - - // Loop until we successfully subscribe. - // - // The mempool rejects our subscription if we don't have the same view of the chain aka - // if our chain tip does not match the mempools. This can occur if a new block is committed - // _after_ we fetch the chain tip from the store but _before_ our subscription request is - // handled. - // - // This is a hack-around for https://github.com/0xMiden/miden-node/issues/1566. - let (chain_tip_header, chain_mmr, mut mempool_events) = loop { - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? - .expect("store should contain a latest block"); - - match block_producer - .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) - .await - { - Ok(subscription) => break (chain_tip_header, chain_mmr, subscription), - Err(status) if status.code() == tonic::Code::InvalidArgument => { - tracing::error!(err=%status, "mempool subscription failed due to desync, trying again"); - }, - Err(err) => return Err(err).context("failed to subscribe to mempool events"), - } - }; - - // Create chain state that will be updated by the coordinator and read by actors. 
- let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); - - let actor_context = AccountActorContext { - block_producer_url: self.block_producer_url.clone(), - validator_url: self.validator_url.clone(), - tx_prover_url: self.tx_prover_url.clone(), - chain_state: chain_state.clone(), - store: store.clone(), - script_cache: self.script_cache.clone(), - }; - // Spawn a background task to load network accounts from the store. - // Accounts are sent through a channel in batches and processed in the main event loop. + // Accounts are sent through a channel and processed in the main event loop. let (account_tx, mut account_rx) = - mpsc::channel::(Self::ACCOUNT_CHANNEL_CAPACITY); - let account_loader_store = store.clone(); + mpsc::channel::(self.config.account_channel_capacity); + let account_loader_store = self.store.clone(); let mut account_loader_handle = tokio::spawn(async move { account_loader_store .stream_network_account_ids(account_tx) @@ -162,7 +151,7 @@ impl NetworkTransactionBuilder { .context("failed to load network accounts from store") }); - // Main loop which manages actors and passes mempool events to them. + // Main event loop. loop { tokio::select! { // Handle actor result. @@ -170,22 +159,22 @@ impl NetworkTransactionBuilder { result?; }, // Handle mempool events. - event = mempool_events.try_next() => { + event = self.mempool_events.next() => { let event = event .context("mempool event stream ended")? .context("mempool event stream failed")?; - self.handle_mempool_event( - event.into(), - &actor_context, - chain_state.clone(), - ).await?; + self.handle_mempool_event(event.into()).await?; }, // Handle account batches loaded from the store. // Once all accounts are loaded, the channel closes and this branch // becomes inactive (recv returns None and we stop matching). 
Some(account_id) = account_rx.recv() => { - self.handle_loaded_account(account_id, &actor_context).await?; + self.handle_loaded_account(account_id).await?; + }, + // Handle actor notifications (DB writes delegated from actors). + Some(notification) = self.notification_rx.recv() => { + self.handle_actor_notification(notification).await; }, // Handle account loader task completion/failure. // If the task fails, we abort since the builder would be in a degraded state @@ -202,44 +191,71 @@ impl NetworkTransactionBuilder { } } - /// Handles a batch of account IDs loaded from the store by spawning actors for them. - #[tracing::instrument( - name = "ntx.builder.handle_loaded_accounts", - skip(self, account_id, actor_context) - )] + /// Handles account IDs loaded from the store by syncing state to DB and spawning actors. + #[tracing::instrument(name = "ntx.builder.handle_loaded_account", skip(self, account_id))] async fn handle_loaded_account( &mut self, account_id: NetworkAccountId, - actor_context: &AccountActorContext, ) -> Result<(), anyhow::Error> { + // Fetch account from store and write to DB. + let account = self + .store + .get_network_account(account_id) + .await + .context("failed to load account from store")? + .context("account should exist in store")?; + + let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let notes = self + .store + .get_unconsumed_network_notes(account_id, block_num.as_u32()) + .await + .context("failed to load notes from store")?; + + let notes: Vec<_> = notes + .into_iter() + .map(|n| { + let miden_node_proto::domain::note::NetworkNote::SingleTarget(note) = n; + note + }) + .collect(); + + // Write account and notes to DB. 
+ self.db + .sync_account_from_store(account_id, account.clone(), notes.clone()) + .await + .context("failed to sync account to DB")?; + self.coordinator - .spawn_actor(AccountOrigin::store(account_id), actor_context) + .spawn_actor(AccountOrigin::store(account_id), &self.actor_context) .await?; Ok(()) } - /// Handles mempool events by sending them to actors via the coordinator and/or spawning new - /// actors as required. - #[tracing::instrument( - name = "ntx.builder.handle_mempool_event", - skip(self, event, actor_context, chain_state) - )] + /// Handles mempool events by writing to DB first, then routing to actors. + #[tracing::instrument(name = "ntx.builder.handle_mempool_event", skip(self, event))] async fn handle_mempool_event( &mut self, event: Arc, - actor_context: &AccountActorContext, - chain_state: Arc>, ) -> Result<(), anyhow::Error> { match event.as_ref() { MempoolEvent::TransactionAdded { account_delta, .. } => { + // Write event effects to DB first. + self.coordinator + .write_event(&event) + .await + .context("failed to write TransactionAdded to DB")?; + // Handle account deltas in case an account is being created. if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { // Handle account deltas for network accounts only. if let Some(network_account) = AccountOrigin::transaction(delta) { - // Spawn new actors if a transaction creates a new network account + // Spawn new actors if a transaction creates a new network account. let is_creating_account = delta.is_full_state(); if is_creating_account { - self.coordinator.spawn_actor(network_account, actor_context).await?; + self.coordinator + .spawn_actor(network_account, &self.actor_context) + .await?; } } } @@ -247,48 +263,70 @@ impl NetworkTransactionBuilder { Ok(()) }, // Update chain state and broadcast. 
- MempoolEvent::BlockCommitted { header, txs } => { - self.update_chain_tip(header.as_ref().clone(), chain_state).await; - self.coordinator.broadcast(event.clone()).await; + MempoolEvent::BlockCommitted { header, .. } => { + // Write event effects to DB first. + self.coordinator + .write_event(&event) + .await + .context("failed to write BlockCommitted to DB")?; - // All transactions pertaining to predating events should now be available through - // the store. So we can now drain them. - for tx_id in txs { - self.coordinator.drain_predating_events(tx_id); - } + self.update_chain_tip(header.as_ref().clone()).await; + self.coordinator.broadcast(event.clone()).await; Ok(()) }, // Broadcast to all actors. - MempoolEvent::TransactionsReverted(txs) => { + MempoolEvent::TransactionsReverted(_) => { + // Write event effects to DB first; returns reverted account IDs. + let reverted_accounts = self + .coordinator + .write_event(&event) + .await + .context("failed to write TransactionsReverted to DB")?; + self.coordinator.broadcast(event.clone()).await; - // Reverted predating transactions need not be processed. - for tx_id in txs { - self.coordinator.drain_predating_events(tx_id); + // Cancel actors for reverted account creations. + for account_id in &reverted_accounts { + self.coordinator.cancel_actor(account_id); } Ok(()) }, } } - /// Updates the chain tip and MMR block count. - /// - /// Blocks in the MMR are pruned if the block count exceeds the maximum. - async fn update_chain_tip(&mut self, tip: BlockHeader, chain_state: Arc>) { - // Lock the chain state. - let mut chain_state = chain_state.write().await; + /// Processes a notification from an account actor by performing the corresponding DB write. 
+ async fn handle_actor_notification(&mut self, notification: ActorNotification) { + match notification { + ActorNotification::NotesFailed { nullifiers, block_num } => { + if let Err(err) = self.db.notes_failed(nullifiers, block_num).await { + tracing::error!(err = %err, "failed to mark notes as failed"); + } + }, + ActorNotification::CacheNoteScript { script_root, script } => { + if let Err(err) = self.db.insert_note_script(script_root, &script).await { + tracing::error!(err = %err, "failed to cache note script"); + } + }, + } + } + + /// Updates the chain tip and prunes old blocks from the MMR. + async fn update_chain_tip(&mut self, tip: BlockHeader) { + let mut chain_state = self.chain_state.write().await; // Update MMR which lags by one block. let mmr_tip = chain_state.chain_tip_header.clone(); - chain_state.chain_mmr.add_block(&mmr_tip, true); + Arc::make_mut(&mut chain_state.chain_mmr).add_block(&mmr_tip, true); // Set the new tip. chain_state.chain_tip_header = tip; // Keep MMR pruned. 
- let pruned_block_height = - (chain_state.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) - as u32; - chain_state.chain_mmr.prune_to(..pruned_block_height.into()); + let pruned_block_height = (chain_state + .chain_mmr + .chain_length() + .as_usize() + .saturating_sub(self.config.max_block_count)) as u32; + Arc::make_mut(&mut chain_state.chain_mmr).prune_to(..pruned_block_height.into()); } } diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 285cee47af..a857bdc643 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -2,18 +2,18 @@ use std::collections::HashMap; use std::sync::Arc; use anyhow::Context; -use indexmap::IndexMap; +use miden_node_db::DatabaseError; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::NetworkNote; +use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::transaction::TransactionId; use tokio::sync::mpsc::error::SendError; use tokio::sync::{Semaphore, mpsc}; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use crate::actor::{AccountActor, AccountActorContext, AccountOrigin, ActorShutdownReason}; +use crate::db::Db; // ACTOR HANDLE // ================================================================================================ @@ -87,23 +87,23 @@ pub struct Coordinator { /// ensuring fair resource allocation and system stability under load. semaphore: Arc, - /// Cache of events received from the mempool that predate corresponding network accounts. - /// Grouped by network account ID to allow targeted event delivery to actors upon creation. - predating_events: HashMap>>, + /// Database for persistent state. + db: Db, + + /// Channel size for each actor's event channel. 
+ actor_channel_size: usize, } impl Coordinator { - /// Maximum number of messages of the message channel for each actor. - const ACTOR_CHANNEL_SIZE: usize = 100; - /// Creates a new coordinator with the specified maximum number of inflight transactions - /// and shared script cache. - pub fn new(max_inflight_transactions: usize) -> Self { + /// and actor channel size. + pub fn new(max_inflight_transactions: usize, actor_channel_size: usize, db: Db) -> Self { Self { actor_registry: HashMap::new(), actor_join_set: JoinSet::new(), semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), - predating_events: HashMap::new(), + db, + actor_channel_size, } } @@ -122,28 +122,24 @@ impl Coordinator { // If an actor already exists for this account ID, something has gone wrong. if let Some(handle) = self.actor_registry.remove(&account_id) { - tracing::error!("account actor already exists for account: {}", account_id); + tracing::error!( + account_id = %account_id, + "Account actor already exists" + ); handle.cancel_token.cancel(); } - let (event_tx, event_rx) = mpsc::channel(Self::ACTOR_CHANNEL_SIZE); + let (event_tx, event_rx) = mpsc::channel(self.actor_channel_size); let cancel_token = tokio_util::sync::CancellationToken::new(); let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); let handle = ActorHandle::new(event_tx, cancel_token); - // Run the actor. + // Run the actor. Actor reads state from DB on startup. let semaphore = self.semaphore.clone(); self.actor_join_set.spawn(Box::pin(actor.run(semaphore))); - // Send the new actor any events that contain notes that predate account creation. 
- if let Some(predating_events) = self.predating_events.remove(&account_id) { - for event in predating_events.values() { - Self::send(&handle, event.clone()).await?; - } - } - self.actor_registry.insert(account_id, handle); - tracing::info!("created actor for account: {}", account_id); + tracing::info!(account_id = %account_id, "Created actor for account prefix"); Ok(()) } @@ -154,18 +150,21 @@ impl Coordinator { /// message channel and can process it accordingly. /// /// If an actor fails to receive the event, it will be canceled. + #[tracing::instrument(name = "ntx.coordinator.broadcast", skip_all, fields( + actor.count = self.actor_registry.len(), + event.kind = %event.kind() + ))] pub async fn broadcast(&mut self, event: Arc) { - tracing::debug!( - actor_count = self.actor_registry.len(), - "broadcasting event to all actors" - ); - let mut failed_actors = Vec::new(); // Send event to all actors. for (account_id, handle) in &self.actor_registry { if let Err(err) = Self::send(handle, event.clone()).await { - tracing::error!("failed to send event to actor {}: {}", account_id, err); + tracing::error!( + account_id = %account_id, + error = %err, + "Failed to send event to actor" + ); failed_actors.push(*account_id); } } @@ -192,12 +191,7 @@ impl Coordinator { ActorShutdownReason::Cancelled(account_id) => { // Do not remove the actor from the registry, as it may be re-spawned. // The coordinator should always remove actors immediately after cancellation. - tracing::info!("account actor cancelled: {}", account_id); - Ok(()) - }, - ActorShutdownReason::AccountReverted(account_id) => { - tracing::info!("account reverted: {}", account_id); - self.actor_registry.remove(&account_id); + tracing::info!(account_id = %account_id, "Account actor cancelled"); Ok(()) }, ActorShutdownReason::EventChannelClosed => { @@ -219,19 +213,15 @@ impl Coordinator { /// Sends a mempool event to all network account actors that are found in the corresponding /// transaction's notes. 
/// - /// Caches the mempool event for each network account found in the transaction's notes that does - /// not currently have a corresponding actor. If an actor does not exist for the account, it is - /// assumed that the account has not been created on the chain yet. - /// - /// Cached events will be fed to the corresponding actor when the account creation transaction - /// is processed. + /// Events are sent only to actors that are currently active. Since event effects are already + /// persisted in the DB by `write_event()`, actors that spawn later read their state from the + /// DB and do not need predating events. pub async fn send_targeted( &mut self, event: &Arc, ) -> Result<(), SendError>> { let mut target_actors = HashMap::new(); - if let MempoolEvent::TransactionAdded { id, network_notes, account_delta, .. } = - event.as_ref() + if let MempoolEvent::TransactionAdded { network_notes, account_delta, .. } = event.as_ref() { // We need to inform the account if it was updated. This lets it know that its own // transaction has been applied, and in the future also resolves race conditions with @@ -252,14 +242,7 @@ impl Coordinator { let NetworkNote::SingleTarget(note) = note; let network_account_id = note.account_id(); if let Some(actor) = self.actor_registry.get(&network_account_id) { - // Register actor as target. target_actors.insert(network_account_id, actor); - } else { - // Cache event for every note that doesn't have a corresponding actor. - self.predating_events - .entry(network_account_id) - .or_default() - .insert(*id, event.clone()); } } } @@ -270,16 +253,55 @@ impl Coordinator { Ok(()) } - /// Removes any cached events for a given transaction ID from all account caches. - pub fn drain_predating_events(&mut self, tx_id: &TransactionId) { - // Remove the transaction from all account caches. - // This iterates over all predating events which is fine because the count is expected to be - // low. 
- self.predating_events.retain(|_, account_events| { - account_events.shift_remove(tx_id); - // Remove entries for accounts with no more cached events. - !account_events.is_empty() - }); + /// Writes mempool event effects to the database. + /// + /// This must be called BEFORE sending notifications to actors. For `TransactionsReverted`, + /// returns the list of account IDs whose creation was reverted. + pub async fn write_event( + &self, + event: &MempoolEvent, + ) -> Result, DatabaseError> { + match event { + MempoolEvent::TransactionAdded { + id, + nullifiers, + network_notes, + account_delta, + } => { + let notes: Vec = network_notes + .iter() + .map(|n| { + let NetworkNote::SingleTarget(note) = n; + note.clone() + }) + .collect(); + + self.db + .handle_transaction_added(*id, account_delta.clone(), notes, nullifiers.clone()) + .await?; + Ok(Vec::new()) + }, + MempoolEvent::BlockCommitted { header, txs } => { + self.db + .handle_block_committed( + txs.clone(), + header.block_num(), + header.as_ref().clone(), + ) + .await?; + Ok(Vec::new()) + }, + MempoolEvent::TransactionsReverted(tx_ids) => { + self.db.handle_transactions_reverted(tx_ids.iter().copied().collect()).await + }, + } + } + + /// Cancels an actor by its account ID. + pub fn cancel_actor(&mut self, account_id: &NetworkAccountId) { + if let Some(handle) = self.actor_registry.remove(account_id) { + handle.cancel_token.cancel(); + } } /// Helper function to send an event to a single account actor. 
diff --git a/crates/ntx-builder/src/db/migrations.rs b/crates/ntx-builder/src/db/migrations.rs new file mode 100644 index 0000000000..f3955cb2ad --- /dev/null +++ b/crates/ntx-builder/src/db/migrations.rs @@ -0,0 +1,29 @@ +use diesel::SqliteConnection; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use miden_node_db::DatabaseError; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::schema_hash::verify_schema; + +// The rebuild is automatically triggered by `build.rs` as described in +// . +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); + +#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +pub fn apply_migrations(conn: &mut SqliteConnection) -> Result<(), DatabaseError> { + let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); + tracing::info!(target: COMPONENT, migrations = migrations.len(), "Applying pending migrations"); + + let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + // Migrations applied successfully, verify schema hash. + verify_schema(conn)?; + return Ok(()); + }; + tracing::warn!(target: COMPONENT, "Failed to apply migration: {e:?}"); + // Something went wrong; revert the last migration. 
+ conn.revert_last_migration(MIGRATIONS) + .expect("Duality is maintained by the developer"); + + Ok(()) +} diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql @@ -0,0 +1 @@ + diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql new file mode 100644 index 0000000000..68f3793d83 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql @@ -0,0 +1,71 @@ +-- Singleton row storing the chain tip header. +-- The chain MMR is reconstructed on startup from the store and maintained in memory. +CREATE TABLE chain_state ( + -- Singleton constraint: only one row allowed. + id INTEGER NOT NULL PRIMARY KEY CHECK (id = 0), + -- Block number of the chain tip. + block_num INTEGER NOT NULL, + -- Serialized BlockHeader. + block_header BLOB NOT NULL, + + CONSTRAINT chain_state_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) +); + +-- Account states: both committed and inflight. +-- Committed rows have transaction_id = NULL. Inflight rows have transaction_id set. +-- The auto-incrementing order_id preserves insertion order (VecDeque semantics). +CREATE TABLE accounts ( + -- Auto-incrementing ID preserves insertion order. + order_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + -- AccountId serialized bytes (8 bytes). + account_id BLOB NOT NULL, + -- Serialized Account state. + account_data BLOB NOT NULL, + -- NULL if this is the committed state; transaction ID if inflight. + transaction_id BLOB +); + +-- At most one committed row per account. +CREATE UNIQUE INDEX idx_accounts_committed ON accounts(account_id) WHERE transaction_id IS NULL; +-- At most one inflight row per (account, transaction) pair. 
+CREATE UNIQUE INDEX idx_accounts_inflight ON accounts(account_id, transaction_id) + WHERE transaction_id IS NOT NULL; +CREATE INDEX idx_accounts_account ON accounts(account_id); +CREATE INDEX idx_accounts_tx ON accounts(transaction_id) WHERE transaction_id IS NOT NULL; + +-- Notes: committed, inflight, and nullified — all in one table. +-- created_by = NULL means committed note; non-NULL means created by inflight tx. +-- consumed_by = NULL means unconsumed; non-NULL means consumed by inflight tx. +-- Row is deleted once consumption is committed. +CREATE TABLE notes ( + -- Nullifier bytes (32 bytes). Primary key. + nullifier BLOB PRIMARY KEY, + -- Target account ID. + account_id BLOB NOT NULL, + -- Serialized SingleTargetNetworkNote. + note_data BLOB NOT NULL, + -- Backoff tracking: number of failed execution attempts. + attempt_count INTEGER NOT NULL DEFAULT 0, + -- Backoff tracking: block number of the last failed attempt. NULL if never attempted. + last_attempt INTEGER, + -- NULL if the note came from a committed block; transaction ID if created by inflight tx. + created_by BLOB, + -- NULL if unconsumed; transaction ID of the consuming inflight tx. + consumed_by BLOB, + + CONSTRAINT notes_attempt_count_non_negative CHECK (attempt_count >= 0), + CONSTRAINT notes_last_attempt_is_u32 CHECK (last_attempt BETWEEN 0 AND 0xFFFFFFFF) +) WITHOUT ROWID; + +CREATE INDEX idx_notes_account ON notes(account_id); +CREATE INDEX idx_notes_created_by ON notes(created_by) WHERE created_by IS NOT NULL; +CREATE INDEX idx_notes_consumed_by ON notes(consumed_by) WHERE consumed_by IS NOT NULL; + +-- Persistent cache of note scripts, keyed by script root hash. +-- Survives restarts so scripts don't need to be re-fetched from the store. +CREATE TABLE note_scripts ( + -- Script root hash (Word serialized to 32 bytes). + script_root BLOB PRIMARY KEY, + -- Serialized NoteScript bytes. 
+ script_data BLOB NOT NULL +) WITHOUT ROWID; diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs new file mode 100644 index 0000000000..47352e29ed --- /dev/null +++ b/crates/ntx-builder/src/db/mod.rs @@ -0,0 +1,220 @@ +use std::path::PathBuf; + +use anyhow::Context; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::Word; +use miden_protocol::account::Account; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{NoteScript, Nullifier}; +use miden_protocol::transaction::TransactionId; +use tracing::{info, instrument}; + +use crate::COMPONENT; +use crate::actor::inflight_note::InflightNetworkNote; +use crate::db::migrations::apply_migrations; +use crate::db::models::queries; + +pub(crate) mod models; + +mod migrations; +mod schema_hash; + +/// [diesel](https://diesel.rs) generated schema. +pub(crate) mod schema; + +pub type Result = std::result::Result; + +#[derive(Clone)] +pub struct Db { + inner: miden_node_db::Db, +} + +impl Db { + /// Creates and initializes the database, then opens an async connection pool. 
+ #[instrument( + target = COMPONENT, + name = "ntx_builder.database.setup", + skip_all, + fields(path=%database_filepath.display()), + err, + )] + pub async fn setup(database_filepath: PathBuf) -> anyhow::Result { + let inner = miden_node_db::Db::new(&database_filepath) + .context("failed to build connection pool")?; + + info!( + target: COMPONENT, + sqlite = %database_filepath.display(), + "Connected to the database" + ); + + let me = Db { inner }; + me.inner + .query("migrations", apply_migrations) + .await + .context("failed to apply migrations on pool connection")?; + Ok(me) + } + + // PUBLIC QUERY METHODS + // ============================================================================================ + + /// Returns `true` if there are notes available for consumption by the given account. + pub async fn has_available_notes( + &self, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_attempts: usize, + ) -> Result { + self.inner + .query("has_available_notes", move |conn| { + let notes = queries::available_notes(conn, account_id, block_num, max_attempts)?; + Ok(!notes.is_empty()) + }) + .await + } + + /// Returns the latest account state and available notes for the given account. + pub async fn select_candidate( + &self, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_note_attempts: usize, + ) -> Result<(Option, Vec)> { + self.inner + .query("select_candidate", move |conn| { + let account = queries::get_account(conn, account_id)?; + let notes = + queries::available_notes(conn, account_id, block_num, max_note_attempts)?; + Ok((account, notes)) + }) + .await + } + + /// Marks notes as failed by incrementing `attempt_count` and setting `last_attempt`. 
+ pub async fn notes_failed( + &self, + nullifiers: Vec, + block_num: BlockNumber, + ) -> Result<()> { + self.inner + .transact("notes_failed", move |conn| { + queries::notes_failed(conn, &nullifiers, block_num) + }) + .await + } + + /// Handles a `TransactionAdded` mempool event by writing effects to the DB. + pub async fn handle_transaction_added( + &self, + tx_id: TransactionId, + account_delta: Option, + notes: Vec, + nullifiers: Vec, + ) -> Result<()> { + self.inner + .transact("handle_transaction_added", move |conn| { + queries::add_transaction(conn, &tx_id, account_delta.as_ref(), ¬es, &nullifiers) + }) + .await + } + + /// Handles a `BlockCommitted` mempool event by committing transaction effects. + pub async fn handle_block_committed( + &self, + txs: Vec, + block_num: BlockNumber, + header: BlockHeader, + ) -> Result<()> { + self.inner + .transact("handle_block_committed", move |conn| { + queries::commit_block(conn, &txs, block_num, &header) + }) + .await + } + + /// Handles a `TransactionsReverted` mempool event by undoing transaction effects. + /// + /// Returns the list of account IDs whose creation was reverted. + pub async fn handle_transactions_reverted( + &self, + tx_ids: Vec, + ) -> Result> { + self.inner + .transact("handle_transactions_reverted", move |conn| { + queries::revert_transaction(conn, &tx_ids) + }) + .await + } + + /// Purges all inflight state. Called on startup to get a clean slate. + pub async fn purge_inflight(&self) -> Result<()> { + self.inner.transact("purge_inflight", queries::purge_inflight).await + } + + /// Inserts or replaces the singleton chain state row. + pub async fn upsert_chain_state( + &self, + block_num: BlockNumber, + header: BlockHeader, + ) -> Result<()> { + self.inner + .transact("upsert_chain_state", move |conn| { + queries::upsert_chain_state(conn, block_num, &header) + }) + .await + } + + /// Syncs an account and its notes from the store into the DB. 
+ pub async fn sync_account_from_store( + &self, + account_id: NetworkAccountId, + account: Account, + notes: Vec, + ) -> Result<()> { + self.inner + .transact("sync_account_from_store", move |conn| { + queries::upsert_committed_account(conn, account_id, &account)?; + queries::insert_committed_notes(conn, ¬es)?; + Ok(()) + }) + .await + } + + /// Looks up a cached note script by root hash. + pub async fn lookup_note_script(&self, script_root: Word) -> Result> { + self.inner + .query("lookup_note_script", move |conn| { + queries::lookup_note_script(conn, &script_root) + }) + .await + } + + /// Persists a note script to the local cache. + pub async fn insert_note_script(&self, script_root: Word, script: &NoteScript) -> Result<()> { + let script = script.clone(); + self.inner + .transact("insert_note_script", move |conn| { + queries::insert_note_script(conn, &script_root, &script) + }) + .await + } + + /// Creates a file-backed SQLite test connection with migrations applied. + #[cfg(test)] + pub fn test_conn() -> (diesel::SqliteConnection, tempfile::TempDir) { + use diesel::{Connection, SqliteConnection}; + use miden_node_db::configure_connection_on_creation; + + let dir = tempfile::tempdir().expect("failed to create temp directory"); + let db_path = dir.path().join("test.sqlite3"); + let mut conn = SqliteConnection::establish(db_path.to_str().unwrap()) + .expect("temp file sqlite should always work"); + configure_connection_on_creation(&mut conn).expect("connection configuration should work"); + apply_migrations(&mut conn).expect("migrations should apply on empty database"); + (conn, dir) + } +} diff --git a/crates/ntx-builder/src/db/models/conv.rs b/crates/ntx-builder/src/db/models/conv.rs new file mode 100644 index 0000000000..26bb99868b --- /dev/null +++ b/crates/ntx-builder/src/db/models/conv.rs @@ -0,0 +1,90 @@ +//! Conversions between Miden domain types and database column types. 
+ +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_node_proto::generated as proto; +use miden_protocol::Word; +use miden_protocol::account::{Account, AccountId}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{Note, NoteScript, Nullifier}; +use miden_protocol::transaction::TransactionId; +use miden_tx::utils::{Deserializable, Serializable}; +use prost::Message; + +// SERIALIZATION (domain → DB) +// ================================================================================================ + +pub fn account_to_bytes(account: &Account) -> Vec { + account.to_bytes() +} + +pub fn block_header_to_bytes(header: &BlockHeader) -> Vec { + header.to_bytes() +} + +pub fn network_account_id_to_bytes(id: NetworkAccountId) -> Vec { + id.inner().to_bytes() +} + +pub fn transaction_id_to_bytes(id: &TransactionId) -> Vec { + id.to_bytes() +} + +pub fn nullifier_to_bytes(nullifier: &Nullifier) -> Vec { + nullifier.to_bytes() +} + +pub fn block_num_to_i64(block_num: BlockNumber) -> i64 { + i64::from(block_num.as_u32()) +} + +#[expect(clippy::cast_sign_loss)] +pub fn block_num_from_i64(val: i64) -> BlockNumber { + BlockNumber::from(val as u32) +} + +/// Serializes a `SingleTargetNetworkNote` to bytes using its protobuf representation. 
+pub fn single_target_note_to_bytes(note: &SingleTargetNetworkNote) -> Vec { + let proto_note: proto::note::NetworkNote = Note::from(note.clone()).into(); + proto_note.encode_to_vec() +} + +// DESERIALIZATION (DB → domain) +// ================================================================================================ + +pub fn account_from_bytes(bytes: &[u8]) -> Result { + Account::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("account", e)) +} + +pub fn account_id_from_bytes(bytes: &[u8]) -> Result { + AccountId::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("account id", e)) +} + +pub fn network_account_id_from_bytes(bytes: &[u8]) -> Result { + let account_id = account_id_from_bytes(bytes)?; + NetworkAccountId::try_from(account_id) + .map_err(|e| DatabaseError::deserialization("network account id", e)) +} + +/// Deserializes a `SingleTargetNetworkNote` from its protobuf byte representation. +pub fn single_target_note_from_bytes( + bytes: &[u8], +) -> Result { + let proto_note = proto::note::NetworkNote::decode(bytes) + .map_err(|e| DatabaseError::deserialization("network note proto", e))?; + SingleTargetNetworkNote::try_from(proto_note) + .map_err(|e| DatabaseError::deserialization("network note conversion", e)) +} + +pub fn word_to_bytes(word: &Word) -> Vec { + word.to_bytes() +} + +pub fn note_script_to_bytes(script: &NoteScript) -> Vec { + script.to_bytes() +} + +pub fn note_script_from_bytes(bytes: &[u8]) -> Result { + NoteScript::read_from_bytes(bytes).map_err(|e| DatabaseError::deserialization("note script", e)) +} diff --git a/crates/ntx-builder/src/db/models/mod.rs b/crates/ntx-builder/src/db/models/mod.rs new file mode 100644 index 0000000000..405fe08146 --- /dev/null +++ b/crates/ntx-builder/src/db/models/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod conv; + +pub mod queries; diff --git a/crates/ntx-builder/src/db/models/queries/accounts.rs b/crates/ntx-builder/src/db/models/queries/accounts.rs new file mode 100644 
index 0000000000..833f60ed8c --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/accounts.rs @@ -0,0 +1,102 @@ +//! Account-related queries and models. + +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_protocol::account::Account; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +/// Row for inserting into the unified `accounts` table. +/// +/// `transaction_id = None` means committed; `Some(tx_id_bytes)` means inflight. +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::accounts)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct AccountInsert { + pub account_id: Vec, + pub account_data: Vec, + pub transaction_id: Option>, +} + +/// Row read from `accounts`. +#[derive(Debug, Clone, Queryable, Selectable)] +#[diesel(table_name = schema::accounts)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct AccountRow { + pub account_data: Vec, +} + +// QUERIES +// ================================================================================================ + +/// Inserts or replaces the committed account state (`transaction_id = NULL`). +/// +/// Deletes any existing committed row first, then inserts a fresh one. +/// +/// # Raw SQL +/// +/// ```sql +/// DELETE FROM accounts WHERE account_id = ?1 AND transaction_id IS NULL +/// +/// INSERT INTO accounts (account_id, account_data, transaction_id) +/// VALUES (?1, ?2, NULL) +/// ``` +pub fn upsert_committed_account( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, + account: &Account, +) -> Result<(), DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // Delete the existing committed row (if any). 
+ diesel::delete( + schema::accounts::table + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::transaction_id.is_null()), + ) + .execute(conn)?; + + // Insert the new committed row. + let row = AccountInsert { + account_id: account_id_bytes, + account_data: conversions::account_to_bytes(account), + transaction_id: None, + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn)?; + Ok(()) +} + +/// Returns the latest account state: last inflight row (highest `order_id`), or committed if +/// none. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT account_data +/// FROM accounts +/// WHERE account_id = ?1 +/// ORDER BY order_id DESC +/// LIMIT 1 +/// ``` +pub fn get_account( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, +) -> Result, DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // ORDER BY order_id DESC returns the latest inflight first, then committed. + let row: Option = schema::accounts::table + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .order(schema::accounts::order_id.desc()) + .select(AccountRow::as_select()) + .first(conn) + .optional()?; + + row.map(|AccountRow { account_data, .. }| conversions::account_from_bytes(&account_data)) + .transpose() +} diff --git a/crates/ntx-builder/src/db/models/queries/chain_state.rs b/crates/ntx-builder/src/db/models/queries/chain_state.rs new file mode 100644 index 0000000000..9b529cadc5 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/chain_state.rs @@ -0,0 +1,46 @@ +//! Chain state queries and models. 
+ +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_protocol::block::{BlockHeader, BlockNumber}; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::chain_state)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct ChainStateInsert { + /// Singleton row ID. Always `0` to satisfy the `CHECK (id = 0)` constraint. + pub id: i32, + pub block_num: i64, + pub block_header: Vec, +} + +// QUERIES +// ================================================================================================ + +/// Inserts or replaces the singleton chain state row. +/// +/// # Raw SQL +/// +/// ```sql +/// INSERT OR REPLACE INTO chain_state (id, block_num, block_header) +/// VALUES (0, ?1, ?2) +/// ``` +pub fn upsert_chain_state( + conn: &mut SqliteConnection, + block_num: BlockNumber, + block_header: &BlockHeader, +) -> Result<(), DatabaseError> { + let row = ChainStateInsert { + id: 0, + block_num: conversions::block_num_to_i64(block_num), + block_header: conversions::block_header_to_bytes(block_header), + }; + diesel::replace_into(schema::chain_state::table).values(&row).execute(conn)?; + Ok(()) +} diff --git a/crates/ntx-builder/src/db/models/queries/mod.rs b/crates/ntx-builder/src/db/models/queries/mod.rs new file mode 100644 index 0000000000..2ee11ee287 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/mod.rs @@ -0,0 +1,319 @@ +//! Database query functions for the NTX builder. 
+ +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::TransactionId; + +use crate::actor::account_effect::NetworkAccountEffect; +use crate::db::models::conv as conversions; +use crate::db::schema; + +mod accounts; +pub use accounts::*; + +mod chain_state; +pub use chain_state::*; + +mod note_scripts; +pub use note_scripts::*; + +mod notes; +pub use notes::*; + +#[cfg(test)] +mod tests; + +// STARTUP QUERIES +// ================================================================================================ + +/// Purges all inflight state. Called on startup to get a clean state. +/// +/// - Deletes account rows with `transaction_id IS NOT NULL`. +/// - Deletes note rows with `created_by IS NOT NULL`. +/// - Sets `consumed_by = NULL` on notes consumed by inflight transactions. +/// +/// # Raw SQL +/// +/// ```sql +/// DELETE FROM accounts WHERE transaction_id IS NOT NULL +/// +/// DELETE FROM notes WHERE created_by IS NOT NULL +/// +/// UPDATE notes SET consumed_by = NULL WHERE consumed_by IS NOT NULL +/// ``` +pub fn purge_inflight(conn: &mut SqliteConnection) -> Result<(), DatabaseError> { + // Delete inflight account rows. + diesel::delete(schema::accounts::table.filter(schema::accounts::transaction_id.is_not_null())) + .execute(conn)?; + + // Delete inflight-created notes. + diesel::delete(schema::notes::table.filter(schema::notes::created_by.is_not_null())) + .execute(conn)?; + + // Un-nullify notes consumed by inflight transactions. 
+ diesel::update(schema::notes::table.filter(schema::notes::consumed_by.is_not_null())) + .set(schema::notes::consumed_by.eq(None::>)) + .execute(conn)?; + + Ok(()) +} + +// MEMPOOL EVENT HANDLERS +// ================================================================================================ + +/// Handles a `TransactionAdded` event by writing effects to the DB. +/// +/// # Raw SQL +/// +/// For account updates (applies delta to latest state and inserts inflight row): +/// +/// ```sql +/// -- Fetch latest account (see latest_account) +/// INSERT INTO accounts (account_id, transaction_id, account_data) +/// VALUES (?1, ?2, ?3) +/// ``` +/// +/// Per note (idempotent via `INSERT OR IGNORE`): +/// +/// ```sql +/// INSERT OR IGNORE INTO notes +/// (nullifier, account_id, note_data, attempt_count, last_attempt, created_by, consumed_by) +/// VALUES (?1, ?2, ?3, 0, NULL, ?4, NULL) +/// ``` +/// +/// Per nullifier (marks notes as consumed): +/// +/// ```sql +/// UPDATE notes +/// SET consumed_by = ?1 +/// WHERE nullifier = ?2 AND consumed_by IS NULL +/// ``` +pub fn add_transaction( + conn: &mut SqliteConnection, + tx_id: &TransactionId, + account_delta: Option<&AccountUpdateDetails>, + notes: &[SingleTargetNetworkNote], + nullifiers: &[Nullifier], +) -> Result<(), DatabaseError> { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Process account delta. + if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { + let account_id = update.network_account_id(); + match update { + NetworkAccountEffect::Updated(ref account_delta) => { + // Query latest_account, apply delta, insert inflight row. 
+ let current_account = + get_account(conn, account_id)?.expect("account must exist to apply delta"); + let mut updated = current_account; + updated.apply_delta(account_delta).expect( + "network account delta should apply since it was accepted by the mempool", + ); + + let insert = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(tx_id_bytes.clone()), + account_data: conversions::account_to_bytes(&updated), + }; + diesel::insert_into(schema::accounts::table).values(&insert).execute(conn)?; + }, + NetworkAccountEffect::Created(ref account) => { + let insert = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(tx_id_bytes.clone()), + account_data: conversions::account_to_bytes(account), + }; + diesel::insert_into(schema::accounts::table).values(&insert).execute(conn)?; + }, + } + } + + // Insert notes with created_by = tx_id. + // Uses INSERT OR IGNORE to make this idempotent if the same event is delivered twice + // (the nullifier PK would otherwise cause a constraint violation). + for note in notes { + let insert = NoteInsert { + nullifier: conversions::nullifier_to_bytes(¬e.nullifier()), + account_id: conversions::network_account_id_to_bytes(note.account_id()), + note_data: conversions::single_target_note_to_bytes(note), + attempt_count: 0, + last_attempt: None, + created_by: Some(tx_id_bytes.clone()), + consumed_by: None, + }; + diesel::insert_or_ignore_into(schema::notes::table) + .values(&insert) + .execute(conn)?; + } + + // Mark consumed notes: set consumed_by = tx_id for matching nullifiers. + for nullifier in nullifiers { + let nullifier_bytes = conversions::nullifier_to_bytes(nullifier); + + // Only mark notes that are not already consumed. 
+ diesel::update( + schema::notes::table + .find(&nullifier_bytes) + .filter(schema::notes::consumed_by.is_null()), + ) + .set(schema::notes::consumed_by.eq(Some(&tx_id_bytes))) + .execute(conn)?; + } + + Ok(()) +} + +/// Handles a `BlockCommitted` event by committing transaction effects. +/// +/// # Raw SQL +/// +/// Per committed transaction: +/// +/// ```sql +/// -- Find inflight accounts for this tx +/// SELECT account_id FROM accounts WHERE transaction_id = ?1 +/// +/// -- Delete old committed row +/// DELETE FROM accounts WHERE account_id = ?1 AND transaction_id IS NULL +/// +/// -- Promote inflight row to committed +/// UPDATE accounts SET transaction_id = NULL +/// WHERE account_id = ?1 AND transaction_id = ?2 +/// +/// -- Delete consumed notes +/// DELETE FROM notes WHERE consumed_by = ?1 +/// +/// -- Promote inflight-created notes to committed +/// UPDATE notes SET created_by = NULL WHERE created_by = ?1 +/// ``` +/// +/// Finally updates chain state (see [`upsert_chain_state`]). +pub fn commit_block( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], + block_num: BlockNumber, + block_header: &BlockHeader, +) -> Result<(), DatabaseError> { + for tx_id in tx_ids { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Promote inflight account rows: delete old committed, set transaction_id = NULL. + // Find accounts that have an inflight row for this tx. + let inflight_account_ids: Vec> = schema::accounts::table + .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)) + .select(schema::accounts::account_id) + .load(conn)?; + + for account_id_bytes in &inflight_account_ids { + // Delete the old committed row for this account. + diesel::delete( + schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id_bytes)) + .filter(schema::accounts::transaction_id.is_null()), + ) + .execute(conn)?; + + // Promote the inflight row to committed (set transaction_id = NULL). + // Only promote the row for this specific tx. 
+ diesel::update( + schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id_bytes)) + .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)), + ) + .set(schema::accounts::transaction_id.eq(None::>)) + .execute(conn)?; + } + + // Delete consumed notes (consumed_by = tx_id). + diesel::delete(schema::notes::table.filter(schema::notes::consumed_by.eq(&tx_id_bytes))) + .execute(conn)?; + + // Promote inflight-created notes to committed (set created_by = NULL). + diesel::update(schema::notes::table.filter(schema::notes::created_by.eq(&tx_id_bytes))) + .set(schema::notes::created_by.eq(None::>)) + .execute(conn)?; + } + + // Update chain state. + upsert_chain_state(conn, block_num, block_header)?; + + Ok(()) +} + +/// Handles a `TransactionsReverted` event by undoing transaction effects. +/// +/// Returns the list of account IDs whose creation was reverted (no committed row exists for that +/// account after removing the inflight rows). +/// +/// # Raw SQL +/// +/// Per reverted transaction: +/// +/// ```sql +/// -- Find affected accounts +/// SELECT account_id FROM accounts WHERE transaction_id = ?1 +/// +/// -- Delete inflight account rows +/// DELETE FROM accounts WHERE transaction_id = ?1 +/// +/// -- Check if account creation was fully reverted +/// SELECT COUNT(*) FROM accounts WHERE account_id = ?1 +/// +/// -- Delete inflight-created notes +/// DELETE FROM notes WHERE created_by = ?1 +/// +/// -- Restore consumed notes +/// UPDATE notes SET consumed_by = NULL WHERE consumed_by = ?1 +/// ``` +pub fn revert_transaction( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], +) -> Result, DatabaseError> { + let mut reverted_accounts = Vec::new(); + + for tx_id in tx_ids { + let tx_id_bytes = conversions::transaction_id_to_bytes(tx_id); + + // Find accounts affected by this transaction. 
+ let affected_account_ids: Vec<Vec<u8>> = schema::accounts::table + .filter(schema::accounts::transaction_id.eq(&tx_id_bytes)) + .select(schema::accounts::account_id) + .load(conn)?; + + // Delete inflight account rows for this tx. + diesel::delete( + schema::accounts::table.filter(schema::accounts::transaction_id.eq(&tx_id_bytes)), + ) + .execute(conn)?; + + // Check if any affected accounts had their creation fully reverted + // (no committed row and no remaining inflight rows). + for account_id_bytes in &affected_account_ids { + let remaining: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id_bytes)) + .count() + .get_result(conn)?; + + if remaining == 0 { + let account_id = conversions::network_account_id_from_bytes(account_id_bytes)?; + reverted_accounts.push(account_id); + } + } + + // Delete inflight-created notes (created_by = tx_id). + diesel::delete(schema::notes::table.filter(schema::notes::created_by.eq(&tx_id_bytes))) + .execute(conn)?; + + // Un-nullify consumed notes (set consumed_by = NULL where consumed_by = tx_id). + diesel::update(schema::notes::table.filter(schema::notes::consumed_by.eq(&tx_id_bytes))) + .set(schema::notes::consumed_by.eq(None::<Vec<u8>>)) + .execute(conn)?; + } + + Ok(reverted_accounts) +} diff --git a/crates/ntx-builder/src/db/models/queries/note_scripts.rs b/crates/ntx-builder/src/db/models/queries/note_scripts.rs new file mode 100644 index 0000000000..09c03e4c1e --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/note_scripts.rs @@ -0,0 +1,56 @@ +//! Database queries for persisting and retrieving note scripts. 
+ +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_protocol::Word; +use miden_protocol::note::NoteScript; + +use crate::db::models::conv as conversions; +use crate::db::schema; + +#[derive(Insertable)] +#[diesel(table_name = schema::note_scripts)] +struct NoteScriptInsert { + script_root: Vec<u8>, + script_data: Vec<u8>, +} + +#[derive(Queryable, Selectable)] +#[diesel(table_name = schema::note_scripts)] +struct NoteScriptRow { + script_data: Vec<u8>, +} + +/// Looks up a note script by its root hash. +pub fn lookup_note_script( + conn: &mut SqliteConnection, + script_root: &Word, +) -> Result<Option<NoteScript>, DatabaseError> { + let root_bytes = conversions::word_to_bytes(script_root); + + let row: Option<NoteScriptRow> = schema::note_scripts::table + .find(root_bytes) + .select(NoteScriptRow::as_select()) + .first(conn) + .optional()?; + + row.map(|r| conversions::note_script_from_bytes(&r.script_data)).transpose() +} + +/// Inserts a note script (idempotent via INSERT OR IGNORE). +pub fn insert_note_script( + conn: &mut SqliteConnection, + script_root: &Word, + script: &NoteScript, +) -> Result<(), DatabaseError> { + let insert = NoteScriptInsert { + script_root: conversions::word_to_bytes(script_root), + script_data: conversions::note_script_to_bytes(script), + }; + + diesel::insert_or_ignore_into(schema::note_scripts::table) + .values(&insert) + .execute(conn)?; + + Ok(()) +} diff --git a/crates/ntx-builder/src/db/models/queries/notes.rs b/crates/ntx-builder/src/db/models/queries/notes.rs new file mode 100644 index 0000000000..1c0145a9b1 --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/notes.rs @@ -0,0 +1,166 @@ +//! Note-related queries and models. 
+ +use diesel::prelude::*; +use miden_node_db::DatabaseError; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Nullifier; + +use crate::actor::inflight_note::InflightNetworkNote; +use crate::db::models::conv as conversions; +use crate::db::schema; + +// MODELS +// ================================================================================================ + +/// Row read from the unified `notes` table. +#[derive(Debug, Clone, Queryable, Selectable)] +#[diesel(table_name = schema::notes)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct NoteRow { + pub note_data: Vec<u8>, + pub attempt_count: i32, + pub last_attempt: Option<i64>, +} + +/// Row for inserting into the unified `notes` table. +#[derive(Debug, Clone, Insertable)] +#[diesel(table_name = schema::notes)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct NoteInsert { + pub nullifier: Vec<u8>, + pub account_id: Vec<u8>, + pub note_data: Vec<u8>, + pub attempt_count: i32, + pub last_attempt: Option<i64>, + pub created_by: Option<Vec<u8>>, + pub consumed_by: Option<Vec<u8>>, +} + +// QUERIES +// ================================================================================================ + +/// Batch inserts committed notes (`created_by = NULL`, `consumed_by = NULL`). 
+/// +/// # Raw SQL +/// +/// Per note: +/// +/// ```sql +/// INSERT OR REPLACE INTO notes +/// (nullifier, account_id, note_data, attempt_count, last_attempt, created_by, consumed_by) +/// VALUES (?1, ?2, ?3, 0, NULL, NULL, NULL) +/// ``` +pub fn insert_committed_notes( + conn: &mut SqliteConnection, + notes: &[SingleTargetNetworkNote], +) -> Result<(), DatabaseError> { + for note in notes { + let row = NoteInsert { + nullifier: conversions::nullifier_to_bytes(&note.nullifier()), + account_id: conversions::network_account_id_to_bytes(note.account_id()), + note_data: conversions::single_target_note_to_bytes(note), + attempt_count: 0, + last_attempt: None, + created_by: None, + consumed_by: None, + }; + diesel::replace_into(schema::notes::table).values(&row).execute(conn)?; + } + Ok(()) +} + +/// Returns notes available for consumption by a given account. +/// +/// Queries unconsumed notes (`consumed_by IS NULL`) for the account that have not exceeded the +/// maximum attempt count, then applies backoff filtering in Rust via +/// `InflightNetworkNote::is_available`. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT note_data, attempt_count, last_attempt +/// FROM notes +/// WHERE +/// account_id = ?1 +/// AND consumed_by IS NULL +/// AND attempt_count < ?2 +/// ``` +#[expect(clippy::cast_possible_wrap)] +pub fn available_notes( + conn: &mut SqliteConnection, + account_id: NetworkAccountId, + block_num: BlockNumber, + max_attempts: usize, +) -> Result<Vec<InflightNetworkNote>, DatabaseError> { + let account_id_bytes = conversions::network_account_id_to_bytes(account_id); + + // Get unconsumed notes for this account that haven't exceeded the max attempt count. 
+ let rows: Vec<NoteRow> = schema::notes::table + .filter(schema::notes::account_id.eq(&account_id_bytes)) + .filter(schema::notes::consumed_by.is_null()) + .filter(schema::notes::attempt_count.lt(max_attempts as i32)) + .select(NoteRow::as_select()) + .load(conn)?; + + let mut result = Vec::new(); + for row in rows { + #[expect(clippy::cast_sign_loss)] + let attempt_count = row.attempt_count as usize; + let note = note_row_to_inflight( + &row.note_data, + attempt_count, + row.last_attempt.map(conversions::block_num_from_i64), + )?; + if note.is_available(block_num) { + result.push(note); + } + } + + Ok(result) +} + +/// Marks notes as failed by incrementing `attempt_count` and setting `last_attempt`. +/// +/// # Raw SQL +/// +/// Per nullifier: +/// +/// ```sql +/// UPDATE notes +/// SET attempt_count = attempt_count + 1, last_attempt = ?1 +/// WHERE nullifier = ?2 +/// ``` +pub fn notes_failed( + conn: &mut SqliteConnection, + nullifiers: &[Nullifier], + block_num: BlockNumber, +) -> Result<(), DatabaseError> { + let block_num_val = conversions::block_num_to_i64(block_num); + + for nullifier in nullifiers { + let nullifier_bytes = conversions::nullifier_to_bytes(nullifier); + + diesel::update(schema::notes::table.find(&nullifier_bytes)) + .set(( + schema::notes::attempt_count.eq(schema::notes::attempt_count + 1), + schema::notes::last_attempt.eq(Some(block_num_val)), + )) + .execute(conn)?; + } + Ok(()) +} + +// HELPERS +// ================================================================================================ + +/// Constructs an `InflightNetworkNote` from DB row fields. 
+fn note_row_to_inflight( + note_data: &[u8], + attempt_count: usize, + last_attempt: Option<BlockNumber>, +) -> Result<InflightNetworkNote, DatabaseError> { + let note = conversions::single_target_note_from_bytes(note_data)?; + Ok(InflightNetworkNote::from_parts(note, attempt_count, last_attempt)) +} diff --git a/crates/ntx-builder/src/db/models/queries/tests.rs b/crates/ntx-builder/src/db/models/queries/tests.rs new file mode 100644 index 0000000000..0db95c018a --- /dev/null +++ b/crates/ntx-builder/src/db/models/queries/tests.rs @@ -0,0 +1,562 @@ +//! DB-level tests for NTX builder query functions. + +use diesel::prelude::*; +use miden_node_proto::domain::account::NetworkAccountId; +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::Word; +use miden_protocol::account::{AccountId, AccountStorageMode, AccountType}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::NoteExecutionHint; +use miden_protocol::testing::account_id::{ + ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE, + AccountIdBuilder, +}; +use miden_protocol::transaction::TransactionId; +use miden_standards::note::NetworkAccountTarget; +use miden_standards::testing::note::NoteBuilder; +use rand_chacha::ChaCha20Rng; +use rand_chacha::rand_core::SeedableRng; + +use super::*; +use crate::db::models::conv as conversions; +use crate::db::{Db, schema}; + +// TEST HELPERS +// ================================================================================================ + +/// Creates a file-backed SQLite connection with migrations applied. +fn test_conn() -> (SqliteConnection, tempfile::TempDir) { + Db::test_conn() +} + +/// Creates a network account ID from a test constant. +fn mock_network_account_id() -> NetworkAccountId { + let account_id: AccountId = + ACCOUNT_ID_REGULAR_NETWORK_ACCOUNT_IMMUTABLE_CODE.try_into().unwrap(); + NetworkAccountId::try_from(account_id).unwrap() +} + +/// Creates a distinct network account ID using a seeded RNG. 
+fn mock_network_account_id_seeded(seed: u8) -> NetworkAccountId { + let account_id = AccountIdBuilder::new() + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Network) + .build_with_seed([seed; 32]); + NetworkAccountId::try_from(account_id).unwrap() +} + +/// Creates a unique `TransactionId` from a seed value. +fn mock_tx_id(seed: u64) -> TransactionId { + let w = |n: u64| Word::try_from([n, 0, 0, 0]).unwrap(); + TransactionId::new(w(seed), w(seed + 1), w(seed + 2), w(seed + 3)) +} + +/// Creates a `SingleTargetNetworkNote` targeting the given network account. +fn mock_single_target_note( + network_account_id: NetworkAccountId, + seed: u8, +) -> SingleTargetNetworkNote { + let mut rng = ChaCha20Rng::from_seed([seed; 32]); + let sender = AccountIdBuilder::new() + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Private) + .build_with_rng(&mut rng); + + let target = NetworkAccountTarget::new(network_account_id.inner(), NoteExecutionHint::Always) + .expect("network account should be valid target"); + + let note = NoteBuilder::new(sender, rng).attachment(target).build().unwrap(); + + SingleTargetNetworkNote::try_from(note).expect("note should be single-target network note") +} + +/// Counts the total number of rows in the `notes` table. +fn count_notes(conn: &mut SqliteConnection) -> i64 { + schema::notes::table.count().get_result(conn).unwrap() +} + +/// Counts the total number of rows in the `accounts` table. +fn count_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table.count().get_result(conn).unwrap() +} + +/// Counts inflight account rows. +fn count_inflight_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table + .filter(schema::accounts::transaction_id.is_not_null()) + .count() + .get_result(conn) + .unwrap() +} + +/// Counts committed account rows. 
+fn count_committed_accounts(conn: &mut SqliteConnection) -> i64 { + schema::accounts::table + .filter(schema::accounts::transaction_id.is_null()) + .count() + .get_result(conn) + .unwrap() +} + +// PURGE INFLIGHT TESTS +// ================================================================================================ + +#[test] +fn purge_inflight_clears_all_inflight_state() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Insert committed account. + upsert_committed_account(conn, account_id, &mock_account(account_id)).unwrap(); + + // Insert a transaction (creates inflight account row + note + consumption). + add_transaction(conn, &tx_id, None, std::slice::from_ref(&note), &[]).unwrap(); + + assert!(count_inflight_accounts(conn) == 0); // No account delta, so no inflight account. + assert_eq!(count_notes(conn), 1); + + // Mark note as consumed by another tx. + let tx_id2 = mock_tx_id(2); + add_transaction(conn, &tx_id2, None, &[], &[note.nullifier()]).unwrap(); + + // Verify consumed_by is set. + let consumed_count: i64 = schema::notes::table + .filter(schema::notes::consumed_by.is_not_null()) + .count() + .get_result(conn) + .unwrap(); + assert_eq!(consumed_count, 1); + + // Purge inflight state. + purge_inflight(conn).unwrap(); + + // Inflight accounts should be gone. + assert_eq!(count_inflight_accounts(conn), 0); + // Committed account should remain. + assert_eq!(count_committed_accounts(conn), 1); + // Inflight-created notes should be gone. 
+ assert_eq!(count_notes(conn), 0); +} + +// HANDLE TRANSACTION ADDED TESTS +// ================================================================================================ + +#[test] +fn transaction_added_inserts_notes_and_marks_consumed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note1 = mock_single_target_note(account_id, 10); + let note2 = mock_single_target_note(account_id, 20); + + // Insert committed note first (to test consumption). + insert_committed_notes(conn, std::slice::from_ref(&note1)).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Add transaction that creates note2 and consumes note1. + add_transaction(conn, &tx_id, None, std::slice::from_ref(&note2), &[note1.nullifier()]) + .unwrap(); + + // Should now have 2 notes total. + assert_eq!(count_notes(conn), 2); + + // note1 should be consumed. + let consumed: Option<Vec<u8>> = schema::notes::table + .find(conversions::nullifier_to_bytes(&note1.nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_some()); + + // note2 should have created_by set. + let created: Option<Vec<u8>> = schema::notes::table + .find(conversions::nullifier_to_bytes(&note2.nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_some()); +} + +#[test] +fn transaction_added_is_idempotent_for_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Insert the same transaction twice. + add_transaction(conn, &tx_id, None, std::slice::from_ref(&note), &[]).unwrap(); + add_transaction(conn, &tx_id, None, std::slice::from_ref(&note), &[]).unwrap(); + + // Should only have one note (INSERT OR IGNORE). 
+ assert_eq!(count_notes(conn), 1); +} + +// HANDLE BLOCK COMMITTED TESTS +// ================================================================================================ + +#[test] +fn block_committed_promotes_inflight_notes_to_committed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + + // Add a transaction that creates a note. + add_transaction(conn, &tx_id, None, std::slice::from_ref(&note), &[]).unwrap(); + + // Verify created_by is set. + let created: Option<Vec<u8>> = schema::notes::table + .find(conversions::nullifier_to_bytes(&note.nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_some()); + + // Commit the block. + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // created_by should now be NULL (promoted to committed). + let created: Option<Vec<u8>> = schema::notes::table + .find(conversions::nullifier_to_bytes(&note.nullifier())) + .select(schema::notes::created_by) + .first(conn) + .unwrap(); + assert!(created.is_none()); +} + +#[test] +fn block_committed_deletes_consumed_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + // Insert a committed note. + insert_committed_notes(conn, std::slice::from_ref(&note)).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Consume it via a transaction. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note.nullifier()]).unwrap(); + + // Commit the block. + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // Consumed note should be deleted. 
+ assert_eq!(count_notes(conn), 0); +} + +#[test] +fn block_committed_promotes_inflight_account_to_committed() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let account = mock_account(account_id); + + // Insert committed account. + upsert_committed_account(conn, account_id, &account).unwrap(); + assert_eq!(count_committed_accounts(conn), 1); + + // Insert inflight row. + let tx_id = mock_tx_id(1); + let row = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(conversions::transaction_id_to_bytes(&tx_id)), + account_data: conversions::account_to_bytes(&account), + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn).unwrap(); + + assert_eq!(count_inflight_accounts(conn), 1); + assert_eq!(count_committed_accounts(conn), 1); + + // Commit the block. + let block_num = BlockNumber::from(1u32); + let header = mock_block_header(block_num); + commit_block(conn, &[tx_id], block_num, &header).unwrap(); + + // Should have 1 committed and 0 inflight. + assert_eq!(count_committed_accounts(conn), 1); + assert_eq!(count_inflight_accounts(conn), 0); +} + +// HANDLE TRANSACTIONS REVERTED TESTS +// ================================================================================================ + +#[test] +fn transactions_reverted_restores_consumed_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + // Insert committed note. + insert_committed_notes(conn, std::slice::from_ref(&note)).unwrap(); + + // Consume it via a transaction. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note.nullifier()]).unwrap(); + + // Verify consumed. 
+ let consumed: Option<Vec<u8>> = schema::notes::table + .find(conversions::nullifier_to_bytes(&note.nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_some()); + + // Revert the transaction. + let reverted = revert_transaction(conn, &[tx_id]).unwrap(); + assert!(reverted.is_empty()); + + // Note should be un-consumed. + let consumed: Option<Vec<u8>> = schema::notes::table + .find(conversions::nullifier_to_bytes(&note.nullifier())) + .select(schema::notes::consumed_by) + .first(conn) + .unwrap(); + assert!(consumed.is_none()); +} + +#[test] +fn transactions_reverted_deletes_inflight_created_notes() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let tx_id = mock_tx_id(1); + let note = mock_single_target_note(account_id, 10); + + // Add transaction that creates a note. + add_transaction(conn, &tx_id, None, std::slice::from_ref(&note), &[]).unwrap(); + assert_eq!(count_notes(conn), 1); + + // Revert the transaction. + revert_transaction(conn, &[tx_id]).unwrap(); + + // Inflight-created note should be deleted. + assert_eq!(count_notes(conn), 0); +} + +#[test] +fn transactions_reverted_reports_reverted_account_creations() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let account = mock_account(account_id); + let tx_id = mock_tx_id(1); + + // Insert an inflight account row (simulating account creation by tx). + let row = AccountInsert { + account_id: conversions::network_account_id_to_bytes(account_id), + transaction_id: Some(conversions::transaction_id_to_bytes(&tx_id)), + account_data: conversions::account_to_bytes(&account), + }; + diesel::insert_into(schema::accounts::table).values(&row).execute(conn).unwrap(); + + // Revert the transaction --- account creation should be reported. + let reverted = revert_transaction(conn, &[tx_id]).unwrap(); + assert_eq!(reverted.len(), 1); + assert_eq!(reverted[0], account_id); + + // Account should be gone. 
+ assert_eq!(count_accounts(conn), 0); +} + +// AVAILABLE NOTES TESTS +// ================================================================================================ + +#[test] +fn available_notes_filters_consumed_and_exceeded_attempts() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note_good = mock_single_target_note(account_id, 10); + let note_consumed = mock_single_target_note(account_id, 20); + let note_failed = mock_single_target_note(account_id, 30); + + // Insert all as committed. + insert_committed_notes(conn, &[note_good.clone(), note_consumed.clone(), note_failed.clone()]) + .unwrap(); + + // Consume one note. + let tx_id = mock_tx_id(1); + add_transaction(conn, &tx_id, None, &[], &[note_consumed.nullifier()]).unwrap(); + + // Mark one note as failed many times (exceed max_attempts=3). + let block_num = BlockNumber::from(100u32); + notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note_failed.nullifier()], block_num).unwrap(); + + // Query available notes with max_attempts=3. + let result = available_notes(conn, account_id, block_num, 3).unwrap(); + + // Only note_good should be available (note_consumed is consumed, note_failed exceeded + // attempts). 
+ assert_eq!(result.len(), 1); + assert_eq!(result[0].to_inner().nullifier(), note_good.nullifier()); +} + +#[test] +fn available_notes_only_returns_notes_for_specified_account() { + let (conn, _dir) = &mut test_conn(); + + let account_id_1 = mock_network_account_id(); + let account_id_2 = mock_network_account_id_seeded(42); + + let note_acct1 = mock_single_target_note(account_id_1, 10); + let note_acct2 = mock_single_target_note(account_id_2, 20); + + insert_committed_notes(conn, &[note_acct1.clone(), note_acct2]).unwrap(); + + let block_num = BlockNumber::from(100u32); + let result = available_notes(conn, account_id_1, block_num, 30).unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].to_inner().nullifier(), note_acct1.nullifier()); +} + +// NOTES FAILED TESTS +// ================================================================================================ + +#[test] +fn notes_failed_increments_attempt_count() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note = mock_single_target_note(account_id, 10); + + insert_committed_notes(conn, std::slice::from_ref(&note)).unwrap(); + + let block_num = BlockNumber::from(5u32); + notes_failed(conn, &[note.nullifier()], block_num).unwrap(); + notes_failed(conn, &[note.nullifier()], block_num).unwrap(); + + let (attempt_count, last_attempt): (i32, Option<i64>) = schema::notes::table + .find(conversions::nullifier_to_bytes(&note.nullifier())) + .select((schema::notes::attempt_count, schema::notes::last_attempt)) + .first(conn) + .unwrap(); + + assert_eq!(attempt_count, 2); + assert_eq!(last_attempt, Some(conversions::block_num_to_i64(block_num))); +} + +// CHAIN STATE TESTS +// ================================================================================================ + +#[test] +fn upsert_chain_state_updates_singleton() { + let (conn, _dir) = &mut test_conn(); + + let block_num_1 = BlockNumber::from(1u32); + let header_1 = mock_block_header(block_num_1); + 
upsert_chain_state(conn, block_num_1, &header_1).unwrap(); + + // Upsert again with higher block. + let block_num_2 = BlockNumber::from(2u32); + let header_2 = mock_block_header(block_num_2); + upsert_chain_state(conn, block_num_2, &header_2).unwrap(); + + // Should only have one row. + let row_count: i64 = schema::chain_state::table.count().get_result(conn).unwrap(); + assert_eq!(row_count, 1); + + // Should have the latest block number. + let stored_block_num: i64 = schema::chain_state::table + .select(schema::chain_state::block_num) + .first(conn) + .unwrap(); + assert_eq!(stored_block_num, conversions::block_num_to_i64(block_num_2)); +} + +// NOTE SCRIPT TESTS +// ================================================================================================ + +#[test] +fn note_script_insert_and_lookup() { + let (conn, _dir) = &mut test_conn(); + + // Extract a NoteScript from a mock note. + let account_id = mock_network_account_id(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into(); + let script = note.script().clone(); + let root = script.root(); + + // Insert the script. + insert_note_script(conn, &root, &script).unwrap(); + + // Look it up — should match the original. + let found = lookup_note_script(conn, &root).unwrap(); + assert!(found.is_some()); + assert_eq!(found.unwrap().root(), script.root()); +} + +#[test] +fn note_script_lookup_returns_none_for_missing() { + let (conn, _dir) = &mut test_conn(); + + let missing_root = Word::default(); + let found = lookup_note_script(conn, &missing_root).unwrap(); + assert!(found.is_none()); +} + +#[test] +fn note_script_insert_is_idempotent() { + let (conn, _dir) = &mut test_conn(); + + let account_id = mock_network_account_id(); + let note: miden_protocol::note::Note = mock_single_target_note(account_id, 10).into(); + let script = note.script().clone(); + let root = script.root(); + + // Insert the same script twice — should not error. 
+ insert_note_script(conn, &root, &script).unwrap(); + insert_note_script(conn, &root, &script).unwrap(); + + // Should still be retrievable. + let found = lookup_note_script(conn, &root).unwrap(); + assert!(found.is_some()); +} + +// HELPERS (domain type construction) +// ================================================================================================ + +/// Creates a mock `Account` for a network account. +/// +/// Uses `AccountBuilder` with minimal components needed for serialization. +fn mock_account(_account_id: NetworkAccountId) -> miden_protocol::account::Account { + use miden_protocol::account::auth::PublicKeyCommitment; + use miden_protocol::account::{AccountBuilder, AccountComponent}; + use miden_standards::account::auth::AuthFalcon512Rpo; + + let component_code = miden_standards::code_builder::CodeBuilder::default() + .compile_component_code("test::interface", "pub proc test_proc push.1.2 add end") + .unwrap(); + + let component = + AccountComponent::new(component_code, vec![]).unwrap().with_supports_all_types(); + + AccountBuilder::new([0u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Network) + .with_component(component) + .with_auth_component(AuthFalcon512Rpo::new(PublicKeyCommitment::from(Word::default()))) + .build_existing() + .unwrap() +} + +/// Creates a mock `BlockHeader` for the given block number. +fn mock_block_header(block_num: BlockNumber) -> miden_protocol::block::BlockHeader { + miden_protocol::block::BlockHeader::mock(block_num, None, None, &[], Word::default()) +} diff --git a/crates/ntx-builder/src/db/schema.rs b/crates/ntx-builder/src/db/schema.rs new file mode 100644 index 0000000000..93dca8ce5e --- /dev/null +++ b/crates/ntx-builder/src/db/schema.rs @@ -0,0 +1,39 @@ +// @generated automatically by Diesel CLI. + +diesel::table! 
{ + accounts (order_id) { + order_id -> Integer, + account_id -> Binary, + account_data -> Binary, + transaction_id -> Nullable<Binary>, + } +} + +diesel::table! { + chain_state (id) { + id -> Integer, + block_num -> BigInt, + block_header -> Binary, + } +} + +diesel::table! { + note_scripts (script_root) { + script_root -> Binary, + script_data -> Binary, + } +} + +diesel::table! { + notes (nullifier) { + nullifier -> Binary, + account_id -> Binary, + note_data -> Binary, + attempt_count -> Integer, + last_attempt -> Nullable<BigInt>, + created_by -> Nullable<Binary>, + consumed_by -> Nullable<Binary>, + } +} + +diesel::allow_tables_to_appear_in_same_query!(accounts, chain_state, note_scripts, notes,); diff --git a/crates/ntx-builder/src/db/schema_hash.rs b/crates/ntx-builder/src/db/schema_hash.rs new file mode 100644 index 0000000000..80d00b4c47 --- /dev/null +++ b/crates/ntx-builder/src/db/schema_hash.rs @@ -0,0 +1,191 @@ +//! Schema verification to detect database schema changes. +//! +//! Detects: +//! +//! - Direct modifications to the database schema outside of migrations +//! - Running a node against a database created with different set of migrations +//! - Forgetting to reset the database after schema changes i.e. for a specific migration +//! +//! The verification works by creating an in-memory reference database, applying all +//! migrations to it, and comparing its schema against the actual database schema. + +use diesel::{Connection, RunQueryDsl, SqliteConnection}; +use diesel_migrations::MigrationHarness; +use miden_node_db::SchemaVerificationError; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::migrations::MIGRATIONS; + +/// Represents a schema object for comparison. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +struct SchemaObject { + object_type: String, + name: String, + sql: String, +} + +/// Represents a row from the `sqlite_schema` table. 
+#[derive(diesel::QueryableByName, Debug)] +struct SqliteSchemaRow { + #[diesel(sql_type = diesel::sql_types::Text)] + schema_type: String, + #[diesel(sql_type = diesel::sql_types::Text)] + name: String, + #[diesel(sql_type = diesel::sql_types::Nullable<diesel::sql_types::Text>)] + sql: Option<String>, +} + +/// Extracts all schema objects from a database connection. +fn extract_schema( + conn: &mut SqliteConnection, +) -> Result<Vec<SchemaObject>, SchemaVerificationError> { + let rows: Vec<SqliteSchemaRow> = diesel::sql_query( + "SELECT type as schema_type, name, sql FROM sqlite_schema \ + WHERE type IN ('table', 'index') \ + AND name NOT LIKE 'sqlite_%' \ + AND name NOT LIKE '__diesel_%' \ + ORDER BY type, name", + ) + .load(conn) + .map_err(SchemaVerificationError::SchemaExtraction)?; + + let mut objects: Vec<SchemaObject> = rows + .into_iter() + .filter_map(|row| { + row.sql.map(|sql| SchemaObject { + object_type: row.schema_type, + name: row.name, + sql, + }) + }) + .collect(); + + objects.sort(); + Ok(objects) +} + +/// Computes the expected schema by applying migrations to an in-memory database. +fn compute_expected_schema() -> Result<Vec<SchemaObject>, SchemaVerificationError> { + let mut conn = SqliteConnection::establish(":memory:") + .map_err(SchemaVerificationError::InMemoryDbCreation)?; + + conn.run_pending_migrations(MIGRATIONS) + .map_err(SchemaVerificationError::MigrationApplication)?; + + extract_schema(&mut conn) +} + +/// Verifies that the database schema matches the expected schema. +/// +/// Creates an in-memory database, applies all migrations, and compares schemas. +/// +/// # Errors +/// +/// Returns `SchemaVerificationError::Mismatch` if schemas differ. 
+#[instrument(level = "info", target = COMPONENT, skip_all, err)] +pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificationError> { + let expected = compute_expected_schema()?; + let actual = extract_schema(conn)?; + + if actual != expected { + let expected_names: Vec<_> = expected.iter().map(|o| &o.name).collect(); + let actual_names: Vec<_> = actual.iter().map(|o| &o.name).collect(); + + // Find differences for better error messages. + let missing: Vec<_> = expected.iter().filter(|e| !actual.contains(e)).collect(); + let extra: Vec<_> = actual.iter().filter(|a| !expected.contains(a)).collect(); + + tracing::error!( + target: COMPONENT, + ?expected_names, + ?actual_names, + missing_count = missing.len(), + extra_count = extra.len(), + "Database schema mismatch detected" + ); + + // Log specific differences at debug level. + for obj in &missing { + tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + } + for obj in &extra { + tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + } + + return Err(SchemaVerificationError::Mismatch { + expected_count: expected.len(), + actual_count: actual.len(), + missing_count: missing.len(), + extra_count: extra.len(), + }); + } + + tracing::info!( + target: COMPONENT, + objects = expected.len(), + "Database schema verification passed" + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + use miden_node_db::DatabaseError; + + use super::*; + use crate::db::migrations::apply_migrations; + + #[test] + fn verify_schema_passes_for_correct_schema() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + verify_schema(&mut conn).expect("Should pass for correct schema"); + } + + #[test] + fn verify_schema_fails_for_added_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE 
TABLE rogue_table (id INTEGER PRIMARY KEY)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn verify_schema_fails_for_removed_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("DROP TABLE notes").execute(&mut conn).unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn apply_migrations_succeeds_on_fresh_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + apply_migrations(&mut conn).expect("Should succeed on fresh database"); + } + + #[test] + fn apply_migrations_fails_on_tampered_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE tampered (id INTEGER)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + } +} diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index 62088ce6cc..02c9f547ce 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -1,9 +1,24 @@ use std::num::NonZeroUsize; +use std::path::PathBuf; +use std::sync::Arc; + +use actor::AccountActorContext; +use anyhow::Context; +use block_producer::BlockProducerClient; +use builder::{ChainState, MempoolEventStream}; +use coordinator::Coordinator; +use db::Db; +use futures::TryStreamExt; +use miden_node_utils::lru_cache::LruCache; +use store::StoreClient; +use tokio::sync::{RwLock, mpsc}; +use url::Url; mod actor; mod block_producer; mod builder; mod coordinator; +pub(crate) mod db; mod store; pub use builder::NetworkTransactionBuilder; @@ -13,12 +28,251 @@ pub use builder::NetworkTransactionBuilder; const COMPONENT: &str = "miden-ntx-builder"; -/// 
Maximum number of network notes a network transaction is allowed to consume. -const MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).unwrap(); -const _: () = assert!(MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); +/// Default maximum number of network notes a network transaction is allowed to consume. +const DEFAULT_MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).expect("literal is non-zero"); +const _: () = assert!(DEFAULT_MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); -/// Maximum number of network transactions which should be in progress concurrently. +/// Default maximum number of network transactions which should be in progress concurrently. /// /// This only counts transactions which are being computed locally and does not include /// uncommitted transactions in the mempool. -const MAX_IN_PROGRESS_TXS: usize = 4; +const DEFAULT_MAX_CONCURRENT_TXS: usize = 4; + +/// Default maximum number of blocks to keep in the chain MMR. +const DEFAULT_MAX_BLOCK_COUNT: usize = 4; + +/// Default channel capacity for account loading from the store. +const DEFAULT_ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; + +/// Default channel size for actor event channels. +const DEFAULT_ACTOR_CHANNEL_SIZE: usize = 100; + +/// Default maximum number of attempts to execute a failing note before dropping it. +const DEFAULT_MAX_NOTE_ATTEMPTS: usize = 30; + +/// Default script cache size. +const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = + NonZeroUsize::new(1_000).expect("literal is non-zero"); + +// CONFIGURATION +// ================================================================================================= + +/// Configuration for the Network Transaction Builder. +/// +/// This struct contains all the settings needed to create and run a `NetworkTransactionBuilder`. +#[derive(Debug, Clone)] +pub struct NtxBuilderConfig { + /// Address of the store gRPC server (ntx-builder API). + pub store_url: Url, + + /// Address of the block producer gRPC server. 
+    pub block_producer_url: Url,
+
+    /// Address of the validator gRPC server.
+    pub validator_url: Url,
+
+    /// Address of the remote transaction prover. If `None`, transactions will be proven locally.
+    pub tx_prover_url: Option<Url>,
+
+    /// Size of the LRU cache for note scripts. Scripts are fetched from the store and cached
+    /// to avoid repeated gRPC calls.
+    pub script_cache_size: NonZeroUsize,
+
+    /// Maximum number of network transactions which should be in progress concurrently across
+    /// all account actors.
+    pub max_concurrent_txs: usize,
+
+    /// Maximum number of network notes a single transaction is allowed to consume.
+    pub max_notes_per_tx: NonZeroUsize,
+
+    /// Maximum number of attempts to execute a failing note before dropping it.
+    /// Notes use exponential backoff between attempts.
+    pub max_note_attempts: usize,
+
+    /// Maximum number of blocks to keep in the chain MMR. Older blocks are pruned.
+    pub max_block_count: usize,
+
+    /// Channel capacity for loading accounts from the store during startup.
+    pub account_channel_capacity: usize,
+
+    /// Channel size for each actor's event channel.
+    pub actor_channel_size: usize,
+
+    /// Path to the SQLite database file used for persistent state.
+    pub database_filepath: PathBuf,
+}
+
+impl NtxBuilderConfig {
+    pub fn new(
+        store_url: Url,
+        block_producer_url: Url,
+        validator_url: Url,
+        database_filepath: PathBuf,
+    ) -> Self {
+        Self {
+            store_url,
+            block_producer_url,
+            validator_url,
+            tx_prover_url: None,
+            script_cache_size: DEFAULT_SCRIPT_CACHE_SIZE,
+            max_concurrent_txs: DEFAULT_MAX_CONCURRENT_TXS,
+            max_notes_per_tx: DEFAULT_MAX_NOTES_PER_TX,
+            max_note_attempts: DEFAULT_MAX_NOTE_ATTEMPTS,
+            max_block_count: DEFAULT_MAX_BLOCK_COUNT,
+            account_channel_capacity: DEFAULT_ACCOUNT_CHANNEL_CAPACITY,
+            actor_channel_size: DEFAULT_ACTOR_CHANNEL_SIZE,
+            database_filepath,
+        }
+    }
+
+    /// Sets the remote transaction prover URL.
+    ///
+    /// If not set, transactions will be proven locally.
+    #[must_use]
+    pub fn with_tx_prover_url(mut self, url: Option<Url>) -> Self {
+        self.tx_prover_url = url;
+        self
+    }
+
+    /// Sets the script cache size.
+    #[must_use]
+    pub fn with_script_cache_size(mut self, size: NonZeroUsize) -> Self {
+        self.script_cache_size = size;
+        self
+    }
+
+    /// Sets the maximum number of concurrent transactions.
+    #[must_use]
+    pub fn with_max_concurrent_txs(mut self, max: usize) -> Self {
+        self.max_concurrent_txs = max;
+        self
+    }
+
+    /// Sets the maximum number of notes per transaction.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `max` exceeds `miden_tx::MAX_NUM_CHECKER_NOTES`.
+    #[must_use]
+    pub fn with_max_notes_per_tx(mut self, max: NonZeroUsize) -> Self {
+        assert!(
+            max.get() <= miden_tx::MAX_NUM_CHECKER_NOTES,
+            "max_notes_per_tx ({}) exceeds MAX_NUM_CHECKER_NOTES ({})",
+            max,
+            miden_tx::MAX_NUM_CHECKER_NOTES
+        );
+        self.max_notes_per_tx = max;
+        self
+    }
+
+    /// Sets the maximum number of note execution attempts.
+    #[must_use]
+    pub fn with_max_note_attempts(mut self, max: usize) -> Self {
+        self.max_note_attempts = max;
+        self
+    }
+
+    /// Sets the maximum number of blocks to keep in the chain MMR.
+    #[must_use]
+    pub fn with_max_block_count(mut self, max: usize) -> Self {
+        self.max_block_count = max;
+        self
+    }
+
+    /// Sets the account channel capacity for startup loading.
+    #[must_use]
+    pub fn with_account_channel_capacity(mut self, capacity: usize) -> Self {
+        self.account_channel_capacity = capacity;
+        self
+    }
+
+    /// Sets the actor event channel size.
+    #[must_use]
+    pub fn with_actor_channel_size(mut self, size: usize) -> Self {
+        self.actor_channel_size = size;
+        self
+    }
+
+    /// Builds and initializes the network transaction builder.
+    ///
+    /// This method connects to the store and block producer services, fetches the current
+    /// chain tip, and subscribes to mempool events.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if:
+    /// - The store connection fails
+    /// - The mempool subscription fails (after retries)
+    /// - The store contains no blocks (not bootstrapped)
+    pub async fn build(self) -> anyhow::Result<NetworkTransactionBuilder> {
+        // Set up the database (bootstrap + connection pool).
+        let db = Db::setup(self.database_filepath.clone()).await?;
+
+        // Purge inflight state from previous run.
+        db.purge_inflight().await.context("failed to purge inflight state")?;
+
+        let script_cache = LruCache::new(self.script_cache_size);
+        let coordinator =
+            Coordinator::new(self.max_concurrent_txs, self.actor_channel_size, db.clone());
+
+        let store = StoreClient::new(self.store_url.clone());
+        let block_producer = BlockProducerClient::new(self.block_producer_url.clone());
+
+        let (chain_tip_header, chain_mmr, mempool_events) = loop {
+            let (chain_tip_header, chain_mmr) = store
+                .get_latest_blockchain_data_with_retry()
+                .await?
+                .context("store should contain a latest block")?;
+
+            match block_producer
+                .subscribe_to_mempool_with_retry(chain_tip_header.block_num())
+                .await
+            {
+                Ok(subscription) => {
+                    let stream: MempoolEventStream = Box::pin(subscription.into_stream());
+                    break (chain_tip_header, chain_mmr, stream);
+                },
+                Err(status) if status.code() == tonic::Code::InvalidArgument => {
+                    tracing::warn!(
+                        err = %status,
+                        "mempool subscription failed due to chain tip desync, retrying"
+                    );
+                },
+                Err(err) => return Err(err).context("failed to subscribe to mempool events"),
+            }
+        };
+
+        // Store the chain tip in the DB.
+ db.upsert_chain_state(chain_tip_header.block_num(), chain_tip_header.clone()) + .await + .context("failed to upsert chain state")?; + + let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + + let (notification_tx, notification_rx) = mpsc::channel(1); + + let actor_context = AccountActorContext { + block_producer_url: self.block_producer_url.clone(), + validator_url: self.validator_url.clone(), + tx_prover_url: self.tx_prover_url.clone(), + chain_state: chain_state.clone(), + store: store.clone(), + script_cache, + max_notes_per_tx: self.max_notes_per_tx, + max_note_attempts: self.max_note_attempts, + db: db.clone(), + notification_tx, + }; + + Ok(NetworkTransactionBuilder::new( + self, + coordinator, + store, + db, + chain_state, + actor_context, + mempool_events, + notification_rx, + )) + } +} diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 42a418cc29..ac5f4c8637 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -236,10 +236,10 @@ impl StoreClient { &self, sender: tokio::sync::mpsc::Sender, ) -> Result<(), StoreError> { - let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + let mut block_range = BlockNumber::GENESIS..=BlockNumber::MAX; while let Some(next_start) = self.load_accounts_page(block_range, &sender).await? 
{ - block_range = next_start..=BlockNumber::from(u32::MAX); + block_range = next_start..=BlockNumber::MAX; } Ok(()) diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 6d3589ca3d..2e9767f887 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -33,7 +33,8 @@ assert_matches = { workspace = true } proptest = { version = "1.7" } [build-dependencies] -fs-err = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { version = "7.6" } -tonic-prost-build = { workspace = true } +fs-err = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { version = "7.6" } +tonic-prost-build = { workspace = true } diff --git a/crates/proto/build.rs b/crates/proto/build.rs index b0ac773a72..4f64f4e9dd 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -1,4 +1,5 @@ use std::env; +use std::fmt::Write; use std::path::{Path, PathBuf}; use fs_err as fs; @@ -22,6 +23,8 @@ fn main() -> miette::Result<()> { println!("cargo::rerun-if-changed=../../proto/proto"); println!("cargo::rerun-if-env-changed=BUILD_PROTO"); + miden_node_rocksdb_cxx_linkage_fix::configure(); + // Skip this build script in BUILD_PROTO environment variable is not set to `1`. if env::var("BUILD_PROTO").unwrap_or("0".to_string()) == "0" { return Ok(()); @@ -90,16 +93,17 @@ fn generate_mod_rs(directory: impl AsRef) -> std::io::Result<()> { submodules.sort(); - let contents = submodules.iter().map(|f| format!("pub mod {f};\n")); - let contents = std::iter::once( - "#![allow(clippy::pedantic, reason = \"generated by build.rs and tonic\")]\n".to_string(), - ) - .chain(std::iter::once( - "#![allow(clippy::large_enum_variant, reason = \"generated by build.rs and tonic\")]\n\n" - .to_string(), - )) - .chain(contents) - .collect::(); + // Lints we need to allow for the generated code. 
+    let lints = ["pedantic", "large_enum_variant", "allow_attributes"];
+    let lints = lints.into_iter().fold(String::new(), |mut s, lint| {
+        writeln!(s, "    clippy::{lint},").unwrap();
+        s
+    });
+    let lints =
+        format!("#![expect(\n{lints}    reason = \"generated by build.rs and tonic\"\n)]\n\n");
+
+    let modules = submodules.iter().map(|f| format!("pub mod {f};\n"));
+    let contents = std::iter::once(lints).chain(modules).collect::<String>();

     fs::write(mod_filepath, contents)
 }
diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs
index aa94f306dd..112f84e50b 100644
--- a/crates/proto/src/domain/block.rs
+++ b/crates/proto/src/domain/block.rs
@@ -3,7 +3,14 @@ use std::ops::RangeInclusive;
 
 use miden_protocol::account::AccountId;
 use miden_protocol::block::nullifier_tree::NullifierWitness;
-use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, FeeParameters};
+use miden_protocol::block::{
+    BlockBody,
+    BlockHeader,
+    BlockInputs,
+    BlockNumber,
+    FeeParameters,
+    SignedBlock,
+};
 use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature};
 use miden_protocol::note::{NoteId, NoteInclusionProof};
 use miden_protocol::transaction::PartialBlockchain;
@@ -115,6 +122,84 @@ impl TryFrom for BlockHeader {
     }
 }
 
+// BLOCK BODY
+// ================================================================================================
+
+impl From<&BlockBody> for proto::blockchain::BlockBody {
+    fn from(body: &BlockBody) -> Self {
+        Self { block_body: body.to_bytes() }
+    }
+}
+
+impl From<BlockBody> for proto::blockchain::BlockBody {
+    fn from(body: BlockBody) -> Self {
+        (&body).into()
+    }
+}
+
+impl TryFrom<&proto::blockchain::BlockBody> for BlockBody {
+    type Error = ConversionError;
+
+    fn try_from(value: &proto::blockchain::BlockBody) -> Result<Self, Self::Error> {
+        value.try_into()
+    }
+}
+
+impl TryFrom<proto::blockchain::BlockBody> for BlockBody {
+    type Error = ConversionError;
+    fn try_from(value: proto::blockchain::BlockBody) -> Result<Self, Self::Error> {
+        BlockBody::read_from_bytes(&value.block_body)
.map_err(|source| ConversionError::deserialization_error("BlockBody", source))
+    }
+}
+
+// SIGNED BLOCK
+// ================================================================================================
+
+impl From<&SignedBlock> for proto::blockchain::SignedBlock {
+    fn from(block: &SignedBlock) -> Self {
+        Self {
+            header: Some(block.header().into()),
+            body: Some(block.body().into()),
+            signature: Some(block.signature().into()),
+        }
+    }
+}
+
+impl From<SignedBlock> for proto::blockchain::SignedBlock {
+    fn from(block: SignedBlock) -> Self {
+        (&block).into()
+    }
+}
+
+impl TryFrom<&proto::blockchain::SignedBlock> for SignedBlock {
+    type Error = ConversionError;
+
+    fn try_from(value: &proto::blockchain::SignedBlock) -> Result<Self, Self::Error> {
+        value.try_into()
+    }
+}
+
+impl TryFrom<proto::blockchain::SignedBlock> for SignedBlock {
+    type Error = ConversionError;
+    fn try_from(value: proto::blockchain::SignedBlock) -> Result<Self, Self::Error> {
+        let header = value
+            .header
+            .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))?
+            .try_into()?;
+        let body = value
+            .body
+            .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))?
+            .try_into()?;
+        let signature = value
+            .signature
+            .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))?
+            .try_into()?;
+
+        Ok(SignedBlock::new_unchecked(header, body, signature))
+    }
+}
+
 // BLOCK INPUTS
 // ================================================================================================
 
diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs
index 94fea5bebc..1f7c9cb0db 100644
--- a/crates/proto/src/domain/note.rs
+++ b/crates/proto/src/domain/note.rs
@@ -23,6 +23,33 @@ use super::account::NetworkAccountId;
 use crate::errors::{ConversionError, MissingFieldHelper};
 use crate::generated as proto;
 
+// NOTE TYPE
+// ================================================================================================
+
+impl From<NoteType> for proto::note::NoteType {
+    fn from(note_type: NoteType) -> Self {
+        match note_type {
+            NoteType::Public => proto::note::NoteType::Public,
+            NoteType::Private => proto::note::NoteType::Private,
+        }
+    }
+}
+
+impl TryFrom<proto::note::NoteType> for NoteType {
+    type Error = ConversionError;
+
+    fn try_from(note_type: proto::note::NoteType) -> Result<Self, Self::Error> {
+        match note_type {
+            proto::note::NoteType::Public => Ok(NoteType::Public),
+            proto::note::NoteType::Private => Ok(NoteType::Private),
+            proto::note::NoteType::Unspecified => Err(ConversionError::EnumDiscriminantOutOfRange),
+        }
+    }
+}
+
+// NOTE METADATA
+// ================================================================================================
+
 impl TryFrom<proto::note::NoteMetadata> for NoteMetadata {
     type Error = ConversionError;
 
@@ -31,7 +58,9 @@ impl TryFrom<proto::note::NoteMetadata> for NoteMetadata {
             .sender
             .ok_or_else(|| proto::note::NoteMetadata::missing_field(stringify!(sender)))?
             .try_into()?;
-        let note_type = NoteType::try_from(u64::from(value.note_type))?;
+        let note_type = proto::note::NoteType::try_from(value.note_type)
+            .map_err(|_| ConversionError::EnumDiscriminantOutOfRange)?
+ .try_into()?; let tag = NoteTag::new(value.tag); // Deserialize attachment if present @@ -77,7 +106,7 @@ impl From for proto::note::NetworkNote { impl From for proto::note::NoteMetadata { fn from(val: NoteMetadata) -> Self { let sender = Some(val.sender().into()); - let note_type = val.note_type() as u32; + let note_type = proto::note::NoteType::from(val.note_type()) as i32; let tag = val.tag().as_u32(); let attachment = val.attachment().to_bytes(); diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 69bbe2e28e..135d763e14 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -1,11 +1,13 @@ // This file is @generated by prost-build. -/// Represents a block. +/// Represents a signed block. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Block { - /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::Block\]. - #[prost(bytes = "vec", tag = "1")] - pub block: ::prost::alloc::vec::Vec, +pub struct SignedBlock { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub body: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub signature: ::core::option::Option, } /// Represents a proposed block. 
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs index 61e3a53790..4ec0ae408d 100644 --- a/crates/proto/src/generated/mod.rs +++ b/crates/proto/src/generated/mod.rs @@ -1,5 +1,9 @@ -#![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] -#![allow(clippy::large_enum_variant, reason = "generated by build.rs and tonic")] +#![expect( + clippy::pedantic, + clippy::large_enum_variant, + clippy::allow_attributes, + reason = "generated by build.rs and tonic" +)] pub mod account; pub mod block_producer; diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs index 83d56aeb6b..8bff5858cd 100644 --- a/crates/proto/src/generated/note.rs +++ b/crates/proto/src/generated/note.rs @@ -19,9 +19,9 @@ pub struct NoteMetadata { /// The account which sent the note. #[prost(message, optional, tag = "1")] pub sender: ::core::option::Option, - /// The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted). - #[prost(uint32, tag = "2")] - pub note_type: u32, + /// The type of the note. + #[prost(enumeration = "NoteType", tag = "2")] + pub note_type: i32, /// A value which can be used by the recipient(s) to identify notes intended for them. /// /// See `miden_protocol::note::note_tag` for more info. @@ -128,3 +128,36 @@ pub struct NoteScript { #[prost(bytes = "vec", tag = "2")] pub mast: ::prost::alloc::vec::Vec, } +/// The type of a note. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NoteType { + /// Unspecified note type (default value, should not be used). + Unspecified = 0, + /// Public note - details are visible on-chain. + Public = 1, + /// Private note - details are not visible on-chain. + Private = 2, +} +impl NoteType { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "NOTE_TYPE_UNSPECIFIED", + Self::Public => "NOTE_TYPE_PUBLIC", + Self::Private => "NOTE_TYPE_PRIVATE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NOTE_TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "NOTE_TYPE_PUBLIC" => Some(Self::Public), + "NOTE_TYPE_PRIVATE" => Some(Self::Private), + _ => None, + } + } +} diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 798a1d18e8..5cedf12088 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -428,51 +428,27 @@ pub struct SyncNotesResponse { #[prost(message, repeated, tag = "4")] pub notes: ::prost::alloc::vec::Vec, } -/// State synchronization request. -/// -/// Specifies state updates the requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -/// `account_ids` for that block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateRequest { - /// Last block known by the requester. The response will contain data starting from the next block, - /// until the first block which contains a note of matching the requested tag, or the chain tip - /// if there are no notes. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Accounts' commitment to include in the response. +/// Chain MMR synchronization request. +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct SyncChainMmrRequest { + /// Block range from which to synchronize the chain MMR. /// - /// An account commitment will be included if-and-only-if it is the latest update. 
Meaning it is - /// possible there was an update to the account for the given range, but if it is not the latest, - /// it won't be included in the response. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "3")] - pub note_tags: ::prost::alloc::vec::Vec, + /// The response will contain MMR delta starting after `block_range.block_from` up to + /// `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last + /// block already present in the caller's MMR so the delta begins at the next block. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, } -/// Represents the result of syncing state request. +/// Represents the result of syncing chain MMR. #[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateResponse { - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// Block header of the block with the first note matching the specified criteria. +pub struct SyncChainMmrResponse { + /// For which block range the MMR delta is returned. + #[prost(message, optional, tag = "1")] + pub block_range: ::core::option::Option, + /// Data needed to update the partial MMR from `request.block_range.block_from + 1` to + /// `response.block_range.block_to` or the chain tip. #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - #[prost(message, optional, tag = "3")] pub mmr_delta: ::core::option::Option, - /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. 
- #[prost(message, repeated, tag = "5")] - pub accounts: ::prost::alloc::vec::Vec, - /// List of transactions executed against requested accounts between `request.block_num + 1` and - /// `response.block_header.block_num`. - #[prost(message, repeated, tag = "6")] - pub transactions: ::prost::alloc::vec::Vec, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "7")] - pub notes: ::prost::alloc::vec::Vec, } /// Storage map synchronization request. /// @@ -585,7 +561,7 @@ pub struct TransactionRecord { #[derive(Clone, PartialEq, ::prost::Message)] pub struct RpcLimits { /// Maps RPC endpoint names to their parameter limits. - /// Key: endpoint name (e.g., "CheckNullifiers", "SyncState") + /// Key: endpoint name (e.g., "CheckNullifiers") /// Value: map of parameter names to their limit values #[prost(map = "string, message", tag = "1")] pub endpoints: ::std::collections::HashMap< @@ -713,6 +689,29 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "Status")); self.inner.unary(req, path, codec).await } + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. 
+ pub async fn get_limits( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + self.inner.unary(req, path, codec).await + } /// Returns a Sparse Merkle Tree opening proof for each requested nullifier /// /// Each proof demonstrates either: @@ -928,14 +927,12 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SubmitProvenBatch")); self.inner.unary(req, path, codec).await } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - pub async fn sync_nullifiers( + /// Returns transactions records for specific accounts within a block range. + pub async fn sync_transactions( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -947,17 +944,27 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); self.inner.unary(req, path, codec).await } - /// Returns account vault updates for specified account within a block range. 
- pub async fn sync_account_vault( + /// Returns info which can be used by the client to sync up to the tip of chain for the notes + /// they are interested in. + /// + /// Client specifies the `note_tags` they are interested in, and the block height from which to + /// search for new for matching notes for. The request will then return the next block containing + /// any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block + /// until reaching the tip of the chain. + pub async fn sync_notes( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -969,25 +976,19 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. 
- pub async fn sync_notes( + /// Note that only 16-bit prefixes are supported at this time. + pub async fn sync_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -999,31 +1000,17 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. - pub async fn sync_state( + /// Returns account vault updates for specified account within a block range. 
+ pub async fn sync_account_vault( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1035,9 +1022,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); self.inner.unary(req, path, codec).await } /// Returns storage map updates for specified account and storage slots within a block range. @@ -1065,12 +1052,12 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); self.inner.unary(req, path, codec).await } - /// Returns transactions records for specific accounts within a block range. - pub async fn sync_transactions( + /// Returns MMR delta needed to synchronize the chain MMR within the requested block range. + pub async fn sync_chain_mmr( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1082,32 +1069,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncChainMmr"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. 
- /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. - pub async fn get_limits( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncChainMmr")); self.inner.unary(req, path, codec).await } } @@ -1130,6 +1094,15 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result, tonic::Status>; + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. + async fn get_limits( + &self, + request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status>; /// Returns a Sparse Merkle Tree opening proof for each requested nullifier /// /// Each proof demonstrates either: @@ -1212,33 +1185,25 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( + /// Returns transactions records for specific accounts within a block range. 
+ async fn sync_transactions( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. + /// Returns info which can be used by the client to sync up to the tip of chain for the notes + /// they are interested in. /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. + /// Client specifies the `note_tags` they are interested in, and the block height from which to + /// search for new for matching notes for. The request will then return the next block containing + /// any note matching the provided tags. /// /// The response includes each note's metadata and inclusion proof. /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. + /// A basic note sync can be implemented by repeatedly requesting the previous response's block + /// until reaching the tip of the chain. async fn sync_notes( &self, request: tonic::Request, @@ -1246,26 +1211,22 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. 
It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. - async fn sync_state( + /// Note that only 16-bit prefixes are supported at this time. + async fn sync_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, + tonic::Status, + >; + /// Returns account vault updates for specified account within a block range. + async fn sync_account_vault( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, tonic::Status, >; /// Returns storage map updates for specified account and storage slots within a block range. @@ -1276,23 +1237,14 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns transactions records for specific accounts within a block range. - async fn sync_transactions( + /// Returns MMR delta needed to synchronize the chain MMR within the requested block range. + async fn sync_chain_mmr( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. 
- async fn get_limits( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; } /// RPC API for the RPC component #[derive(Debug)] @@ -1410,6 +1362,45 @@ pub mod api_server { }; Box::pin(fut) } + "/rpc.Api/GetLimits" => { + #[allow(non_camel_case_types)] + struct GetLimitsSvc(pub Arc); + impl tonic::server::UnaryService<()> for GetLimitsSvc { + type Response = super::RpcLimits; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_limits(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetLimitsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/rpc.Api/CheckNullifiers" => { #[allow(non_camel_case_types)] struct CheckNullifiersSvc(pub Arc); @@ -1775,25 +1766,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncNullifiers" => { + "/rpc.Api/SyncTransactions" => { #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); + struct SyncTransactionsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; + > tonic::server::UnaryService + for SyncTransactionsSvc { + type Response = super::SyncTransactionsResponse; type Future = 
BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_nullifiers(&inner, request).await + ::sync_transactions(&inner, request).await }; Box::pin(fut) } @@ -1804,7 +1795,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncNullifiersSvc(inner); + let method = SyncTransactionsSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1820,25 +1811,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncAccountVault" => { + "/rpc.Api/SyncNotes" => { #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; + struct SyncNotesSvc(pub Arc); + impl tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::SyncNotesResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_account_vault(&inner, request).await + ::sync_notes(&inner, request).await }; Box::pin(fut) } @@ -1849,7 +1838,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncAccountVaultSvc(inner); + let method = SyncNotesSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1865,23 +1854,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncNotes" => { + "/rpc.Api/SyncNullifiers" => { #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl 
tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; + struct SyncNullifiersSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService + for SyncNullifiersSvc { + type Response = super::SyncNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_notes(&inner, request).await + ::sync_nullifiers(&inner, request).await }; Box::pin(fut) } @@ -1892,7 +1883,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncNotesSvc(inner); + let method = SyncNullifiersSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1908,23 +1899,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncState" => { + "/rpc.Api/SyncAccountVault" => { #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; + struct SyncAccountVaultSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService + for SyncAccountVaultSvc { + type Response = super::SyncAccountVaultResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_state(&inner, request).await + ::sync_account_vault(&inner, request).await }; Box::pin(fut) } @@ -1935,7 +1928,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStateSvc(inner); + let method = SyncAccountVaultSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc 
= tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1996,25 +1989,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncTransactions" => { + "/rpc.Api/SyncChainMmr" => { #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; + struct SyncChainMmrSvc(pub Arc); + impl tonic::server::UnaryService + for SyncChainMmrSvc { + type Response = super::SyncChainMmrResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_transactions(&inner, request).await + ::sync_chain_mmr(&inner, request).await }; Box::pin(fut) } @@ -2025,46 +2016,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetLimits" => { - #[allow(non_camel_case_types)] - struct GetLimitsSvc(pub Arc); - impl tonic::server::UnaryService<()> for GetLimitsSvc { - type Response = super::RpcLimits; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_limits(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = 
self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetLimitsSvc(inner); + let method = SyncChainMmrSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index be9d1d6469..49081b9336 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -1,4 +1,15 @@ // This file is @generated by prost-build. +/// Applies a block to the state. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ApplyBlockRequest { + /// Ordered batches encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::batch::OrderedBatches\]. + #[prost(bytes = "vec", tag = "1")] + pub ordered_batches: ::prost::alloc::vec::Vec, + /// Block signed by the Validator. + #[prost(message, optional, tag = "2")] + pub block: ::core::option::Option, +} /// Returns data required to prove the next block. #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockInputsRequest { @@ -628,26 +639,12 @@ pub mod rpc_client { req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNotes")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. 
- /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - pub async fn sync_state( + /// Returns chain MMR updates within a block range. + pub async fn sync_chain_mmr( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -659,9 +656,9 @@ pub mod rpc_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncState"); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncChainMmr"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncState")); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncChainMmr")); self.inner.unary(req, path, codec).await } /// Returns account vault updates for specified account within a block range. @@ -851,26 +848,12 @@ pub mod rpc_server { tonic::Response, tonic::Status, >; - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. 
- /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - async fn sync_state( + /// Returns chain MMR updates within a block range. + async fn sync_chain_mmr( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns account vault updates for specified account within a block range. @@ -1383,25 +1366,27 @@ pub mod rpc_server { }; Box::pin(fut) } - "/store.Rpc/SyncState" => { + "/store.Rpc/SyncChainMmr" => { #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); + struct SyncChainMmrSvc(pub Arc); impl< T: Rpc, - > tonic::server::UnaryService - for SyncStateSvc { - type Response = super::super::rpc::SyncStateResponse; + > tonic::server::UnaryService + for SyncChainMmrSvc { + type Response = super::super::rpc::SyncChainMmrResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request< + super::super::rpc::SyncChainMmrRequest, + >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_state(&inner, request).await + ::sync_chain_mmr(&inner, request).await }; Box::pin(fut) } @@ -1412,7 +1397,7 @@ pub mod rpc_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStateSvc(inner); + let method = SyncChainMmrSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ 
-1707,7 +1692,7 @@ pub mod block_producer_client { /// Applies changes of a new block to the DB and in-memory data structures. pub async fn apply_block( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() @@ -1843,7 +1828,7 @@ pub mod block_producer_server { /// Applies changes of a new block to the DB and in-memory data structures. async fn apply_block( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Retrieves block header by given block number. Optionally, it also returns the MMR path /// and current chain length to authenticate the block's inclusion. @@ -1955,7 +1940,7 @@ pub mod block_producer_server { struct ApplyBlockSvc(pub Arc); impl< T: BlockProducer, - > tonic::server::UnaryService + > tonic::server::UnaryService for ApplyBlockSvc { type Response = (); type Future = BoxFuture< @@ -1964,7 +1949,7 @@ pub mod block_producer_server { >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index f73600f276..e21d19f18b 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -21,9 +21,9 @@ std = ["miden-protocol/std", "miden-tx/std"] tx-prover = ["dep:miden-protocol", "dep:miden-tx", "dep:tokio"] [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] -getrandom = { features = ["wasm_js"], version = "0.3" } +getrandom = { features = ["wasm_js"], version = "0.4" } tonic = { features = ["codegen"], workspace = true } -tonic-web-wasm-client = { default-features = false, version = "0.8" } +tonic-web-wasm-client = { default-features = false, version = "0.9" } [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] tonic = { features = 
["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } diff --git a/crates/remote-prover-client/src/lib.rs b/crates/remote-prover-client/src/lib.rs index d2e0d01823..a319793d9d 100644 --- a/crates/remote-prover-client/src/lib.rs +++ b/crates/remote-prover-client/src/lib.rs @@ -2,7 +2,7 @@ // We allow unused imports here in order because this `macro_use` only makes sense for code // generated by prost under certain circumstances (when `tx-prover` is enabled and the environment // is not wasm) -#![allow(unused_imports)] +#![expect(unused_imports)] #[macro_use] extern crate alloc; @@ -15,7 +15,14 @@ extern crate std; use thiserror::Error; -pub mod remote_prover; +mod remote_prover; + +#[cfg(feature = "batch-prover")] +pub use remote_prover::batch_prover::RemoteBatchProver; +#[cfg(feature = "block-prover")] +pub use remote_prover::block_prover::RemoteBlockProver; +#[cfg(feature = "tx-prover")] +pub use remote_prover::tx_prover::RemoteTransactionProver; /// ERRORS /// =============================================================================================== diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index d1fa435486..c1562e5975 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -105,7 +105,7 @@ impl RemoteBlockProver { pub async fn prove( &self, tx_batches: OrderedBatches, - block_header: BlockHeader, + block_header: &BlockHeader, block_inputs: BlockInputs, ) -> Result { use miden_protocol::utils::Serializable; diff --git a/crates/remote-prover-client/src/remote_prover/generated/mod.rs b/crates/remote-prover-client/src/remote_prover/generated/mod.rs index 806afe9030..2cd709029b 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/mod.rs @@ -1,4 +1,5 @@ 
#![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[cfg(all(feature = "std", target_arch = "wasm32"))] compile_error!("The `std` feature cannot be used when targeting `wasm32`."); diff --git a/crates/rocksdb-cxx-linkage-fix/Cargo.toml b/crates/rocksdb-cxx-linkage-fix/Cargo.toml new file mode 100644 index 0000000000..9e0eb23f7a --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors.workspace = true +description = "Miden C++ stdlib link helper" +edition.workspace = true +homepage.workspace = true +license.workspace = true +name = "miden-node-rocksdb-cxx-linkage-fix" +readme.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lib] +path = "src/lib.rs" + +[lints] +workspace = true + +[dependencies] diff --git a/crates/rocksdb-cxx-linkage-fix/src/lib.rs b/crates/rocksdb-cxx-linkage-fix/src/lib.rs new file mode 100644 index 0000000000..35bc05d004 --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/src/lib.rs @@ -0,0 +1,51 @@ +//! A temporary solution to missing c++ std library linkage when using a precompile static library +//! +//! 
For more information see: + +use std::env; + +pub fn configure() { + println!("cargo:rerun-if-env-changed=ROCKSDB_COMPILE"); + println!("cargo:rerun-if-env-changed=ROCKSDB_LIB_DIR"); + println!("cargo:rerun-if-env-changed=ROCKSDB_STATIC"); + println!("cargo:rerun-if-env-changed=CXXSTDLIB"); + let target = env::var("TARGET").unwrap_or_default(); + if should_link_cpp_stdlib() { + link_cpp_stdlib(&target); + } +} + +fn should_compile() -> bool { + // in sync with + if let Ok(v) = env::var("ROCKSDB_COMPILE") { + if v.to_lowercase() == "true" || v == "1" { + return true; + } + } + false +} + +fn should_link_cpp_stdlib() -> bool { + if should_compile() { + return false; + } + // the value doesn't matter + // + env::var("ROCKSDB_STATIC").is_ok() + // `ROCKSDB_LIB_DIR` is not really discriminative, it only adds extra lookup dirs for the linker +} + +fn link_cpp_stdlib(target: &str) { + // aligned with + // + if let Ok(stdlib) = env::var("CXXSTDLIB") { + println!("cargo:rustc-link-lib=dylib={stdlib}"); + } else if target.contains("apple") || target.contains("freebsd") || target.contains("openbsd") { + println!("cargo:rustc-link-lib=dylib=c++"); + } else if target.contains("linux") { + println!("cargo:rustc-link-lib=dylib=stdc++"); + } else if target.contains("aix") { + println!("cargo:rustc-link-lib=dylib=c++"); + println!("cargo:rustc-link-lib=dylib=c++abi"); + } +} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 30ec4dcb84..537173e67d 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -38,10 +38,10 @@ url = { workspace = true } [dev-dependencies] miden-air = { features = ["testing"], workspace = true } -miden-node-store = { workspace = true } +miden-node-store = { features = ["rocksdb"], workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { workspace = true } -reqwest = { version = "0.12" 
} +reqwest = { workspace = true } rstest = { workspace = true } -tempfile = { version = "3.20" } +tempfile = { workspace = true } diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 4d3cf9387f..bfa7909105 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -24,8 +24,8 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [SubmitProvenTransaction](#submitproventransaction) - [SyncAccountVault](#SyncAccountVault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) @@ -215,25 +215,6 @@ When note synchronization fails, detailed error information is provided through --- -### SyncState - -Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts and -notes) the client is interested in. - -**Limits:** `account_id` (1000), `note_tag` (1000) - -This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block -number in the chain. Client is expected to repeat these requests in a loop until -`response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. - -Each request also returns info about new notes, accounts, etc. created. It also returns Chain MMR delta that can be -used to update the state of Chain MMR. This includes both chain MMR peaks and chain MMR nodes. - -For preserving some degree of privacy, note tags contain only high part of hashes. Thus, returned data contains excessive -notes, client can make additional filtering of that data on its side. - ---- - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. 
@@ -256,6 +237,14 @@ When storage map synchronization fails, detailed error information is provided t --- +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range along with pagination info so the caller can continue syncing until the chain tip. + +--- + ### SyncTransactions Returns transaction records for specific accounts within a block range. diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 13d26962eb..f2a88cc05a 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -192,16 +192,13 @@ impl api_server::Api for RpcService { self.store.clone().get_block_header_by_number(request).await } - async fn sync_state( + async fn sync_chain_mmr( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); - check::(request.get_ref().account_ids.len())?; - check::(request.get_ref().note_tags.len())?; - - self.store.clone().sync_state(request).await + self.store.clone().sync_chain_mmr(request).await } async fn sync_account_storage_maps( @@ -294,7 +291,7 @@ impl api_server::Api for RpcService { Arc::make_mut(&mut mast).strip_decorators(); let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = - NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); + NoteRecipient::new(note.serial_num(), script, note.storage().clone()); let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) }, @@ -356,7 +353,7 @@ impl api_server::Api for RpcService { Arc::make_mut(&mut mast).strip_decorators(); let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = - NoteRecipient::new(note.serial_num(), script, 
note.inputs().clone()); + NoteRecipient::new(note.serial_num(), script, note.storage().clone()); let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) @@ -407,8 +404,7 @@ impl api_server::Api for RpcService { request: Request, ) -> Result, Status> { use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ - SlotData::MapKeys as ProtoMapKeys, - SlotData::AllEntries as ProtoMapAllEntries + SlotData::AllEntries as ProtoMapAllEntries, SlotData::MapKeys as ProtoMapKeys, }; let request = request.into_inner(); @@ -483,6 +479,8 @@ impl api_server::Api for RpcService { ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); + check::(request.get_ref().account_ids.len())?; + self.store.clone().sync_transactions(request).await } @@ -505,7 +503,6 @@ fn out_of_range_error(err: E) -> Status { } /// Check, but don't repeat ourselves mapping the error -#[allow(clippy::result_large_err)] fn check(n: usize) -> Result<(), Status> { ::check(n).map_err(out_of_range_error) } @@ -538,11 +535,8 @@ static RPC_LIMITS: LazyLock = LazyLock::new(|| { endpoint_limits(&[(Nullifier::PARAM_NAME, Nullifier::LIMIT)]), ), ( - "SyncState".into(), - endpoint_limits(&[ - (AccountId::PARAM_NAME, AccountId::LIMIT), - (NoteTag::PARAM_NAME, NoteTag::LIMIT), - ]), + "SyncTransactions".into(), + endpoint_limits(&[(AccountId::PARAM_NAME, AccountId::LIMIT)]), ), ("SyncNotes".into(), endpoint_limits(&[(NoteTag::PARAM_NAME, NoteTag::LIMIT)])), ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index b35fe8b6dc..e70d145638 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -13,12 +13,12 @@ use miden_node_utils::limiter::{ QueryParamAccountIdLimit, QueryParamLimiter, QueryParamNoteIdLimit, - QueryParamNoteTagLimit, QueryParamNullifierLimit, }; use miden_protocol::Word; use 
miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ + Account, AccountBuilder, AccountDelta, AccountId, @@ -28,7 +28,7 @@ use miden_protocol::account::{ }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::testing::noop_auth_component::NoopAuthComponent; -use miden_protocol::transaction::ProvenTransactionBuilder; +use miden_protocol::transaction::{ProvenTransaction, ProvenTransactionBuilder}; use miden_protocol::utils::Serializable; use miden_protocol::vm::ExecutionProof; use miden_standards::account::wallets::BasicWallet; @@ -40,6 +40,53 @@ use url::Url; use crate::Rpc; +/// Byte offset of the account delta commitment in serialized `ProvenTransaction`. +/// Layout: `AccountId` (15) + `initial_commitment` (32) + `final_commitment` (32) = 79 +const DELTA_COMMITMENT_BYTE_OFFSET: usize = 15 + 32 + 32; + +/// Creates a minimal account and its delta for testing proven transaction building. +fn build_test_account(seed: [u8; 32]) -> (Account, AccountDelta) { + let account = AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_assets(vec![]) + .with_component(BasicWallet) + .with_auth_component(NoopAuthComponent) + .build_existing() + .unwrap(); + + let delta: AccountDelta = account.clone().try_into().unwrap(); + (account, delta) +} + +/// Creates a minimal proven transaction for testing. +/// +/// This uses `ExecutionProof::new_dummy()` and is intended for tests that +/// need to test validation logic. 
+fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransaction { + let account_id = AccountId::dummy( + [0; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + ProvenTransactionBuilder::new( + account_id, + [8; 32].try_into().unwrap(), + account.commitment(), + delta.to_commitment(), + 0.into(), + Word::default(), + test_fee(), + u32::MAX.into(), + ExecutionProof::new_dummy(), + ) + .account_update_details(AccountUpdateDetails::Delta(delta.clone())) + .build() + .unwrap() +} + #[tokio::test] async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. @@ -199,6 +246,9 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { let (_, rpc_addr, store_addr) = start_rpc().await; let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; + // Wait for the store to be ready before sending requests. + tokio::time::sleep(Duration::from_millis(100)).await; + // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) @@ -209,54 +259,19 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); - - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let other_account = AccountBuilder::new([1; 32]) - .account_type(AccountType::RegularAccountUpdatableCode) - .storage_mode(AccountStorageMode::Private) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - let incorrect_commitment_delta: AccountDelta = other_account.try_into().unwrap(); - let incorrect_commitment_delta_bytes = incorrect_commitment_delta.to_commitment().as_bytes(); + // Build a valid proven transaction + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); - let account_delta: AccountDelta = account.clone().try_into().unwrap(); - - // Send any request to the RPC. 
- let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + // Create an incorrect delta commitment from a different account + let (other_account, _) = build_test_account([1; 32]); + let incorrect_delta: AccountDelta = other_account.try_into().unwrap(); + let incorrect_commitment_bytes = incorrect_delta.to_commitment().as_bytes(); + // Corrupt the transaction bytes with the incorrect delta commitment let mut tx_bytes = tx.to_bytes(); - let offset = 15 + 32 + 32; - tx_bytes[offset..offset + 32].copy_from_slice(&incorrect_commitment_delta_bytes); + tx_bytes[DELTA_COMMITMENT_BYTE_OFFSET..DELTA_COMMITMENT_BYTE_OFFSET + 32] + .copy_from_slice(&incorrect_commitment_bytes); let request = proto::transaction::ProvenTransaction { transaction: tx_bytes, @@ -295,39 +310,8 @@ async fn rpc_server_rejects_tx_submissions_without_genesis() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); - - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let account_delta: AccountDelta = account.clone().try_into().unwrap(); - - // Send any request to the RPC. 
- let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); let request = proto::transaction::ProvenTransaction { transaction: tx.to_bytes(), @@ -439,6 +423,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, @@ -479,6 +464,7 @@ async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, @@ -509,27 +495,32 @@ async fn get_limits_endpoint() { limits.endpoints.get("CheckNullifiers").expect("CheckNullifiers should exist"); assert_eq!( - check_nullifiers.parameters.get("nullifier"), + check_nullifiers.parameters.get(QueryParamNullifierLimit::PARAM_NAME), Some(&(QueryParamNullifierLimit::LIMIT as u32)), - "CheckNullifiers nullifier limit should be {}", + "CheckNullifiers {} limit should be {}", + QueryParamNullifierLimit::PARAM_NAME, QueryParamNullifierLimit::LIMIT ); - // Verify SyncState endpoint has multiple parameters - let sync_state = limits.endpoints.get("SyncState").expect("SyncState should exist"); + let sync_transactions = + limits.endpoints.get("SyncTransactions").expect("SyncTransactions should exist"); assert_eq!( - sync_state.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), + sync_transactions.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), Some(&(QueryParamAccountIdLimit::LIMIT as 
u32)), - "SyncState {} limit should be {}", + "SyncTransactions {} limit should be {}", QueryParamAccountIdLimit::PARAM_NAME, QueryParamAccountIdLimit::LIMIT ); - assert_eq!( - sync_state.parameters.get(QueryParamNoteTagLimit::PARAM_NAME), - Some(&(QueryParamNoteTagLimit::LIMIT as u32)), - "SyncState {} limit should be {}", - QueryParamNoteTagLimit::PARAM_NAME, - QueryParamNoteTagLimit::LIMIT + + // SyncAccountVault and SyncAccountStorageMaps accept a singular account_id, + // not a repeated list, so they do not have list parameter limits. + assert!( + !limits.endpoints.contains_key("SyncAccountVault"), + "SyncAccountVault should not have list parameter limits" + ); + assert!( + !limits.endpoints.contains_key("SyncAccountStorageMaps"), + "SyncAccountStorageMaps should not have list parameter limits" ); // Verify GetNotesById endpoint @@ -545,3 +536,21 @@ async fn get_limits_endpoint() { // Shutdown to avoid runtime drop error. shutdown_store(store_runtime).await; } + +#[tokio::test] +async fn sync_chain_mmr_returns_delta() { + let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + + let request = proto::rpc::SyncChainMmrRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), + }; + let response = rpc_client.sync_chain_mmr(request).await.expect("sync_chain_mmr should succeed"); + let response = response.into_inner(); + + let mmr_delta = response.mmr_delta.expect("mmr_delta should exist"); + assert_eq!(mmr_delta.forest, 0); + assert!(mmr_delta.data.is_empty()); + + shutdown_store(store_runtime).await; +} diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 1c62c7ab7c..59dae55e1a 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -15,21 +15,24 @@ version.workspace = true workspace = true [dependencies] -anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], 
version = "0.12" } -deadpool-diesel = { features = ["sqlite"], version = "0.6" } -deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } -fs-err = { workspace = true } -hex = { version = "0.4" } -indexmap = { workspace = true } -libsqlite3-sys = { workspace = true } -miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } -miden-node-proto = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miden-node-utils = { workspace = true } -miden-standards = { workspace = true } +anyhow = { workspace = true } +deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } +deadpool-diesel = { features = ["sqlite"], version = "0.6" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } +fs-err = { workspace = true } +futures = { workspace = true } +hex = { version = "0.4" } +indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } +miden-block-prover = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } +miden-node-db = { workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-utils = { workspace = true } +miden-remote-prover-client = { features = ["block-prover"], workspace = true } +miden-standards = { workspace = true } # TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` miden-protocol = { features = ["std", "testing"], workspace = true } pretty_assertions = { workspace = true } @@ -39,15 +42,22 @@ serde = { features = ["derive"], version = "1" } thiserror = { workspace = true } tokio = { features = ["fs", "rt-multi-thread"], workspace = true } tokio-stream = { features = 
["net"], workspace = true } -toml = { version = "0.9" } +toml = { workspace = true } tonic = { default-features = true, workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } +url = { workspace = true } + +[build-dependencies] +fs-err = { workspace = true } +miden-agglayer = { branch = "next", features = ["testing"], git = "https://github.com/0xMiden/miden-base" } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miden-protocol = { features = ["std"], workspace = true } [dev-dependencies] assert_matches = { workspace = true } -criterion = { version = "0.5" } +criterion = "0.8" fs-err = { workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } @@ -55,7 +65,8 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } -termtree = { version = "0.5" } +tempfile = { workspace = true } +termtree = "1.0" [features] default = ["rocksdb"] diff --git a/crates/store/README.md b/crates/store/README.md index ea44889d04..65a4f148ba 100644 --- a/crates/store/README.md +++ b/crates/store/README.md @@ -54,8 +54,8 @@ The full gRPC API can be found [here](../../proto/proto/store.proto). - [SyncNullifiers](#syncnullifiers) - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) @@ -228,23 +228,6 @@ When note synchronization fails, detailed error information is provided through --- -### SyncState - -Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts, -notes, nullifiers) the client is interested in. 
- -This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block -number in the chain. Client is expected to repeat these requests in a loop until -`response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. - -Each request also returns info about new notes, nullifiers etc. created. It also returns Chain MMR delta that can be -used to update the state of Chain MMR. This includes both chain MMR peaks and chain MMR nodes. - -For preserving some degree of privacy, note tags and nullifiers filters contain only high part of hashes. Thus, returned -data contains excessive notes and nullifiers, client can make additional filtering of that data on its side. - ---- - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. @@ -267,6 +250,14 @@ When storage map synchronization fails, detailed error information is provided t --- +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range and the returned `block_range` reflects the last block included, which may be the chain tip. + +--- + ### SyncTransactions Returns transaction records for specific accounts within a block range. 
diff --git a/crates/store/build.rs b/crates/store/build.rs index d08f3fd0e6..cd6fca23f9 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -1,9 +1,106 @@ // This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in // `store/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . + +use std::path::PathBuf; +use std::sync::Arc; + +use miden_agglayer::{create_existing_agglayer_faucet, create_existing_bridge_account}; +use miden_protocol::account::{Account, AccountCode, AccountFile}; +use miden_protocol::{Felt, Word}; + fn main() { println!("cargo:rerun-if-changed=./src/db/migrations"); // If we do one re-write, the default rules are disabled, // hence we need to trigger explicitly on `Cargo.toml`. // println!("cargo:rerun-if-changed=Cargo.toml"); + + // Generate sample agglayer account files for genesis config samples. + generate_agglayer_sample_accounts(); + miden_node_rocksdb_cxx_linkage_fix::configure(); +} + +/// Generates sample agglayer account files for the `02-with-account-files` genesis config sample. 
+/// +/// Creates: +/// - `02-with-account-files/bridge.mac` - agglayer bridge account +/// - `02-with-account-files/agglayer_faucet_eth.mac` - agglayer faucet for wrapped ETH +/// - `02-with-account-files/agglayer_faucet_usdc.mac` - agglayer faucet for wrapped USDC +fn generate_agglayer_sample_accounts() { + // Use CARGO_MANIFEST_DIR to get the absolute path to the crate root + let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set"); + let samples_dir: PathBuf = + [&manifest_dir, "src", "genesis", "config", "samples", "02-with-account-files"] + .iter() + .collect(); + + // Create the directory if it doesn't exist + fs_err::create_dir_all(&samples_dir).expect("Failed to create samples directory"); + + // Use deterministic seeds for reproducible builds + // WARNING: DO NOT USE THIS IN PRODUCTION + let bridge_seed: Word = Word::new([Felt::new(1u64); 4]); + let eth_faucet_seed: Word = Word::new([Felt::new(2u64); 4]); + let usdc_faucet_seed: Word = Word::new([Felt::new(3u64); 4]); + + // Create the bridge account first (faucets need to reference it) + // Use "existing" variant so accounts have nonce > 0 (required for genesis) + let bridge_account = create_existing_bridge_account(bridge_seed); + let bridge_account_id = bridge_account.id(); + + // Create AggLayer faucets using "existing" variant + // ETH: 18 decimals, max supply of 1 billion tokens + let eth_faucet = create_existing_agglayer_faucet( + eth_faucet_seed, + "ETH", + 18, + Felt::new(1_000_000_000), + bridge_account_id, + ); + + // USDC: 6 decimals, max supply of 10 billion tokens + let usdc_faucet = create_existing_agglayer_faucet( + usdc_faucet_seed, + "USDC", + 6, + Felt::new(10_000_000_000), + bridge_account_id, + ); + + // Strip source location decorators from account code to ensure deterministic output. 
+ let bridge_account = strip_code_decorators(bridge_account); + let eth_faucet = strip_code_decorators(eth_faucet); + let usdc_faucet = strip_code_decorators(usdc_faucet); + + // Save account files (without secret keys since these use NoAuth) + let bridge_file = AccountFile::new(bridge_account, vec![]); + let eth_faucet_file = AccountFile::new(eth_faucet, vec![]); + let usdc_faucet_file = AccountFile::new(usdc_faucet, vec![]); + + // Write files + bridge_file + .write(samples_dir.join("bridge.mac")) + .expect("Failed to write bridge.mac"); + eth_faucet_file + .write(samples_dir.join("agglayer_faucet_eth.mac")) + .expect("Failed to write agglayer_faucet_eth.mac"); + usdc_faucet_file + .write(samples_dir.join("agglayer_faucet_usdc.mac")) + .expect("Failed to write agglayer_faucet_usdc.mac"); +} + +/// Strips source location decorators from an account's code MAST forest. +/// +/// This is necessary because the MAST forest embeds absolute file paths from the Cargo build +/// directory, which include a hash that differs between `cargo check` and `cargo build`. Stripping +/// decorators ensures the serialized `.mac` files are identical regardless of which cargo command +/// is used (CI or local builds or tests). 
+fn strip_code_decorators(account: Account) -> Account { + let (id, vault, storage, code, nonce, seed) = account.into_parts(); + + let mut mast = code.mast(); + Arc::make_mut(&mut mast).strip_decorators(); + let code = AccountCode::from_parts(mast, code.procedures().to_vec()); + + Account::new_unchecked(id, vault, storage, code, nonce, seed) } diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index d015408adb..2508c9d2df 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -37,7 +37,7 @@ pub type PersistentAccountTree = AccountTree = (0..account_count) + let account_ids: Vec<_> = (0..account_count) .map(|i| AccountIdBuilder::new().build_with_seed([i as u8; 32])) .collect(); // Create initial state with all accounts having value [i, 0, 0, 0] - let initial_state: Vec<_> = ids + let initial_state: Vec<_> = account_ids .iter() .enumerate() .map(|(i, &id)| (id, Word::from([i as u32, 0, 0, 0]))) @@ -173,7 +172,7 @@ mod account_tree_with_history_tests { .map(|i| { let idx = ((block - 1) * 5 + i) % account_count; let new_value = Word::from([idx as u32 + block as u32 * 100, 0, 0, 0]); - (ids[idx], new_value) + (account_ids[idx], new_value) }) .collect(); hist.compute_and_apply_mutations(updates).unwrap(); @@ -184,7 +183,7 @@ mod account_tree_with_history_tests { // Check genesis state for a few accounts for i in 0..4 { - let witness = hist.open_at(ids[i], BlockNumber::GENESIS).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::GENESIS).unwrap(); assert_eq!( witness.state_commitment(), Word::from([i as u32, 0, 0, 0]), @@ -197,7 +196,8 @@ mod account_tree_with_history_tests { for block in 1..=num_blocks { for i in 0..5 { let idx = ((block - 1) * 5 + i) % account_count; - let witness = hist.open_at(ids[idx], BlockNumber::from(block as u32)).unwrap(); + let witness = + hist.open_at(account_ids[idx], BlockNumber::from(block as u32)).unwrap(); let expected = Word::from([idx as u32 + block as u32 * 
100, 0, 0, 0]); assert_eq!( witness.state_commitment(), @@ -302,7 +302,7 @@ mod account_tree_with_history_tests { fn test_sparse_updates_many_accounts() { // Create 200 accounts but only update a few at a time let account_count = 200; - let ids: Vec<_> = (0..account_count) + let account_ids: Vec<_> = (0..account_count) .map(|i| { let mut seed = [0u8; 32]; seed[0] = i as u8; @@ -312,7 +312,7 @@ mod account_tree_with_history_tests { .collect(); // Create initial state with first 50 accounts - let initial_state: Vec<_> = ids + let initial_state: Vec<_> = account_ids .iter() .take(50) .enumerate() @@ -323,7 +323,7 @@ mod account_tree_with_history_tests { let mut hist = AccountTreeWithHistory::new(initial_tree, BlockNumber::GENESIS); // Block 1: Add 50 more accounts - let updates1: Vec<_> = ids + let updates1: Vec<_> = account_ids .iter() .skip(50) .take(50) @@ -333,7 +333,7 @@ mod account_tree_with_history_tests { hist.compute_and_apply_mutations(updates1).unwrap(); // Block 2: Update every 10th account - let updates2: Vec<_> = ids + let updates2: Vec<_> = account_ids .iter() .enumerate() .filter(|(i, _)| i % 10 == 0) @@ -343,7 +343,7 @@ mod account_tree_with_history_tests { hist.compute_and_apply_mutations(updates2).unwrap(); // Block 3: Add remaining accounts - let updates3: Vec<_> = ids + let updates3: Vec<_> = account_ids .iter() .skip(100) .enumerate() @@ -354,13 +354,13 @@ mod account_tree_with_history_tests { // Verify states at different blocks // Check genesis - first 50 accounts exist, others don't for i in 0..50 { - let witness = hist.open_at(ids[i], BlockNumber::GENESIS).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::GENESIS).unwrap(); assert_eq!(witness.state_commitment(), Word::from([i as u32, 0, 0, 0])); } // Check block 1 - first 100 accounts exist for i in 50..100 { - let witness = hist.open_at(ids[i], BlockNumber::from(1)).unwrap(); + let witness = hist.open_at(account_ids[i], BlockNumber::from(1)).unwrap(); 
assert_eq!(witness.state_commitment(), Word::from([i as u32, 1, 0, 0])); } @@ -368,14 +368,14 @@ mod account_tree_with_history_tests { for i in 0..10 { let idx = i * 10; if idx < 100 { - let witness = hist.open_at(ids[idx], BlockNumber::from(2)).unwrap(); + let witness = hist.open_at(account_ids[idx], BlockNumber::from(2)).unwrap(); assert_eq!(witness.state_commitment(), Word::from([idx as u32, 2, 0, 0])); } } // Check block 3 - all 200 accounts should be accessible for i in [0, 50, 100, 150, 199] { - let witness = hist.open_at(ids[i], BlockNumber::from(3)); + let witness = hist.open_at(account_ids[i], BlockNumber::from(3)); assert!(witness.is_some(), "Account {} should exist at block 3", i); } } diff --git a/crates/store/src/db/migrations.rs b/crates/store/src/db/migrations.rs index 01521e5787..10ce01409e 100644 --- a/crates/store/src/db/migrations.rs +++ b/crates/store/src/db/migrations.rs @@ -13,16 +13,16 @@ pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations" #[instrument(level = "debug", target = COMPONENT, skip_all, err)] pub fn apply_migrations( conn: &mut SqliteConnection, -) -> std::result::Result<(), crate::errors::DatabaseError> { +) -> std::result::Result<(), miden_node_db::DatabaseError> { let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); - tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); + tracing::info!(target = COMPONENT, migrations = migrations.len(), "Applying migrations"); let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { // Migrations applied successfully, verify schema hash verify_schema(conn)?; return Ok(()); }; - tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); + tracing::warn!(target = COMPONENT, error = ?e, "Failed to apply migration"); // something went wrong, MIGRATIONS contains conn.revert_last_migration(MIGRATIONS) .expect("Duality is maintained by the developer"); diff --git 
a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 0858e71d10..1f0e151ab1 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -1,6 +1,8 @@ CREATE TABLE block_headers ( block_num INTEGER NOT NULL, block_header BLOB NOT NULL, + signature BLOB NOT NULL, + commitment BLOB NOT NULL, PRIMARY KEY (block_num), CONSTRAINT block_header_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) @@ -59,7 +61,7 @@ CREATE TABLE notes ( consumed_at INTEGER, -- Block number when the note was consumed nullifier BLOB, -- Only known for public notes, null for private notes assets BLOB, - inputs BLOB, + storage BLOB, script_root BLOB, serial_num BLOB, @@ -155,3 +157,9 @@ CREATE TABLE transactions ( CREATE INDEX idx_transactions_account_id ON transactions(account_id); -- Index for joining with block_headers CREATE INDEX idx_transactions_block_num ON transactions(block_num); + +CREATE INDEX idx_vault_cleanup ON account_vault_assets(block_num) WHERE is_latest = 0; +CREATE INDEX idx_storage_cleanup ON account_storage_map_values(block_num) WHERE is_latest = 0; + +CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; +CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql deleted file mode 100644 index 1a15b55c4d..0000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP INDEX IF EXISTS idx_account_storage_map_latest_by_account_slot_key; -DROP INDEX IF EXISTS idx_account_vault_assets_latest_by_account_key; diff --git 
a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql deleted file mode 100644 index 83233e157e..0000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql +++ /dev/null @@ -1,2 +0,0 @@ -CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; -CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fc4a5cab4..74aa8ce3bd 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,16 +1,16 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::ops::RangeInclusive; +use std::ops::{Deref, DerefMut, RangeInclusive}; use std::path::PathBuf; use anyhow::Context; use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; -use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; +use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::generated as proto; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; use miden_protocol::asset::{Asset, AssetVaultKey}; -use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; +use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, SignedBlock}; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ NoteDetails, @@ -23,19 +23,21 @@ use miden_protocol::note::{ use miden_protocol::transaction::TransactionId; use miden_protocol::utils::{Deserializable, Serializable}; use tokio::sync::oneshot; -use tracing::{Instrument, info, instrument}; +use tracing::{info, instrument}; use crate::COMPONENT; -use 
crate::db::manager::{ConnectionManager, configure_connection_on_creation}; use crate::db::migrations::apply_migrations; use crate::db::models::conv::SqlTypeConvert; -use crate::db::models::queries::StorageMapValuesPage; +pub use crate::db::models::queries::{ + AccountCommitmentsPage, + NullifiersPage, + PublicAccountIdsPage, +}; +use crate::db::models::queries::{BlockHeaderCommitment, StorageMapValuesPage}; use crate::db::models::{Page, queries}; -use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; +use crate::errors::{DatabaseError, NoteSyncError}; use crate::genesis::GenesisBlock; -pub(crate) mod manager; - mod migrations; mod schema_hash; @@ -49,8 +51,25 @@ pub(crate) mod schema; pub type Result = std::result::Result; +/// The Store's database. +/// +/// Extends the underlying [`miden_node_db::Db`] type with functionality specific to the Store. pub struct Db { - pool: deadpool_diesel::Pool>, + db: miden_node_db::Db, +} + +impl Deref for Db { + type Target = miden_node_db::Db; + + fn deref(&self) -> &Self::Target { + &self.db + } +} + +impl DerefMut for Db { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.db + } } /// Describes the value of an asset for an account ID at `block_num` specifically. 
@@ -88,13 +107,6 @@ impl PartialEq<(Nullifier, BlockNumber)> for NullifierInfo { } } -#[derive(Debug, PartialEq)] -pub struct TransactionSummary { - pub account_id: AccountId, - pub block_num: BlockNumber, - pub transaction_id: TransactionId, -} - #[derive(Debug, PartialEq)] pub struct TransactionRecord { pub block_num: BlockNumber, @@ -172,14 +184,6 @@ impl From for proto::note::NoteSyncRecord { } } -#[derive(Debug, PartialEq)] -pub struct StateSyncUpdate { - pub notes: Vec, - pub block_header: BlockHeader, - pub account_updates: Vec, - pub transactions: Vec, -} - #[derive(Debug, PartialEq)] pub struct NoteSyncUpdate { pub notes: Vec, @@ -238,7 +242,7 @@ impl Db { ) .context("failed to open a database connection")?; - configure_connection_on_creation(&mut conn)?; + miden_node_db::configure_connection_on_creation(&mut conn)?; // Run migrations. apply_migrations(&mut conn).context("failed to apply database migrations")?; @@ -249,6 +253,7 @@ impl Db { models::queries::apply_block( conn, genesis.header(), + genesis.signature(), &[], &[], genesis.body().updated_accounts(), @@ -259,83 +264,42 @@ impl Db { Ok(()) } - /// Create and commit a transaction with the queries added in the provided closure - pub(crate) async fn transact(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send - + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result - + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .in_current_span() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) - .in_current_span() - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? 
- } - - /// Run the query _without_ a transaction - pub(crate) async fn query(&self, msg: M, query: Q) -> std::result::Result - where - Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, - R: Send + 'static, - M: Send + ToString, - E: From, - E: std::error::Error + Send + Sync + 'static, - { - let conn = self - .pool - .get() - .await - .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - - conn.interact(move |conn| { - let r = query(conn)?; - Ok(r) - }) - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? - } - /// Open a connection to the DB and apply any pending migrations. #[instrument(target = COMPONENT, skip_all)] - pub async fn load(database_filepath: PathBuf) -> Result { - let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); - let pool = deadpool_diesel::Pool::builder(manager).max_size(16).build()?; - + pub async fn load(database_filepath: PathBuf) -> Result { + let db = miden_node_db::Db::new(&database_filepath)?; info!( target: COMPONENT, sqlite= %database_filepath.display(), "Connected to the database" ); - let me = Db { pool }; - me.query("migrations", apply_migrations).await?; - Ok(me) + db.query("migrations", apply_migrations).await?; + Ok(Self { db }) } - /// Loads all the nullifiers from the DB. + /// Returns a page of nullifiers for tree rebuilding. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub(crate) async fn select_all_nullifiers(&self) -> Result> { - self.transact("all nullifiers", move |conn| { - let nullifiers = queries::select_all_nullifiers(conn)?; - Ok(nullifiers) + pub async fn select_nullifiers_paged( + &self, + page_size: std::num::NonZeroUsize, + after_nullifier: Option, + ) -> Result { + self.transact("read nullifiers paged", move |conn| { + queries::select_nullifiers_paged(conn, page_size, after_nullifier) }) .await } /// Loads the nullifiers that match the prefixes from the DB. 
- #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + #[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(prefix_len, prefixes = nullifier_prefixes.len()), + ret(level = "debug"), + err + )] pub async fn select_nullifiers_by_prefix( &self, prefix_len: u32, @@ -395,20 +359,38 @@ impl Db { .await } - /// TODO marked for removal, replace with paged version + /// Loads all the block headers from the DB. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_block_header_commitments(&self) -> Result> { + self.transact("all block headers", |conn| { + let raw = queries::select_all_block_header_commitments(conn)?; + Ok(raw) + }) + .await + } + + /// Returns a page of account commitments for tree rebuilding. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_account_commitments(&self) -> Result> { - self.transact("read all account commitments", move |conn| { - queries::select_all_account_commitments(conn) + pub async fn select_account_commitments_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read account commitments paged", move |conn| { + queries::select_account_commitments_paged(conn, page_size, after_account_id) }) .await } - /// Returns all account IDs that have public state. + /// Returns a page of public account IDs for forest rebuilding. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_public_account_ids(&self) -> Result> { - self.transact("read all public account IDs", move |conn| { - queries::select_all_public_account_ids(conn) + pub async fn select_public_account_ids_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read public account IDs paged", move |conn| { + queries::select_public_account_ids_paged(conn, page_size, after_account_id) }) .await } @@ -497,19 +479,6 @@ impl Db { .await } - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn get_state_sync( - &self, - block_number: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result { - self.transact::("state sync", move |conn| { - queries::get_state_sync(conn, block_number, account_ids, note_tags) - }) - .await - } - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_note_sync( &self, @@ -566,17 +535,18 @@ impl Db { &self, allow_acquire: oneshot::Sender<()>, acquire_done: oneshot::Receiver<()>, - block: ProvenBlock, + signed_block: SignedBlock, notes: Vec<(NoteRecord, Option)>, ) -> Result<()> { self.transact("apply block", move |conn| -> Result<()> { models::queries::apply_block( conn, - block.header(), + signed_block.header(), + signed_block.signature(), ¬es, - block.body().created_nullifiers(), - block.body().updated_accounts(), - block.body().transactions(), + signed_block.body().created_nullifiers(), + signed_block.body().updated_accounts(), + signed_block.body().transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist @@ -585,6 +555,8 @@ impl Db { tracing::warn!(target: COMPONENT, "failed to send notification for successful block application, potential deadlock"); } + models::queries::prune_history(conn, signed_block.header().block_num())?; + acquire_done.blocking_recv()?; Ok(()) 
diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 2e6313bf61..2176ea0d46 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -14,30 +14,32 @@ //! Notice: Changing any of these are _backwards-incompatible_ changes that are not caught/covered //! by migrations! -#![allow( +#![expect( clippy::inline_always, reason = "Just unification helpers of 1-2 lines of casting types" )] -#![allow( +#![expect( dead_code, reason = "Not all converters are used bidirectionally, however, keeping them is a good thing" )] -#![allow( +#![expect( clippy::cast_sign_loss, reason = "This is the one file where we map the signed database types to the working types" )] -#![allow( +#![expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize casting will cause issues on relevant platforms" )] +use miden_crypto::Word; +use miden_crypto::utils::Deserializable; use miden_protocol::Felt; use miden_protocol::account::{StorageSlotName, StorageSlotType}; -use miden_protocol::block::BlockNumber; +use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::NoteTag; -use crate::db::models::queries::NetworkAccountType; +use crate::db::models::queries::{BlockHeaderCommitment, NetworkAccountType}; #[derive(Debug, thiserror::Error)] #[error("failed to convert from database type {from_type} into {into_type}")] @@ -50,7 +52,7 @@ pub struct DatabaseTypeConversionError { /// Convert from and to it's database representation and back /// /// We do not assume sanity of DB types. 
-pub(crate) trait SqlTypeConvert: Sized { +pub trait SqlTypeConvert: Sized { type Raw: Sized; fn to_raw_sql(self) -> Self::Raw; @@ -67,6 +69,32 @@ pub(crate) trait SqlTypeConvert: Sized { } } +impl SqlTypeConvert for BlockHeaderCommitment { + type Raw = Vec; + fn from_raw_sql( + raw: Self::Raw, + ) -> Result { + let inner = + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err)?; + Ok(BlockHeaderCommitment(inner)) + } + fn to_raw_sql(self) -> Self::Raw { + self.0.as_bytes().to_vec() + } +} + +impl SqlTypeConvert for BlockHeader { + type Raw = Vec; + + fn from_raw_sql(raw: Self::Raw) -> Result { + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + miden_crypto::utils::Serializable::to_bytes(&self) + } +} + impl SqlTypeConvert for NetworkAccountType { type Raw = i32; @@ -107,7 +135,7 @@ impl SqlTypeConvert for NoteTag { #[inline(always)] fn from_raw_sql(raw: Self::Raw) -> Result { - #[allow(clippy::cast_sign_loss)] + #[expect(clippy::cast_sign_loss)] Ok(NoteTag::new(raw as u32)) } @@ -189,7 +217,7 @@ pub(crate) fn fungible_delta_to_raw_sql(delta: i64) -> i64 { } #[inline(always)] -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::cast_sign_loss)] pub(crate) fn raw_sql_to_note_type(raw: i32) -> u8 { raw as u8 } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 6f8e3834f6..9b7a8e146e 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -17,11 +18,7 @@ use diesel::{ SqliteConnection, }; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; -use miden_node_utils::limiter::{ - MAX_RESPONSE_PAYLOAD_BYTES, - QueryParamAccountIdLimit, - QueryParamLimiter, -}; +use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; use 
miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ @@ -44,7 +41,8 @@ use miden_protocol::utils::{Deserializable, Serializable}; use crate::COMPONENT; use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; -use crate::db::models::{serialize_vec, vec_raw_try_into}; +#[cfg(test)] +use crate::db::models::vec_raw_try_into; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; @@ -254,11 +252,19 @@ pub(crate) fn select_network_account_by_id( } } -/// Select all account commitments from the DB using the given [`SqliteConnection`]. -/// -/// # Returns +/// Page of account commitments returned by [`select_account_commitments_paged`]. +#[derive(Debug)] +pub struct AccountCommitmentsPage { + /// The account commitments in this page. + pub commitments: Vec<(AccountId, Word)>, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, +} + +/// Selects account commitments with pagination. /// -/// The vector with the account id and corresponding commitment, or an error. +/// Returns up to `page_size` account commitments, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. 
/// /// # Raw SQL /// @@ -270,31 +276,71 @@ pub(crate) fn select_network_account_by_id( /// accounts /// WHERE /// is_latest = 1 +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_account_commitments( +pub(crate) fn select_account_commitments_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - let raw = SelectDsl::select( + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; + + // Fetch one extra to determine if there are more results + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select( schema::accounts::table, (schema::accounts::account_id, schema::accounts::account_commitment), ) .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::<(Vec, Vec)>(conn)?; + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } + + let raw = query.load::<(Vec, Vec)>(conn)?; - Result::, DatabaseError>::from_iter(raw.into_iter().map( + let mut commitments = Result::, DatabaseError>::from_iter(raw.into_iter().map( |(ref account, ref commitment)| { Ok((AccountId::read_from_bytes(account)?, Word::read_from_bytes(commitment)?)) }, - )) + ))?; + + // If we got more than page_size, there are more results + let next_cursor = if commitments.len() > page_size.get() { + commitments.pop(); // Remove the extra element + commitments.last().map(|(id, _)| *id) + } else { + None + }; + + Ok(AccountCommitmentsPage { commitments, next_cursor }) } -/// Select all account IDs that have public state. +/// Page of public account IDs returned by [`select_public_account_ids_paged`]. 
+#[derive(Debug)] +pub struct PublicAccountIdsPage { + /// The public account IDs in this page. + pub account_ids: Vec, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, +} + +/// Selects public account IDs with pagination. +/// +/// Returns up to `page_size` public account IDs, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. /// -/// This filters accounts in-memory after loading only the account IDs (not commitments), -/// which is more efficient than loading full commitments when only IDs are needed. +/// Public accounts are those with `AccountStorageMode::Public` or `AccountStorageMode::Network`. +/// We identify them by checking `code_commitment IS NOT NULL` - public accounts store their full +/// state (including `code_commitment`), while private accounts only store the `account_commitment`. /// /// # Raw SQL /// @@ -305,31 +351,48 @@ pub(crate) fn select_all_account_commitments( /// accounts /// WHERE /// is_latest = 1 +/// AND code_commitment IS NOT NULL +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_public_account_ids( +pub(crate) fn select_public_account_ids_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - // We could technically use a `LIKE` constraint for both postgres and sqlite backends, - // but diesel doesn't expose that. 
- let raw: Vec> = - SelectDsl::select(schema::accounts::table, schema::accounts::account_id) - .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::>(conn)?; + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; - Result::from_iter( - raw.into_iter() - .map(|bytes| { - AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) - }) - .filter_map(|result| match result { - Ok(id) if id.has_public_state() => Some(Ok(id)), - Ok(_) => None, - Err(e) => Some(Err(e)), - }), - ) + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::code_commitment.is_not_null()) + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } + + let raw = query.load::>(conn)?; + + let mut account_ids: Vec = Result::from_iter(raw.into_iter().map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }))?; + + // If we got more than page_size, there are more results + let next_cursor = if account_ids.len() > page_size.get() { + account_ids.pop(); // Remove the extra element + account_ids.last().copied() + } else { + None + }; + + Ok(PublicAccountIdsPage { account_ids, next_cursor }) } /// Select account vault assets within a block range (inclusive). @@ -418,49 +481,6 @@ pub(crate) fn select_account_vault_assets( Ok((last_block_included, values)) } -/// Select [`AccountSummary`] from the DB using the given [`SqliteConnection`], given that the -/// account update was in the given block range (inclusive). -/// -/// # Returns -/// -/// The vector of [`AccountSummary`] with the matching accounts. 
-/// -/// # Raw SQL -/// -/// ```sql -/// SELECT -/// account_id, -/// account_commitment, -/// block_num -/// FROM -/// accounts -/// WHERE -/// block_num > ?1 AND -/// block_num <= ?2 AND -/// account_id IN (?3) -/// ORDER BY -/// block_num ASC -/// ``` -pub fn select_accounts_by_block_range( - conn: &mut SqliteConnection, - account_ids: &[AccountId], - block_range: RangeInclusive, -) -> Result, DatabaseError> { - QueryParamAccountIdLimit::check(account_ids.len())?; - - let desired_account_ids = serialize_vec(account_ids); - let raw: Vec = - SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) - .filter(schema::accounts::block_num.gt(block_range.start().to_raw_sql())) - .filter(schema::accounts::block_num.le(block_range.end().to_raw_sql())) - .filter(schema::accounts::account_id.eq_any(desired_account_ids)) - .order(schema::accounts::block_num.asc()) - .load::(conn)?; - // SAFETY `From` implies `TryFrom `AccountSummary` - Ok(vec_raw_try_into(raw).unwrap()) -} - /// Select all accounts from the DB using the given [`SqliteConnection`]. /// /// # Returns @@ -851,11 +871,13 @@ pub(crate) fn insert_account_vault_asset( // First, update any existing rows with the same (account_id, vault_key) to set // is_latest=false let vault_key: Word = vault_key.into(); + let vault_key_bytes = vault_key.to_bytes(); + let account_id_bytes = account_id.to_bytes(); let update_count = diesel::update(schema::account_vault_assets::table) .filter( schema::account_vault_assets::account_id - .eq(&account_id.to_bytes()) - .and(schema::account_vault_assets::vault_key.eq(&vault_key.to_bytes())) + .eq(account_id_bytes) + .and(schema::account_vault_assets::vault_key.eq(vault_key_bytes)) .and(schema::account_vault_assets::is_latest.eq(true)), ) .set(schema::account_vault_assets::is_latest.eq(false)) @@ -919,7 +941,7 @@ pub(crate) fn insert_account_storage_map_value( } /// Attention: Assumes the account details are NOT null! The schema explicitly allows this though! 
-#[allow(clippy::too_many_lines)] +#[expect(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -1189,3 +1211,78 @@ pub(crate) struct AccountStorageMapRowInsert { pub(crate) value: Vec, pub(crate) is_latest: bool, } + +// CLEANUP FUNCTIONS +// ================================================================================================ + +/// Number of historical blocks to retain for vault assets and storage map values. +/// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be deleted, +/// except for entries marked with `is_latest=true` which are always retained. +pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; + +/// Clean up old entries for all accounts, deleting entries older than the retention window. +/// +/// Deletes rows where `block_num < chain_tip - HISTORICAL_BLOCK_RETENTION` and `is_latest = false`. +/// This is a simple and efficient approach that doesn't require window functions. +/// +/// # Returns +/// A tuple of `(vault_assets_deleted, storage_map_values_deleted)` +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +pub(crate) fn prune_history( + conn: &mut SqliteConnection, + chain_tip: BlockNumber, +) -> Result<(usize, usize), DatabaseError> { + let cutoff_block = i64::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); + tracing::Span::current().record("cutoff_block", cutoff_block); + let vault_deleted = prune_account_vault_assets(conn, cutoff_block)?; + let storage_deleted = prune_account_storage_map_values(conn, cutoff_block)?; + + Ok((vault_deleted, storage_deleted)) +} + +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +fn prune_account_vault_assets( + conn: &mut SqliteConnection, + cutoff_block: i64, +) -> Result { + diesel::delete( + schema::account_vault_assets::table.filter( + schema::account_vault_assets::block_num + .lt(cutoff_block) + 
.and(schema::account_vault_assets::is_latest.eq(false)), + ), + ) + .execute(conn) + .map_err(DatabaseError::Diesel) +} + +#[tracing::instrument( + target = COMPONENT, + skip_all, + err, + fields(cutoff_block), +)] +fn prune_account_storage_map_values( + conn: &mut SqliteConnection, + cutoff_block: i64, +) -> Result { + diesel::delete( + schema::account_storage_map_values::table.filter( + schema::account_storage_map_values::block_num + .lt(cutoff_block) + .and(schema::account_storage_map_values::is_latest.eq(false)), + ), + ) + .execute(conn) + .map_err(DatabaseError::Diesel) +} diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 790c4519ed..dd1ab97483 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -161,6 +161,7 @@ fn create_test_account_with_storage() -> (Account, AccountId) { fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { use crate::db::schema::block_headers; + let secret_key = SecretKey::new(); let block_header = BlockHeader::new( 1_u8.into(), Word::default(), @@ -171,15 +172,18 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { Word::default(), Word::default(), Word::default(), - SecretKey::new().public_key(), + secret_key.public_key(), test_fee_params(), 0_u8.into(), ); + let signature = secret_key.sign(block_header.commitment()); diesel::insert_into(block_headers::table) .values(( block_headers::block_num.eq(i64::from(block_num.as_u32())), block_headers::block_header.eq(block_header.to_bytes()), + block_headers::signature.eq(signature.to_bytes()), + block_headers::commitment.eq(block_header.commitment().to_bytes()), )) .execute(conn) .expect("Failed to insert block header"); @@ -655,28 +659,6 @@ fn test_select_account_vault_at_block_historical_with_updates() { .expect("upsert_accounts failed"); } - upsert_accounts( - &mut conn, - 
&[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_2, - ) - .expect("upsert_accounts block 2 failed"); - - upsert_accounts( - &mut conn, - &[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_3, - ) - .expect("upsert_accounts block 3 failed"); - // Insert vault asset at block 1: vault_key_1 = 1000 tokens let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([ Felt::new(1), @@ -783,28 +765,6 @@ fn test_select_account_vault_at_block_with_deletion() { .expect("upsert_accounts failed"); } - upsert_accounts( - &mut conn, - &[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_2, - ) - .expect("upsert_accounts block 2 failed"); - - upsert_accounts( - &mut conn, - &[BlockAccountUpdate::new( - account_id, - account.commitment(), - AccountUpdateDetails::Private, - )], - block_3, - ) - .expect("upsert_accounts block 3 failed"); - // Insert vault asset at block 1 let vault_key = AssetVaultKey::new_unchecked(Word::from([ Felt::new(1), diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 3c295c72b8..bfcd34ee7a 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -11,6 +11,8 @@ use diesel::{ SelectableHelper, SqliteConnection, }; +use miden_crypto::Word; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; @@ -124,34 +126,93 @@ pub fn select_all_block_headers( vec_raw_try_into(raw_block_headers) } +/// Select all block headers from the DB using the given [`SqliteConnection`]. +/// +/// # Returns +/// +/// A vector of [`BlockHeader`] or an error. 
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT commitment +/// FROM block_headers +/// ORDER BY block_num ASC +/// ``` +pub fn select_all_block_header_commitments( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + let raw_commitments = + QueryDsl::select(schema::block_headers::table, schema::block_headers::commitment) + .order(schema::block_headers::block_num.asc()) + .load::>(conn)?; + let commitments = + Result::from_iter(raw_commitments.into_iter().map(BlockHeaderCommitment::from_raw_sql))?; + Ok(commitments) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(transparent)] +pub struct BlockHeaderCommitment(pub(crate) Word); + +impl BlockHeaderCommitment { + pub fn new(header: &BlockHeader) -> Self { + Self(header.commitment()) + } + pub fn word(self) -> Word { + self.0 + } +} + #[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderRawRow { - #[allow(dead_code)] + #[expect(dead_code)] pub block_num: i64, pub block_header: Vec, + pub signature: Vec, + pub commitment: Vec, } impl TryInto for BlockHeaderRawRow { type Error = DatabaseError; fn try_into(self) -> Result { - let block_header = BlockHeader::read_from_bytes(&self.block_header[..])?; + let block_header = BlockHeader::from_raw_sql(self.block_header)?; + // we're bust if this invariant doesn't hold + debug_assert_eq!( + BlockHeaderCommitment::new(&block_header), + BlockHeaderCommitment::from_raw_sql(self.commitment) + .expect("Database always contains valid format commitments") + ); Ok(block_header) } } +impl TryInto<(BlockHeader, Signature)> for BlockHeaderRawRow { + type Error = DatabaseError; + fn try_into(self) -> Result<(BlockHeader, Signature), Self::Error> { + let block_header = BlockHeader::read_from_bytes(&self.block_header[..])?; + let signature = Signature::read_from_bytes(&self.signature[..])?; + Ok((block_header, signature)) + } +} + 
#[derive(Debug, Clone, Insertable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderInsert { pub block_num: i64, pub block_header: Vec, + pub signature: Vec, + pub commitment: Vec, } -impl From<&BlockHeader> for BlockHeaderInsert { - fn from(block_header: &BlockHeader) -> Self { +impl From<(&BlockHeader, &Signature)> for BlockHeaderInsert { + fn from((header, signature): (&BlockHeader, &Signature)) -> Self { Self { - block_num: block_header.block_num().to_raw_sql(), - block_header: block_header.to_bytes(), + block_num: header.block_num().to_raw_sql(), + block_header: header.to_bytes(), + signature: signature.to_bytes(), + commitment: BlockHeaderCommitment::new(header).to_raw_sql(), } } } @@ -174,8 +235,9 @@ impl From<&BlockHeader> for BlockHeaderInsert { pub(crate) fn insert_block_header( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, ) -> Result { - let block_header = BlockHeaderInsert::from(block_header); + let block_header = BlockHeaderInsert::from((block_header, signature)); let count = diesel::insert_into(schema::block_headers::table) .values(&[block_header]) .execute(conn)?; diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 0f29b00157..35c38c5ad2 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -25,20 +25,14 @@ //! transaction, any nesting of further `transaction(conn, || {})` has no effect and should be //! considered unnecessary boilerplate by default. 
-#![allow( - clippy::needless_pass_by_value, - reason = "The parent scope does own it, passing by value avoids additional boilerplate" -)] - use diesel::SqliteConnection; -use miden_protocol::account::AccountId; -use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::OrderedTransactionHeaders; use super::DatabaseError; -use crate::db::{NoteRecord, StateSyncUpdate}; -use crate::errors::StateSyncError; +use crate::db::NoteRecord; mod transactions; pub use transactions::*; @@ -47,6 +41,7 @@ pub use block_headers::*; mod accounts; pub use accounts::*; mod nullifiers; +pub use nullifiers::NullifiersPage; pub(crate) use nullifiers::*; mod notes; pub(crate) use notes::*; @@ -59,6 +54,7 @@ pub(crate) use notes::*; pub(crate) fn apply_block( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, notes: &[(NoteRecord, Option)], nullifiers: &[Nullifier], accounts: &[BlockAccountUpdate], @@ -66,7 +62,7 @@ pub(crate) fn apply_block( ) -> Result { let mut count = 0; // Note: ordering here is important as the relevant tables have FK dependencies. - count += insert_block_header(conn, block_header)?; + count += insert_block_header(conn, block_header, signature)?; count += upsert_accounts(conn, accounts, block_header.block_num())?; count += insert_scripts(conn, notes.iter().map(|(note, _)| note))?; count += insert_notes(conn, notes)?; @@ -74,52 +70,3 @@ pub(crate) fn apply_block( count += insert_nullifiers_for_block(conn, nullifiers, block_header.block_num())?; Ok(count) } - -/// Loads the state necessary for a state sync -/// -/// The state sync covers from `from_start_block` until the last block that has a note matching the -/// given `note_tags`. 
-pub(crate) fn get_state_sync( - conn: &mut SqliteConnection, - from_start_block: BlockNumber, - account_ids: Vec, - note_tags: Vec, -) -> Result { - let chain_tip = select_block_header_by_block_num(conn, None)? - .expect("Chain tip is not found") - .block_num(); - - // Sync notes from the starting block to the latest in the chain. - let block_range = from_start_block..=chain_tip; - - // select notes since block by tag and sender - let (notes, _) = select_notes_since_block_by_tag_and_sender( - conn, - &account_ids[..], - ¬e_tags[..], - block_range, - )?; - - // select block header by block num - let maybe_note_block_num = notes.first().map(|note| note.block_num); - let block_header: BlockHeader = select_block_header_by_block_num(conn, maybe_note_block_num)? - .ok_or_else(|| StateSyncError::EmptyBlockHeadersTable)?; - - // select accounts by block range - let to_end_block = block_header.block_num(); - let account_updates = - select_accounts_by_block_range(conn, &account_ids, from_start_block..=to_end_block)?; - - // select transactions by accounts and block range - let transactions = select_transactions_by_accounts_and_block_range( - conn, - &account_ids, - from_start_block..=to_end_block, - )?; - Ok(StateSyncUpdate { - notes, - block_header, - account_updates, - transactions, - }) -} diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index a2ab7b1bb0..67b3a708b2 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -1,4 +1,4 @@ -#![allow( +#![expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] @@ -41,10 +41,10 @@ use miden_protocol::note::{ NoteDetails, NoteId, NoteInclusionProof, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, + NoteStorage, NoteTag, NoteType, Nullifier, @@ -203,7 +203,7 @@ pub(crate) fn select_notes_since_block_by_tag_and_sender( /// notes.tag, /// notes.attachment, 
/// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -283,7 +283,7 @@ pub(crate) fn select_existing_note_commitments( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -427,7 +427,7 @@ pub(crate) fn select_note_script_by_root( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script, @@ -441,14 +441,7 @@ pub(crate) fn select_note_script_by_root( /// ORDER BY notes.rowid ASC /// LIMIT ?4 /// ``` -#[allow( - clippy::cast_sign_loss, - reason = "We need custom SQL statements which has given types that we need to convert" -)] -#[allow( - clippy::too_many_lines, - reason = "Lines will be reduced when schema is updated to simplify logic" -)] +#[expect(clippy::cast_sign_loss, reason = "row_id is a positive integer")] pub(crate) fn select_unconsumed_network_notes_by_account_id( conn: &mut SqliteConnection, account_id: AccountId, @@ -460,7 +453,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( diesel::dsl::sql::("notes.rowid >= ") .bind::(page.token.unwrap_or_default() as i64); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -470,7 +463,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( i64, // rowid (from sql::("notes.rowid")) ); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -550,7 +543,6 @@ pub struct NoteSyncRecordRawRow { pub inclusion_path: Vec, // SparseMerklePath } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for NoteSyncRecordRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -575,7 +567,7 @@ impl TryInto for 
NoteSyncRecordRawRow { #[diesel(check_for_backend(Sqlite))] pub struct NoteDetailsRawRow { pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, } @@ -601,7 +593,7 @@ pub struct NoteRecordWithScriptRawJoined { // #[diesel(embed)] // pub metadata: NoteMetadataRaw, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, // #[diesel(embed)] @@ -623,7 +615,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, } = note; @@ -638,7 +630,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, script, @@ -666,7 +658,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { attachment, // metadata ^^^, assets, - inputs, + storage, serial_num, //details ^^^, inclusion_path, @@ -675,7 +667,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { } = raw; let index = BlockNoteIndexRawRow { batch_index, note_index }; let metadata = NoteMetadataRawRow { note_type, sender, tag, attachment }; - let details = NoteDetailsRawRow { assets, inputs, serial_num }; + let details = NoteDetailsRawRow { assets, storage, serial_num }; let metadata = metadata.try_into()?; let committed_at = BlockNumber::from_raw_sql(committed_at)?; @@ -684,16 +676,21 @@ impl TryInto for NoteRecordWithScriptRawJoined { let script = script.map(|script| NoteScript::read_from_bytes(&script[..])).transpose()?; let details = if let NoteDetailsRawRow { assets: Some(assets), - inputs: Some(inputs), + storage: Some(storage), serial_num: Some(serial_num), } = details { - let inputs = NoteInputs::read_from_bytes(&inputs[..])?; + let storage = NoteStorage::read_from_bytes(&storage[..])?; let serial_num = Word::read_from_bytes(&serial_num[..])?; - let script = script.ok_or_else(|| { - DatabaseError::conversiont_from_sql::(None) - })?; - let recipient = 
NoteRecipient::new(serial_num, script, inputs); + let script = + script.ok_or_else(|| { + miden_node_db::DatabaseError::conversiont_from_sql::< + NoteRecipient, + DatabaseError, + _, + >(None) + })?; + let recipient = NoteRecipient::new(serial_num, script, storage); let assets = NoteAssets::read_from_bytes(&assets[..])?; Some(NoteDetails::new(assets, recipient)) } else { @@ -730,7 +727,7 @@ pub struct NoteRecordRawRow { pub attachment: Vec, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, pub inclusion_path: Vec, @@ -746,13 +743,13 @@ pub struct NoteMetadataRawRow { attachment: Vec, } -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::cast_sign_loss)] impl TryInto for NoteMetadataRawRow { type Error = DatabaseError; fn try_into(self) -> Result { let sender = AccountId::read_from_bytes(&self.sender[..])?; let note_type = NoteType::try_from(self.note_type as u32) - .map_err(DatabaseError::conversiont_from_sql::)?; + .map_err(miden_node_db::DatabaseError::conversiont_from_sql::)?; let tag = NoteTag::new(self.tag as u32); let attachment = NoteAttachment::read_from_bytes(&self.attachment)?; Ok(NoteMetadata::new(sender, note_type, tag).with_attachment(attachment)) @@ -767,14 +764,16 @@ pub struct BlockNoteIndexRawRow { pub note_index: i32, // index within batch } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] +#[expect(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for BlockNoteIndexRawRow { type Error = DatabaseError; fn try_into(self) -> Result { let batch_index = self.batch_index as usize; let note_index = self.note_index as usize; let index = BlockNoteIndex::new(batch_index, note_index).ok_or_else(|| { - DatabaseError::conversiont_from_sql::(None) + miden_node_db::DatabaseError::conversiont_from_sql::( + None, + ) })?; Ok(index) } @@ -791,7 +790,6 @@ impl TryInto for BlockNoteIndexRawRow { /// /// The [`SqliteConnection`] object is not 
consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -822,7 +820,6 @@ pub(crate) fn insert_notes( /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -868,7 +865,7 @@ pub struct NoteInsertRow { pub consumed_at: Option, pub nullifier: Option>, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub script_root: Option>, pub serial_num: Option>, } @@ -902,7 +899,7 @@ impl From<(NoteRecord, Option)> for NoteInsertRow { consumed_at: None::, // New notes are always unconsumed. nullifier: nullifier.as_ref().map(Nullifier::to_bytes), assets: note.details.as_ref().map(|d| d.assets().to_bytes()), - inputs: note.details.as_ref().map(|d| d.inputs().to_bytes()), + storage: note.details.as_ref().map(|d| d.storage().to_bytes()), script_root: note.details.as_ref().map(|d| d.script().root().to_bytes()), serial_num: note.details.as_ref().map(|d| d.serial_num().to_bytes()), } diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index 5ab5785374..84e89ebad5 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -1,3 +1,4 @@ +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::query_dsl::methods::SelectDsl; @@ -128,6 +129,7 @@ pub(crate) fn select_nullifiers_by_prefix( /// ORDER BY /// block_num ASC /// ``` +#[cfg(test)] pub(crate) fn select_all_nullifiers( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { @@ -137,6 +139,67 @@ pub(crate) fn select_all_nullifiers( vec_raw_try_into(nullifiers_raw) } +/// Page of nullifiers returned by [`select_nullifiers_paged`]. +#[derive(Debug)] +pub struct NullifiersPage { + /// The nullifiers in this page. 
+ pub nullifiers: Vec, + /// If `Some`, there are more results. Use this as the `after_nullifier` for the next page. + pub next_cursor: Option, +} + +/// Selects nullifiers with pagination. +/// +/// Returns up to `page_size` nullifiers, starting after `after_nullifier` if provided. +/// Results are ordered by nullifier bytes for stable pagination. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// nullifier, +/// block_num +/// FROM +/// nullifiers +/// WHERE +/// (nullifier > :after_nullifier OR :after_nullifier IS NULL) +/// ORDER BY +/// nullifier ASC +/// LIMIT :page_size + 1 +/// ``` +pub(crate) fn select_nullifiers_paged( + conn: &mut SqliteConnection, + page_size: NonZeroUsize, + after_nullifier: Option, +) -> Result { + // Fetch one extra to determine if there are more results + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = + SelectDsl::select(schema::nullifiers::table, NullifierWithoutPrefixRawRow::as_select()) + .order_by(schema::nullifiers::nullifier.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_nullifier { + query = query.filter(schema::nullifiers::nullifier.gt(cursor.to_bytes())); + } + + let nullifiers_raw = query.load::(conn)?; + let mut nullifiers: Vec = vec_raw_try_into(nullifiers_raw)?; + + // If we got more than page_size, there are more results + let next_cursor = if nullifiers.len() > page_size.get() { + nullifiers.pop(); // Remove the extra element + nullifiers.last().map(|info| info.nullifier) + } else { + None + }; + + Ok(NullifiersPage { nullifiers, next_cursor }) +} + /// Insert nullifiers for a block into the database. 
/// /// # Parameters @@ -163,7 +226,6 @@ pub(crate) fn select_all_nullifiers( /// INSERT INTO nullifiers (nullifier, nullifier_prefix, block_num) /// VALUES (?1, ?2, ?3) /// ``` -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index be132e1a56..1095fc1899 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -27,67 +27,7 @@ use super::DatabaseError; use crate::COMPONENT; use crate::db::models::conv::SqlTypeConvert; use crate::db::models::{serialize_vec, vec_raw_try_into}; -use crate::db::{TransactionSummary, schema}; - -/// Select transactions for given accounts in a specified block range -/// -/// # Parameters -/// * `account_ids`: List of account IDs to filter by -/// - Limit: 0 <= size <= 1000 -/// * `block_range`: Range of blocks to include inclusive -/// -/// # Returns -/// -/// A vector of [`TransactionSummary`] types or an error. 
-/// -/// # Raw SQL -/// ```sql -/// SELECT -/// account_id, -/// block_num, -/// transaction_id -/// FROM -/// transactions -/// WHERE -/// block_num > ?1 AND -/// block_num <= ?2 AND -/// account_id IN (?3) -/// ORDER BY -/// transaction_id ASC -/// ``` -pub fn select_transactions_by_accounts_and_block_range( - conn: &mut SqliteConnection, - account_ids: &[AccountId], - block_range: RangeInclusive, -) -> Result, DatabaseError> { - QueryParamAccountIdLimit::check(account_ids.len())?; - - let desired_account_ids = serialize_vec(account_ids); - let raw = SelectDsl::select( - schema::transactions::table, - ( - schema::transactions::account_id, - schema::transactions::block_num, - schema::transactions::transaction_id, - ), - ) - .filter(schema::transactions::block_num.gt(block_range.start().to_raw_sql())) - .filter(schema::transactions::block_num.le(block_range.end().to_raw_sql())) - .filter(schema::transactions::account_id.eq_any(desired_account_ids)) - .order(schema::transactions::transaction_id.asc()) - .load::(conn) - .map_err(DatabaseError::from)?; - vec_raw_try_into(raw) -} - -#[derive(Debug, Clone, PartialEq, Queryable, Selectable, QueryableByName)] -#[diesel(table_name = schema::transactions)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct TransactionSummaryRaw { - account_id: Vec, - block_num: i64, - transaction_id: Vec, -} +use crate::db::schema; #[derive(Debug, Clone, PartialEq, Queryable, Selectable, QueryableByName)] #[diesel(table_name = schema::transactions)] @@ -103,17 +43,6 @@ pub struct TransactionRecordRaw { size_in_bytes: i64, } -impl TryInto for TransactionSummaryRaw { - type Error = DatabaseError; - fn try_into(self) -> Result { - Ok(crate::db::TransactionSummary { - account_id: AccountId::read_from_bytes(&self.account_id[..])?, - block_num: BlockNumber::from_raw_sql(self.block_num)?, - transaction_id: TransactionId::read_from_bytes(&self.transaction_id[..])?, - }) - } -} - impl TryInto for TransactionRecordRaw { type Error = 
DatabaseError; fn try_into(self) -> Result { @@ -150,7 +79,6 @@ impl TryInto for TransactionRecordRaw { /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -161,10 +89,9 @@ pub(crate) fn insert_transactions( block_num: BlockNumber, transactions: &OrderedTransactionHeaders, ) -> Result { - #[allow(clippy::into_iter_on_ref)] // false positive let rows: Vec<_> = transactions .as_slice() - .into_iter() + .iter() .map(|tx| TransactionSummaryRowInsert::new(tx, block_num)) .collect(); @@ -187,7 +114,7 @@ pub struct TransactionSummaryRowInsert { } impl TransactionSummaryRowInsert { - #[allow( + #[expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] @@ -197,11 +124,25 @@ impl TransactionSummaryRowInsert { ) -> Self { const HEADER_BASE_SIZE: usize = 4 + 32 + 16 + 64; // block_num + tx_id + account_id + commitments - // Serialize input notes using binary format (store nullifiers) - let nullifiers_binary = transaction_header.input_notes().to_bytes(); - - // Serialize output notes using binary format (store note IDs) - let output_notes_binary = transaction_header.output_notes().to_bytes(); + // Extract nullifiers from input notes and serialize them. + // We only store the nullifiers (not the full `InputNoteCommitment`) since + // that's all that's needed when reading back `TransactionRecords`. + let nullifiers: Vec = transaction_header + .input_notes() + .iter() + .map(miden_protocol::transaction::InputNoteCommitment::nullifier) + .collect(); + let nullifiers_binary = nullifiers.to_bytes(); + + // Extract note IDs from output note headers and serialize them. + // We only store the `NoteId`s (not the full `NoteHeader` with metadata) since + // that's all that's needed when reading back `TransactionRecords`. 
+ let output_note_ids: Vec = transaction_header + .output_notes() + .iter() + .map(miden_protocol::note::NoteHeader::id) + .collect(); + let output_notes_binary = output_note_ids.to_bytes(); // Manually calculate the estimated size of the transaction header to avoid // the cost of serialization. The size estimation includes: @@ -341,12 +282,13 @@ pub fn select_transactions_records( // Add transactions from this chunk one by one until we hit the limit let mut added_from_chunk = 0; - let mut last_added_tx: Option = None; for tx in chunk { if total_size + tx.size_in_bytes <= max_payload_bytes { total_size += tx.size_in_bytes; - last_added_tx = Some(tx); + last_block_num = Some(tx.block_num); + last_transaction_id = Some(tx.transaction_id.clone()); + all_transactions.push(tx); added_from_chunk += 1; } else { // Can't fit this transaction, stop here @@ -354,13 +296,6 @@ pub fn select_transactions_records( } } - // Update cursor position only for the last transaction that was actually added - if let Some(tx) = last_added_tx { - last_block_num = Some(tx.block_num); - last_transaction_id = Some(tx.transaction_id.clone()); - all_transactions.push(tx); - } - // Break if chunk incomplete (size limit hit or data exhausted) if added_from_chunk < NUM_TXS_PER_CHUNK { break; diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index c472940e45..ef74e86fac 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -1,6 +1,6 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use miden_protocol::note::Nullifier; -use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::utils::Serializable; use crate::errors::DatabaseError; @@ -14,16 +14,6 @@ pub(crate) fn vec_raw_try_into>( ) } -#[allow(dead_code)] -/// Deserialize an iterable container full of byte blobs `B` to types `T` -pub(crate) fn deserialize_raw_vec, T: Deserializable>( - raw: impl IntoIterator, -) -> 
Result, DeserializationError> { - Result::, DeserializationError>::from_iter( - raw.into_iter().map(|raw| T::read_from_bytes(raw.as_ref())), - ) -} - /// Utility to convert an iterable container to a vector of byte blobs pub(crate) fn serialize_vec<'a, D: Serializable + 'a>( raw: impl IntoIterator, @@ -38,7 +28,6 @@ pub fn get_nullifier_prefix(nullifier: &Nullifier) -> u16 { /// Converts a slice of length `N` to an array, returns `None` if invariant /// isn'crates/store/src/db/mod.rs upheld. -#[allow(dead_code)] pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { if bytes.len() != N { return None; @@ -48,7 +37,7 @@ pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { Some(arr) } -#[allow(dead_code)] +#[expect(dead_code)] #[inline] pub fn from_be_to_u32(bytes: &[u8]) -> Option { slice_to_array::<4>(bytes).map(u32::from_be_bytes) @@ -62,8 +51,8 @@ pub struct PragmaSchemaVersion { } /// Returns the schema version of the database. -#[allow(dead_code)] -#[allow( +#[expect(dead_code)] +#[expect( clippy::cast_sign_loss, reason = "schema version is always positive and we will never reach 0xEFFF_..._FFFF" )] diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 0ae4b8e1e1..f93afc16e8 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -47,6 +47,8 @@ diesel::table! { block_headers (block_num) { block_num -> BigInt, block_header -> Binary, + signature -> Binary, + commitment -> Binary, } } @@ -74,7 +76,7 @@ diesel::table! 
{ consumed_at -> Nullable, nullifier -> Nullable, assets -> Nullable, - inputs -> Nullable, + storage -> Nullable, script_root -> Nullable, serial_num -> Nullable, } diff --git a/crates/store/src/db/schema_hash.rs b/crates/store/src/db/schema_hash.rs index 28e480fc0c..9a5ad1328a 100644 --- a/crates/store/src/db/schema_hash.rs +++ b/crates/store/src/db/schema_hash.rs @@ -11,11 +11,11 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use diesel_migrations::MigrationHarness; +use miden_node_db::SchemaVerificationError; use tracing::instrument; use crate::COMPONENT; use crate::db::migrations::MIGRATIONS; -use crate::errors::SchemaVerificationError; /// Represents a schema object for comparison. #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] @@ -107,10 +107,20 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati // Log specific differences at debug level for obj in &missing { - tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Missing or modified" + ); } for obj in &extra { - tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Extra or modified" + ); } return Err(SchemaVerificationError::Mismatch { @@ -129,7 +139,6 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati mod tests { use super::*; use crate::db::migrations::apply_migrations; - use crate::errors::DatabaseError; #[test] fn verify_schema_passes_for_correct_schema() { @@ -181,6 +190,9 @@ mod tests { .execute(&mut conn) .unwrap(); - assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + assert!(matches!( + apply_migrations(&mut conn), + Err(miden_node_db::DatabaseError::SchemaVerification(_)) + )); } } diff --git a/crates/store/src/db/tests.rs 
b/crates/store/src/db/tests.rs index 44b11c9b43..fed7143353 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1,6 +1,3 @@ -#![allow(clippy::similar_names, reason = "naming dummy test values is hard")] -#![allow(clippy::too_many_lines, reason = "test code can be long")] - use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; @@ -52,7 +49,6 @@ use miden_protocol::note::{ use miden_protocol::testing::account_id::{ ACCOUNT_ID_PRIVATE_SENDER, ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, - ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; @@ -73,9 +69,12 @@ use pretty_assertions::assert_eq; use rand::Rng; use super::{AccountInfo, NoteRecord, NullifierInfo}; -use crate::db::TransactionSummary; use crate::db::migrations::apply_migrations; -use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; +use crate::db::models::queries::{ + HISTORICAL_BLOCK_RETENTION, + StorageMapValue, + insert_account_storage_map_value, +}; use crate::db::models::{Page, queries, utils}; use crate::errors::DatabaseError; @@ -101,7 +100,8 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) { 11_u8.into(), ); - conn.transaction(|conn| queries::insert_block_header(conn, &block_header)) + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + conn.transaction(|conn| queries::insert_block_header(conn, &block_header, &dummy_signature)) .unwrap(); } @@ -162,33 +162,6 @@ fn sql_insert_transactions() { assert_eq!(count, 2, "Two elements must have been inserted"); } -#[test] -#[miden_node_test_macro::enable_logging] -fn sql_select_transactions() { - fn query_transactions(conn: &mut SqliteConnection) -> Vec { - queries::select_transactions_by_accounts_and_block_range( - conn, - &[AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap()], - BlockNumber::from(0)..=BlockNumber::from(2), - ) - .unwrap() - } - - let mut 
conn = create_db(); - let conn = &mut conn; - let transactions = query_transactions(conn); - - assert!(transactions.is_empty(), "No elements must be initially in the DB"); - - let count = insert_transactions(conn); - - assert_eq!(count, 2, "Two elements must have been inserted"); - - let transactions = query_transactions(conn); - - assert_eq!(transactions.len(), 2, "Two elements must be in the DB"); -} - #[test] #[miden_node_test_macro::enable_logging] fn sql_select_nullifiers() { @@ -768,7 +741,8 @@ fn db_block_header() { ); // test insertion - queries::insert_block_header(conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(conn, &block_header, &dummy_signature).unwrap(); // test fetch unknown block header let block_number = 1; @@ -799,7 +773,8 @@ fn db_block_header() { 21_u8.into(), ); - queries::insert_block_header(conn, &block_header2).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header2.commitment()); + queries::insert_block_header(conn, &block_header2, &dummy_signature).unwrap(); let res = queries::select_block_header_by_block_num(conn, None).unwrap(); assert_eq!(res.unwrap(), block_header2); @@ -808,80 +783,6 @@ fn db_block_header() { assert_eq!(res, [block_header, block_header2]); } -#[test] -#[miden_node_test_macro::enable_logging] -fn db_account() { - let mut conn = create_db(); - let conn = &mut conn; - let block_num: BlockNumber = 1.into(); - create_block(conn, block_num); - - // test empty table - let account_ids: Vec = - [ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, 1, 2, 3, 4, 5] - .iter() - .map(|acc_id| (*acc_id).try_into().unwrap()) - .collect(); - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); - - // test insertion - let account_id = ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE; - let account_commitment = num_to_word(0); - - 
let row_count = queries::upsert_accounts( - conn, - &[BlockAccountUpdate::new( - account_id.try_into().unwrap(), - account_commitment, - AccountUpdateDetails::Private, - )], - block_num, - ) - .unwrap(); - - assert_eq!(row_count, 1); - - // test successful query - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), - ) - .unwrap(); - assert_eq!( - res, - vec![AccountSummary { - account_id: account_id.try_into().unwrap(), - account_commitment, - block_num, - }] - ); - - // test query for update outside the block range - let res = queries::select_accounts_by_block_range( - conn, - &account_ids, - (block_num.as_u32() + 1).into()..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); - - // test query with unknown accounts - let res = queries::select_accounts_by_block_range( - conn, - &[6.try_into().unwrap(), 7.try_into().unwrap(), 8.try_into().unwrap()], - (block_num + 1)..=u32::MAX.into(), - ) - .unwrap(); - assert!(res.is_empty()); -} - #[test] #[miden_node_test_macro::enable_logging] fn notes() { @@ -890,7 +791,7 @@ fn notes() { let block_num_1 = 1.into(); create_block(conn, block_num_1); - let block_range = BlockNumber::from(0)..=BlockNumber::from(1); + let block_range = BlockNumber::GENESIS..=BlockNumber::from(1); // test empty table let (res, last_included_block) = @@ -1917,7 +1818,8 @@ fn db_roundtrip_block_header() { ); // Insert - queries::insert_block_header(&mut conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(&mut conn, &block_header, &dummy_signature).unwrap(); // Retrieve let retrieved = @@ -2045,47 +1947,6 @@ fn db_roundtrip_notes() { ); } -#[test] -#[miden_node_test_macro::enable_logging] -fn db_roundtrip_transactions() { - let mut conn = create_db(); - let block_num = BlockNumber::from(1); - create_block(&mut conn, block_num); - - let account_id = 
AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) - .unwrap(); - - let tx = mock_block_transaction(account_id, 1); - let ordered_tx = OrderedTransactionHeaders::new_unchecked(vec![tx.clone()]); - - // Insert - queries::insert_transactions(&mut conn, block_num, &ordered_tx).unwrap(); - - // Retrieve - let retrieved = queries::select_transactions_by_accounts_and_block_range( - &mut conn, - &[account_id], - BlockNumber::from(0)..=BlockNumber::from(2), - ) - .unwrap(); - - assert_eq!(retrieved.len(), 1, "Should have one transaction"); - let retrieved_tx = &retrieved[0]; - - assert_eq!( - tx.account_id(), - retrieved_tx.account_id, - "AccountId DB roundtrip must be symmetric" - ); - assert_eq!( - tx.id(), - retrieved_tx.transaction_id, - "TransactionId DB roundtrip must be symmetric" - ); - assert_eq!(block_num, retrieved_tx.block_num, "Block number must match"); -} - #[test] #[miden_node_test_macro::enable_logging] fn db_roundtrip_vault_assets() { @@ -2280,7 +2141,7 @@ fn db_roundtrip_account_storage_with_maps() { #[test] #[miden_node_test_macro::enable_logging] -fn test_note_metadata_with_attachment_roundtrip() { +fn db_roundtrip_note_metadata_attachment() { let mut conn = create_db(); let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); @@ -2331,3 +2192,323 @@ fn test_note_metadata_with_attachment_roundtrip() { "NetworkAccountTarget should have the correct target account ID" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn test_prune_history() { + let mut conn = create_db(); + let conn = &mut conn; + + let public_account_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Create blocks around the retention window. 
+ const GENESIS_BLOCK_NUM: u32 = 0; + const OLD_BLOCK_OFFSET: u32 = 1; + const CUTOFF_BLOCK_OFFSET: u32 = 2; + const UPDATE_BLOCK_OFFSET: u32 = 3; + + let block_0: BlockNumber = GENESIS_BLOCK_NUM.into(); + let block_old: BlockNumber = OLD_BLOCK_OFFSET.into(); + let block_cutoff: BlockNumber = CUTOFF_BLOCK_OFFSET.into(); + let block_update: BlockNumber = UPDATE_BLOCK_OFFSET.into(); + let block_tip: BlockNumber = (HISTORICAL_BLOCK_RETENTION + CUTOFF_BLOCK_OFFSET).into(); + + for block in [block_0, block_old, block_cutoff, block_update, block_tip] { + create_block(conn, block); + } + + // Create account + for block in [block_0, block_old, block_cutoff, block_update, block_tip] { + queries::upsert_accounts(conn, &[mock_block_account_update(public_account_id, 0)], block) + .unwrap(); + } + + // Insert vault assets at different blocks + let vault_key_old = AssetVaultKey::new_unchecked(num_to_word(100)); + let vault_key_cutoff = AssetVaultKey::new_unchecked(num_to_word(200)); + let vault_key_recent = AssetVaultKey::new_unchecked(num_to_word(300)); + let asset_1 = Asset::Fungible(FungibleAsset::new(public_account_id, 1000).unwrap()); + let asset_2 = Asset::Fungible(FungibleAsset::new(public_account_id, 2000).unwrap()); + let asset_3 = Asset::Fungible(FungibleAsset::new(public_account_id, 3000).unwrap()); + + // Old entry at block_old (should be deleted when cutoff is at block_cutoff for + // chain_tip=block_tip) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_old, + vault_key_old, + Some(asset_1), + ) + .unwrap(); + + // Entry exactly at cutoff (block_cutoff, should be retained) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_cutoff, + vault_key_cutoff, + Some(asset_2), + ) + .unwrap(); + + // Recent entry (should always be retained) + queries::insert_account_vault_asset( + conn, + public_account_id, + block_tip, + vault_key_recent, + Some(asset_3), + ) + .unwrap(); + + // Update an entry to create a non-latest 
version + let updated_asset = Asset::Fungible(FungibleAsset::new(public_account_id, 1500).unwrap()); + queries::insert_account_vault_asset( + conn, + public_account_id, + block_update, + vault_key_old, + Some(updated_asset), + ) + .unwrap(); + + // Insert storage map values at different blocks + let slot_name = StorageSlotName::mock(5); + let map_key_old = num_to_word(10); + let map_key_cutoff = num_to_word(20); + let map_key_recent = num_to_word(30); + let value_1 = num_to_word(111); + let value_2 = num_to_word(222); + let value_3 = num_to_word(333); + let value_updated = num_to_word(444); + + // Old storage map entry at block_old + insert_account_storage_map_value( + conn, + public_account_id, + block_old, + slot_name.clone(), + map_key_old, + value_1, + ) + .unwrap(); + + // Storage map entry at cutoff boundary (block_cutoff) + insert_account_storage_map_value( + conn, + public_account_id, + block_cutoff, + slot_name.clone(), + map_key_cutoff, + value_2, + ) + .unwrap(); + + // Recent storage map entry + insert_account_storage_map_value( + conn, + public_account_id, + block_tip, + slot_name.clone(), + map_key_recent, + value_3, + ) + .unwrap(); + + // Update map_key_old to create a non-latest entry at block_update + insert_account_storage_map_value( + conn, + public_account_id, + block_update, + slot_name.clone(), + map_key_old, + value_updated, + ) + .unwrap(); + + // Verify initial state - should have 4 vault assets and 4 storage map values + let (_, initial_vault_assets) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert_eq!(initial_vault_assets.len(), 4, "should have 4 vault assets before cleanup"); + + let initial_storage_values = + queries::select_account_storage_map_values(conn, public_account_id, block_0..=block_tip) + .unwrap(); + assert_eq!( + initial_storage_values.values.len(), + 4, + "should have 4 storage map values before cleanup" + ); + + // Run cleanup with chain_tip = block_tip, cutoff will 
be block_tip - HISTORICAL_BLOCK_RETENTION + // = block_cutoff + let (vault_deleted, storage_deleted) = queries::prune_history(conn, block_tip).unwrap(); + + // Verify deletions occurred + assert_eq!(vault_deleted, 1, "should delete 1 old vault asset"); + assert_eq!(storage_deleted, 1, "should delete 1 old storage map value"); + + // Verify remaining vault assets - should have 3 (cutoff, update, tip) + let (_, remaining_vault_assets) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert_eq!(remaining_vault_assets.len(), 3, "should have 3 vault assets after cleanup"); + + // Verify no vault asset at block_old remains + assert!( + !remaining_vault_assets.iter().any(|v| v.block_num == block_old), + "block_old vault asset should be deleted" + ); + + // Verify vault assets at block_cutoff, block_update, block_tip remain + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_cutoff), + "block_cutoff vault asset should be retained (at cutoff)" + ); + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_update), + "block_update vault asset should be retained" + ); + assert!( + remaining_vault_assets.iter().any(|v| v.block_num == block_tip), + "block_tip vault asset should be retained" + ); + + // Verify remaining storage map values - should have 3 (cutoff, update, tip) + let remaining_storage_values = + queries::select_account_storage_map_values(conn, public_account_id, block_0..=block_tip) + .unwrap(); + assert_eq!( + remaining_storage_values.values.len(), + 3, + "should have 3 storage map values after cleanup" + ); + + // Verify no storage map value at block_old remains + assert!( + !remaining_storage_values.values.iter().any(|v| v.block_num == block_old), + "block_old storage map value should be deleted" + ); + + // Verify storage map values at block_cutoff, block_update, block_tip remain + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_cutoff), + 
"block_cutoff storage map value should be retained (at cutoff)" + ); + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_update), + "block_update storage map value should be retained" + ); + assert!( + remaining_storage_values.values.iter().any(|v| v.block_num == block_tip), + "block_tip storage map value should be retained" + ); + + // Test that is_latest=true entries are never deleted, even if old + // Insert an old entry marked as latest + let vault_key_old_latest = AssetVaultKey::new_unchecked(num_to_word(999)); + let asset_old = Asset::Fungible(FungibleAsset::new(public_account_id, 9999).unwrap()); + queries::insert_account_vault_asset( + conn, + public_account_id, + block_0, + vault_key_old_latest, + Some(asset_old), + ) + .unwrap(); + + // This entry at block 0 is marked as is_latest=true by insert_account_vault_asset + // Run cleanup again + let (vault_deleted_2, _) = queries::prune_history(conn, block_tip).unwrap(); + + // The old latest entry should not be deleted (vault_deleted_2 should be 0) + assert_eq!(vault_deleted_2, 0, "should not delete any is_latest=true entries"); + + // Verify the old latest entry still exists + let (_, vault_assets_with_latest) = + queries::select_account_vault_assets(conn, public_account_id, block_0..=block_tip).unwrap(); + assert!( + vault_assets_with_latest + .iter() + .any(|v| v.block_num == block_0 && v.vault_key == vault_key_old_latest), + "is_latest=true entry should be retained even if old" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_transactions() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) + .unwrap(); + + // Build two transaction headers with distinct data + let tx1 = mock_block_transaction(account_id, 1); + let tx2 = 
mock_block_transaction(account_id, 2); + let ordered = OrderedTransactionHeaders::new_unchecked(vec![tx1.clone(), tx2.clone()]); + + // Insert + let count = queries::insert_transactions(&mut conn, block_num, &ordered).unwrap(); + assert_eq!(count, 2, "Should insert 2 transactions"); + + // Retrieve + let (last_block, records) = queries::select_transactions_records( + &mut conn, + &[account_id], + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + assert_eq!(last_block, block_num, "Last block should match"); + assert_eq!(records.len(), 2, "Should retrieve 2 transactions"); + + // Verify each transaction roundtrips correctly. + // Records are ordered by (block_num, transaction_id), so match by ID. + let originals = [&tx1, &tx2]; + for record in &records { + let original = originals + .iter() + .find(|tx| tx.id() == record.transaction_id) + .expect("Retrieved transaction should match one of the originals"); + assert_eq!( + record.transaction_id, + original.id(), + "TransactionId DB roundtrip must be symmetric" + ); + assert_eq!( + record.account_id, + original.account_id(), + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!(record.block_num, block_num, "Block number must match"); + assert_eq!( + record.initial_state_commitment, + original.initial_state_commitment(), + "Initial state commitment DB roundtrip must be symmetric" + ); + assert_eq!( + record.final_state_commitment, + original.final_state_commitment(), + "Final state commitment DB roundtrip must be symmetric" + ); + + // Input notes are stored as nullifiers only + let expected_nullifiers: Vec = + original.input_notes().iter().map(InputNoteCommitment::nullifier).collect(); + assert_eq!( + record.nullifiers, expected_nullifiers, + "Nullifiers (from input notes) DB roundtrip must be symmetric" + ); + + // Output notes are stored as note IDs only + let expected_note_ids: Vec = + original.output_notes().iter().map(NoteHeader::id).collect(); + assert_eq!( + record.output_notes, expected_note_ids, + 
"Output note IDs DB roundtrip must be symmetric" + ); + } +} diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 6796505808..a277f1c689 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -1,10 +1,9 @@ -use std::any::type_name; use std::io; -use deadpool_sync::InteractError; use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; +use miden_node_utils::ErrorReport; use miden_node_utils::limiter::QueryLimitError; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -29,7 +28,6 @@ use thiserror::Error; use tokio::sync::oneshot::error::RecvError; use tonic::Status; -use crate::db::manager::ConnectionManagerError; use crate::db::models::conv::DatabaseTypeConversionError; use crate::inner_forest::{InnerForestError, WitnessError}; @@ -40,60 +38,30 @@ use crate::inner_forest::{InnerForestError, WitnessError}; pub enum DatabaseError { // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES // --------------------------------------------------------------------------------------------- - #[error("account is incomplete")] - AccountIncomplete, #[error("account error")] AccountError(#[from] AccountError), - #[error("account delta error")] - AccountDeltaError(#[from] AccountDeltaError), #[error("asset vault error")] AssetVaultError(#[from] AssetVaultError), #[error("asset error")] AssetError(#[from] AssetError), #[error("closed channel")] ClosedChannel(#[from] RecvError), + #[error("database error")] + DatabaseError(#[from] miden_node_db::DatabaseError), #[error("deserialization failed")] DeserializationError(#[from] DeserializationError), - #[error("hex parsing error")] - FromHexError(#[from] hex::FromHexError), #[error("I/O error")] IoError(#[from] io::Error), #[error("merkle error")] MerkleError(#[from] MerkleError), - #[error("network account error")] - NetworkAccountError(#[from] 
NetworkAccountError), #[error("note error")] NoteError(#[from] NoteError), #[error("storage map error")] StorageMapError(#[from] StorageMapError), - #[error("setup deadpool connection pool failed")] - Deadpool(#[from] deadpool::managed::PoolError), - #[error("setup deadpool connection pool failed")] - ConnectionPoolObtainError(#[from] Box), #[error(transparent)] Diesel(#[from] diesel::result::Error), - #[error("sqlite FFI boundary NUL termination error (not much you can do, file an issue)")] - DieselSqliteFfi(#[from] std::ffi::NulError), - #[error(transparent)] - DeadpoolDiesel(#[from] deadpool_diesel::Error), - #[error(transparent)] - PoolRecycle(#[from] deadpool::managed::RecycleError), - #[error("summing over column {column} of table {table} exceeded {limit}")] - ColumnSumExceedsLimit { - table: &'static str, - column: &'static str, - limit: &'static str, - #[source] - source: Box, - }, #[error(transparent)] QueryParamLimit(#[from] QueryLimitError), - #[error("conversion from SQL to rust type {to} failed")] - ConversionSqlToRust { - #[source] - inner: Option>, - to: &'static str, - }, // OTHER ERRORS // --------------------------------------------------------------------------------------------- @@ -101,39 +69,16 @@ pub enum DatabaseError { AccountCommitmentsMismatch { expected: Word, calculated: Word }, #[error("account {0} not found")] AccountNotFoundInDb(AccountId), - #[error("account {0} state at block height {1} not found")] - AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber), - #[error("block {0} not found in database")] - BlockNotFound(BlockNumber), - #[error("historical block {block_num} not available: {reason}")] - HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String }, #[error("accounts {0:?} not found")] AccountsNotFoundInDb(Vec), #[error("account {0} is not on the chain")] AccountNotPublic(AccountId), #[error("invalid block parameters: block_from ({from}) > block_to ({to})")] InvalidBlockRange { from: BlockNumber, to: 
BlockNumber }, - #[error("invalid storage slot type: {0}")] - InvalidStorageSlotType(i32), #[error("data corrupted: {0}")] DataCorrupted(String), - #[error("SQLite pool interaction failed: {0}")] - InteractError(String), - #[error("invalid Felt: {0}")] - InvalidFelt(String), - #[error( - "unsupported database version. There is no migration chain from/to this version. \ - Remove all database files and try again." - )] - UnsupportedDatabaseVersion, - #[error("schema verification failed")] - SchemaVerification(#[from] SchemaVerificationError), - #[error(transparent)] - ConnectionManager(#[from] ConnectionManagerError), #[error(transparent)] SqlValueConversion(#[from] DatabaseTypeConversionError), - #[error("Not implemented: {0}")] - NotImplemented(String), #[error("storage root not found for account {account_id}, slot {slot_name}, block {block_num}")] StorageRootNotFound { account_id: AccountId, @@ -142,35 +87,6 @@ pub enum DatabaseError { }, } -impl DatabaseError { - /// Converts from `InteractError` - /// - /// Note: Required since `InteractError` has at least one enum - /// variant that is _not_ `Send + Sync` and hence prevents the - /// `Sync` auto implementation. - /// This does an internal conversion to string while maintaining - /// convenience. 
- /// - /// Using `MSG` as const so it can be called as - /// `.map_err(DatabaseError::interact::<"Your message">)` - pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { - let msg = msg.to_string(); - Self::InteractError(format!("{msg} failed: {e:?}")) - } - - /// Failed to convert an SQL entry to a rust representation - pub fn conversiont_from_sql(err: MaybeE) -> DatabaseError - where - MaybeE: Into>, - E: std::error::Error + Send + Sync + 'static, - { - DatabaseError::ConversionSqlToRust { - inner: err.into().map(|err| Box::new(err) as Box), - to: type_name::(), - } - } -} - impl From for Status { fn from(err: DatabaseError) -> Self { match err { @@ -203,7 +119,7 @@ pub enum StateInitializationError { #[error("failed to load block store")] BlockStoreLoadError(#[source] std::io::Error), #[error("failed to load database")] - DatabaseLoadError(#[from] DatabaseSetupError), + DatabaseLoadError(#[source] DatabaseError), #[error("inner forest error")] InnerForestError(#[from] InnerForestError), #[error( @@ -223,20 +139,6 @@ pub enum StateInitializationError { AccountToDeltaConversionFailed(String), } -#[derive(Debug, Error)] -pub enum DatabaseSetupError { - #[error("I/O error")] - Io(#[from] io::Error), - #[error("database error")] - Database(#[from] DatabaseError), - #[error("genesis block error")] - GenesisBlock(#[from] GenesisError), - #[error("pool build error")] - PoolBuild(#[from] deadpool::managed::BuildError), - #[error("Setup deadpool connection pool failed")] - Pool(#[from] deadpool::managed::PoolError), -} - #[derive(Debug, Error)] pub enum GenesisError { // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES @@ -313,6 +215,16 @@ pub enum ApplyBlockError { DbUpdateTaskFailed(String), } +impl From for Status { + fn from(err: ApplyBlockError) -> Self { + match err { + ApplyBlockError::InvalidBlockError(_) => Status::invalid_argument(err.as_report()), + + _ => Status::internal(err.as_report()), + } + } +} + #[derive(Error, Debug, 
GrpcError)] pub enum GetBlockHeaderError { #[error("database error")] @@ -348,6 +260,19 @@ pub enum StateSyncError { FailedToBuildMmrDelta(#[from] MmrError), } +#[derive(Error, Debug, GrpcError)] +pub enum SyncChainMmrError { + #[error("invalid block range")] + InvalidBlockRange(#[source] InvalidBlockRange), + #[error("start block is not known")] + FutureBlock { + chain_tip: BlockNumber, + block_from: BlockNumber, + }, + #[error("malformed block number")] + DeserializationFailed(#[source] ConversionError), +} + impl From for StateSyncError { fn from(value: diesel::result::Error) -> Self { Self::DatabaseError(DatabaseError::from(value)) @@ -359,6 +284,9 @@ pub enum NoteSyncError { #[error("database error")] #[grpc(internal)] DatabaseError(#[from] DatabaseError), + #[error("database error")] + #[grpc(internal)] + UnderlyingDatabaseError(#[from] miden_node_db::DatabaseError), #[error("block headers table is empty")] #[grpc(internal)] EmptyBlockHeadersTable, @@ -478,6 +406,26 @@ pub enum GetBlockByNumberError { DeserializationFailed(#[from] DeserializationError), } +// GET ACCOUNT ERRORS +// ================================================================================================ + +#[derive(Debug, Error, GrpcError)] +pub enum GetAccountError { + #[error("database error")] + #[grpc(internal)] + DatabaseError(#[from] DatabaseError), + #[error("malformed request")] + DeserializationFailed(#[from] ConversionError), + #[error("account {0} not found at block {1}")] + AccountNotFound(AccountId, BlockNumber), + #[error("account {0} is not public")] + AccountNotPublic(AccountId), + #[error("block {0} is unknown")] + UnknownBlock(BlockNumber), + #[error("block {0} has been pruned")] + BlockPruned(BlockNumber), +} + // GET NOTES BY ID ERRORS // ================================================================================================ @@ -546,28 +494,81 @@ pub enum GetWitnessesError { WitnessError(#[from] WitnessError), } -// SCHEMA VERIFICATION ERRORS -// 
================================================================================================= +#[cfg(test)] +mod get_account_error_tests { + use miden_protocol::account::AccountId; + use miden_protocol::block::BlockNumber; + use miden_protocol::testing::account_id::AccountIdBuilder; + use tonic::Status; -/// Errors that can occur during schema verification. -#[derive(Debug, Error)] -pub enum SchemaVerificationError { - #[error("failed to create in-memory reference database")] - InMemoryDbCreation(#[source] diesel::ConnectionError), - #[error("failed to apply migrations to reference database")] - MigrationApplication(#[source] Box), - #[error("failed to extract schema from database")] - SchemaExtraction(#[source] diesel::result::Error), - #[error( - "schema mismatch: expected {expected_count} objects, found {actual_count} \ - ({missing_count} missing, {extra_count} unexpected)" - )] - Mismatch { - expected_count: usize, - actual_count: usize, - missing_count: usize, - extra_count: usize, - }, + use super::GetAccountError; + + fn test_account_id() -> AccountId { + AccountIdBuilder::new().build_with_seed([1; 32]) + } + + #[test] + fn unknown_block_returns_invalid_argument() { + let block = BlockNumber::from(999); + let err = GetAccountError::UnknownBlock(block); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + assert!(!status.metadata().is_empty() || !status.details().is_empty()); + } + + #[test] + fn block_pruned_returns_invalid_argument() { + let block = BlockNumber::from(1); + let err = GetAccountError::BlockPruned(block); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn account_not_public_returns_invalid_argument() { + let err = GetAccountError::AccountNotPublic(test_account_id()); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn 
account_not_found_returns_invalid_argument_with_block_context() { + let account_id = test_account_id(); + let block = BlockNumber::from(5); + let err = GetAccountError::AccountNotFound(account_id, block); + let msg = err.to_string(); + assert!(msg.contains("not found"), "error message should mention 'not found'"); + assert!(msg.contains("block"), "error message should include block context"); + + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn each_variant_has_unique_discriminant() { + let account_id = test_account_id(); + let block = BlockNumber::from(1); + + let errors = [ + GetAccountError::AccountNotFound(account_id, block), + GetAccountError::AccountNotPublic(account_id), + GetAccountError::UnknownBlock(block), + GetAccountError::BlockPruned(block), + ]; + + let codes: Vec = errors.iter().map(|e| e.api_error().api_code()).collect(); + + // All non-internal variants should have unique, non-zero discriminants + for &code in &codes { + assert_ne!(code, 0, "non-internal variants should not map to Internal (0)"); + } + + // Check uniqueness + let mut sorted = codes.clone(); + sorted.sort_unstable(); + sorted.dedup(); + assert_eq!(sorted.len(), codes.len(), "all error variants should have unique codes"); + } } // Do not scope for `cfg(test)` - if it the traitbounds don't suffice the issue will already appear @@ -580,7 +581,6 @@ mod compile_tests { AccountDeltaError, AccountError, DatabaseError, - DatabaseSetupError, DeserializationError, GenesisError, NetworkAccountError, @@ -591,7 +591,7 @@ mod compile_tests { /// Ensure all enum variants remain compat with the desired /// trait bounds. Otherwise one gets very unwieldy errors. 
- #[allow(dead_code)] + #[expect(dead_code)] fn assumed_trait_bounds_upheld() { fn ensure_is_error(_phony: PhantomData) where @@ -612,7 +612,6 @@ mod compile_tests { ensure_is_error::>(PhantomData); ensure_is_error::(PhantomData); - ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); ensure_is_error::(PhantomData); diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index b39495c872..3ea497d547 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use miden_protocol::account::AccountId; use miden_protocol::errors::{ AccountDeltaError, @@ -12,18 +14,25 @@ use miden_standards::account::wallets::BasicWalletError; use crate::genesis::config::TokenSymbolStr; -#[allow(missing_docs, reason = "Error variants must be descriptive by themselves")] #[derive(Debug, thiserror::Error)] pub enum GenesisConfigError { #[error(transparent)] Toml(#[from] toml::de::Error), + #[error("failed to read config file at {1}")] + ConfigFileRead(#[source] std::io::Error, PathBuf), + #[error("failed to read account file at {1}")] + AccountFileRead(#[source] std::io::Error, PathBuf), + #[error("native faucet from file {path} is not a fungible faucet")] + NativeFaucetNotFungible { path: PathBuf }, #[error("account translation from config to state failed")] Account(#[from] AccountError), #[error("asset translation from config to state failed")] Asset(#[from] AssetError), #[error("adding assets to account failed")] AccountDelta(#[from] AccountDeltaError), - #[error("the defined asset {symbol:?} has no corresponding faucet")] + #[error( + "the defined asset '{symbol}' has no corresponding faucet, or the faucet was provided as an account file" + )] MissingFaucetDefinition { symbol: TokenSymbolStr }, #[error("account with id {account_id} was referenced but is not part of given genesis state")] MissingGenesisAccount { 
account_id: AccountId }, @@ -41,10 +50,10 @@ pub enum GenesisConfigError { BasicWallet(#[from] BasicWalletError), #[error(r#"incompatible combination of `max_supply` ({max_supply})" and `decimals` ({decimals}) exceeding the allowed value range of an `u64`"#)] OutOfRange { max_supply: u64, decimals: u8 }, - #[error("Found duplicate faucet definition for token symbol {symbol:?}")] + #[error("Found duplicate faucet definition for token symbol '{symbol}'")] DuplicateFaucetDefinition { symbol: TokenSymbolStr }, #[error( - "Total issuance {total_issuance} of {symbol:?} exceeds faucet's maximum issuance of {max_supply}" + "Total issuance {total_issuance} of '{symbol}' exceeds faucet's maximum issuance of {max_supply}" )] MaxIssuanceExceeded { symbol: TokenSymbolStr, diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index e7abe8b58d..271c5a8bc3 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -1,6 +1,7 @@ //! Describe a subset of the genesis manifest in easily human readable format use std::cmp::Ordering; +use std::path::{Path, PathBuf}; use std::str::FromStr; use indexmap::IndexMap; @@ -42,27 +43,55 @@ use self::errors::GenesisConfigError; #[cfg(test)] mod tests; +const DEFAULT_NATIVE_FAUCET_SYMBOL: &str = "MIDEN"; +const DEFAULT_NATIVE_FAUCET_DECIMALS: u8 = 6; +const DEFAULT_NATIVE_FAUCET_MAX_SUPPLY: u64 = 100_000_000_000_000_000; + // GENESIS CONFIG // ================================================================================================ +/// An account loaded from a `.mac` file (path relative to genesis config directory). +/// +/// Notice: Generic accounts are not validated (e.g. that their vault assets reference known +/// faucets), leaving the responsibility of ensuring valid genesis state to the operator. 
+#[derive(Debug, Clone, serde::Deserialize)] +#[serde(deny_unknown_fields)] +struct GenericAccountConfig { + path: PathBuf, +} + /// Specify a set of faucets and wallets with assets for easier test deployments. /// /// Notice: Any faucet must be declared _before_ it's use in a wallet/regular account. -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, serde::Deserialize)] +#[serde(deny_unknown_fields)] pub struct GenesisConfig { version: u32, timestamp: u32, - native_faucet: NativeFaucet, + /// Override the native faucet with a custom faucet account. + /// + /// If unspecified, a default native faucet will be used with: + /// + /// ```toml + /// symbol = "MIDEN" + /// decimals = 6 + /// max_supply = 100_000_000_000_000_000 + /// ``` + #[serde(default)] + native_faucet: Option, fee_parameters: FeeParameterConfig, #[serde(default)] wallet: Vec, #[serde(default)] fungible_faucet: Vec, + #[serde(default)] + account: Vec, + #[serde(skip)] + config_dir: PathBuf, } impl Default for GenesisConfig { fn default() -> Self { - let miden = TokenSymbolStr::from_str("MIDEN").unwrap(); Self { version: 1_u32, timestamp: u32::try_from( @@ -73,30 +102,44 @@ impl Default for GenesisConfig { ) .expect("Timestamp should fit into u32"), wallet: vec![], - native_faucet: NativeFaucet { - max_supply: 100_000_000_000_000_000u64, - decimals: 6u8, - symbol: miden.clone(), - }, + native_faucet: None, fee_parameters: FeeParameterConfig { verification_base_fee: 0 }, fungible_faucet: vec![], + account: vec![], + config_dir: PathBuf::from("."), } } } impl GenesisConfig { - /// Read the genesis accounts from a toml formatted string + /// Read the genesis config from a TOML file. + /// + /// The parent directory of `path` is used to resolve relative paths for account files + /// referenced in the configuration (e.g., `[[account]]` entries with `path` fields). /// /// Notice: It will generate the specified case during [`fn into_state`]. 
- pub fn read_toml(toml_str: &str) -> Result { - let me = toml::from_str::(toml_str)?; - Ok(me) + pub fn read_toml_file(path: &Path) -> Result { + let toml_str = fs_err::read_to_string(path) + .map_err(|e| GenesisConfigError::ConfigFileRead(e, path.to_path_buf()))?; + let config_dir = path.parent().expect("config file path must have a parent directory"); + Self::read_toml(&toml_str, config_dir) + } + + /// Parse a genesis config from a TOML formatted string. + /// + /// The `config_dir` parameter is stored so that relative paths for account files + /// (e.g., `[[account]]` entries with `path` fields, or native faucet file references) + /// can be resolved later during [`Self::into_state`]. + fn read_toml(toml_str: &str, config_dir: &Path) -> Result { + let mut config: Self = toml::from_str(toml_str)?; + config.config_dir = config_dir.to_path_buf(); + Ok(config) } /// Convert the in memory representation into the new genesis state /// /// Also returns the set of secrets for the generated accounts. - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] pub fn into_state( self, signer: S, @@ -108,10 +151,20 @@ impl GenesisConfig { fee_parameters, fungible_faucet: fungible_faucet_configs, wallet: wallet_configs, - .. 
+ account: account_entries, + config_dir, } = self; - let symbol = native_faucet.symbol.clone(); + // Load account files from disk + let file_loaded_accounts = account_entries + .into_iter() + .map(|acc| { + let full_path = config_dir.join(&acc.path); + let account_file = AccountFile::read(&full_path) + .map_err(|e| GenesisConfigError::AccountFileRead(e, full_path.clone()))?; + Ok(account_file.account) + }) + .collect::, GenesisConfigError>>()?; let mut wallet_accounts = Vec::::new(); // Every asset sitting in a wallet, has to reference a faucet for that asset @@ -121,10 +174,21 @@ impl GenesisConfig { // accounts/sign transactions let mut secrets = Vec::new(); - // First setup all the faucets - for fungible_faucet_config in std::iter::once(native_faucet.to_faucet_config()) - .chain(fungible_faucet_configs.into_iter()) - { + // Handle native faucet: build from defaults or load from file + let (native_faucet_account, symbol, native_secret) = + NativeFaucetConfig(native_faucet).build_account(&config_dir)?; + if let Some(secret_key) = native_secret { + secrets.push(( + format!("faucet_{symbol}.mac", symbol = symbol.to_string().to_lowercase()), + native_faucet_account.id(), + secret_key, + )); + } + let native_faucet_account_id = native_faucet_account.id(); + faucet_accounts.insert(symbol.clone(), native_faucet_account); + + // Setup additional fungible faucets from parameters + for fungible_faucet_config in fungible_faucet_configs { let symbol = fungible_faucet_config.symbol.clone(); let (faucet_account, secret_key) = fungible_faucet_config.build_account()?; @@ -141,11 +205,6 @@ impl GenesisConfig { // we know the remaining supply in the faucets. 
} - let native_faucet_account_id = faucet_accounts - .get(&symbol) - .expect("Parsing guarantees the existence of a native faucet.") - .id(); - let fee_parameters = FeeParameters::new(native_faucet_account_id, fee_parameters.verification_base_fee)?; @@ -158,7 +217,7 @@ impl GenesisConfig { for (index, WalletConfig { has_updatable_code, storage_mode, assets }) in wallet_configs.into_iter().enumerate() { - tracing::debug!("Adding wallet account {index} with {assets:?}"); + tracing::debug!(index, assets = ?assets, "Adding wallet account"); let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); @@ -264,6 +323,9 @@ impl GenesisConfig { // Ensure the faucets always precede the wallets referencing them all_accounts.extend(wallet_accounts); + // Append file-loaded accounts as-is + all_accounts.extend(file_loaded_accounts); + Ok(( GenesisState { fee_parameters, @@ -277,36 +339,6 @@ impl GenesisConfig { } } -// NATIVE FAUCET -// ================================================================================================ - -/// Declare the native fungible asset -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -#[serde(deny_unknown_fields)] -pub struct NativeFaucet { - /// Token symbol to use for fees. - symbol: TokenSymbolStr, - - decimals: u8, - /// Max supply in full token units - /// - /// It will be converted internally to the smallest representable unit, - /// using based `10.powi(decimals)` as a multiplier. - max_supply: u64, -} - -impl NativeFaucet { - fn to_faucet_config(&self) -> FungibleFaucetConfig { - let NativeFaucet { symbol, decimals, max_supply, .. 
} = self; - FungibleFaucetConfig { - symbol: symbol.clone(), - decimals: *decimals, - max_supply: *max_supply, - storage_mode: StorageMode::Public, - } - } -} - // FEE PARAMETER CONFIG // ================================================================================================ @@ -320,6 +352,54 @@ pub struct FeeParameterConfig { verification_base_fee: u32, } +// NATIVE FAUCET CONFIG +// ================================================================================================ + +/// Wraps an optional path to a pre-built faucet account file. +/// +/// When no path is provided, a default native faucet is built using hardcoded MIDEN defaults. +struct NativeFaucetConfig(Option); + +impl NativeFaucetConfig { + /// Build or load the native faucet account. + /// + /// For `None`, builds a new faucet from defaults and returns the generated secret key. + /// For `Some(path)`, loads the account from disk and validates it is a fungible faucet. + fn build_account( + self, + config_dir: &Path, + ) -> Result<(Account, TokenSymbolStr, Option), GenesisConfigError> { + match self.0 { + None => { + let symbol = TokenSymbolStr::from_str(DEFAULT_NATIVE_FAUCET_SYMBOL).unwrap(); + let faucet_config = FungibleFaucetConfig { + symbol: symbol.clone(), + decimals: DEFAULT_NATIVE_FAUCET_DECIMALS, + max_supply: DEFAULT_NATIVE_FAUCET_MAX_SUPPLY, + storage_mode: StorageMode::Public, + }; + let (account, secret_key) = faucet_config.build_account()?; + Ok((account, symbol, Some(secret_key))) + }, + Some(path) => { + let full_path = config_dir.join(&path); + let account_file = AccountFile::read(&full_path) + .map_err(|e| GenesisConfigError::AccountFileRead(e, full_path.clone()))?; + let account = account_file.account; + + if account.id().account_type() != AccountType::FungibleFaucet { + return Err(GenesisConfigError::NativeFaucetNotFungible { path: full_path }); + } + + let faucet = BasicFungibleFaucet::try_from(&account) + .expect("validated as fungible faucet above"); + let symbol = 
TokenSymbolStr::from(faucet.symbol()); + Ok((account, symbol, None)) + }, + } + } +} + // FUNGIBLE FAUCET CONFIG // ================================================================================================ @@ -548,6 +628,14 @@ impl From for TokenSymbol { } } +impl From for TokenSymbolStr { + fn from(symbol: TokenSymbol) -> Self { + // SAFETY: TokenSymbol guarantees valid format, so to_string should not fail + let raw = symbol.to_string().expect("TokenSymbol should always produce valid string"); + Self { raw, encoded: symbol } + } +} + impl Ord for TokenSymbolStr { fn cmp(&self, other: &Self) -> Ordering { self.raw.cmp(&other.raw) diff --git a/crates/store/src/genesis/config/samples/01-simple.toml b/crates/store/src/genesis/config/samples/01-simple.toml index d32403e85c..2d7af48849 100644 --- a/crates/store/src/genesis/config/samples/01-simple.toml +++ b/crates/store/src/genesis/config/samples/01-simple.toml @@ -1,11 +1,6 @@ timestamp = 1717344256 version = 1 -[native_faucet] -decimals = 3 -max_supply = 100_000_000 -symbol = "MIDEN" - [fee_parameters] verification_base_fee = 0 diff --git a/crates/store/src/genesis/config/samples/02-with-account-files.toml b/crates/store/src/genesis/config/samples/02-with-account-files.toml new file mode 100644 index 0000000000..ede3032b64 --- /dev/null +++ b/crates/store/src/genesis/config/samples/02-with-account-files.toml @@ -0,0 +1,30 @@ +# Genesis configuration example with AggLayer account files +# +# This example demonstrates how to include pre-built accounts from .mac files +# in the genesis block. The account files are generated by the build script +# using deterministic seeds for reproducibility. +# +# They demonstrate interdependencies between accounts: +# - bridge.mac: AggLayer bridge account for cross-chain asset transfers +# - agglayer_faucet_eth.mac: AggLayer faucet for wrapped ETH, depends on the bridge account. +# - agglayer_faucet_usdc.mac: AggLayer faucet for wrapped USDC, depends on the bridge account. 
+# +# Paths are relative to the directory containing this configuration file. + +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +# AggLayer bridge account for bridging assets to/from AggLayer +[[account]] +path = "02-with-account-files/bridge.mac" + +# AggLayer ETH faucet for wrapped ETH tokens +[[account]] +path = "02-with-account-files/agglayer_faucet_eth.mac" + +# AggLayer USDC faucet for wrapped USDC tokens +[[account]] +path = "02-with-account-files/agglayer_faucet_usdc.mac" diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac new file mode 100644 index 0000000000..ed79a49b1b Binary files /dev/null and b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_eth.mac differ diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac new file mode 100644 index 0000000000..13c71956cd Binary files /dev/null and b/crates/store/src/genesis/config/samples/02-with-account-files/agglayer_faucet_usdc.mac differ diff --git a/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac b/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac new file mode 100644 index 0000000000..57b4627150 Binary files /dev/null and b/crates/store/src/genesis/config/samples/02-with-account-files/bridge.mac differ diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index 23e2daa43c..acdeb304c2 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -1,3 +1,6 @@ +use std::io::Write; +use std::path::Path; + use assert_matches::assert_matches; use miden_protocol::ONE; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; @@ -6,11 +9,23 @@ use 
super::*; type TestResult = Result<(), Box>; +/// Helper to write TOML content to a file and return the path +fn write_toml_file(dir: &Path, content: &str) -> std::path::PathBuf { + let path = dir.join("genesis.toml"); + let mut file = std::fs::File::create(&path).unwrap(); + file.write_all(content.as_bytes()).unwrap(); + path +} + #[test] #[miden_node_test_macro::enable_logging] fn parsing_yields_expected_default_values() -> TestResult { - let s = include_str!("./samples/01-simple.toml"); - let gcfg = GenesisConfig::read_toml(s)?; + // Copy sample file to temp dir since read_toml_file needs a real file path + let temp_dir = tempfile::tempdir()?; + let sample_content = include_str!("./samples/01-simple.toml"); + let config_path = write_toml_file(temp_dir.path(), sample_content); + + let gcfg = GenesisConfig::read_toml_file(&config_path)?; let (state, _secrets) = gcfg.into_state(SecretKey::new())?; let _ = state; // faucets always precede wallet accounts @@ -30,8 +45,8 @@ fn parsing_yields_expected_default_values() -> TestResult { { let faucet = BasicFungibleFaucet::try_from(native_faucet.clone()).unwrap(); - assert_eq!(faucet.max_supply(), Felt::new(100_000_000)); - assert_eq!(faucet.decimals(), 3); + assert_eq!(faucet.max_supply(), Felt::new(100_000_000_000_000_000)); + assert_eq!(faucet.decimals(), 6); assert_eq!(faucet.symbol(), TokenSymbol::new("MIDEN").unwrap()); } @@ -67,3 +82,275 @@ fn genesis_accounts_have_nonce_one() -> TestResult { let _block = state.into_block()?; Ok(()) } + +#[test] +fn parsing_account_from_file() -> TestResult { + use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; + use miden_standards::AuthScheme; + use miden_standards::account::wallets::create_basic_wallet; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a test wallet account and save it to a .mac file + let init_seed: [u8; 32] = rand::random(); + 
let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + + let test_account = create_basic_wallet( + init_seed, + auth, + AccountType::RegularAccountUpdatableCode, + AccountStorageMode::Public, + )?; + + let account_id = test_account.id(); + + // Save to file + let account_file_path = config_dir.join("test_account.mac"); + let account_file = AccountFile::new(test_account, vec![]); + account_file.write(&account_file_path)?; + + // Create a genesis config TOML that references the account file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +[[account]] +path = "test_account.mac" +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parse the config + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // Convert to state and verify the account is included + let (state, _secrets) = gcfg.into_state(SecretKey::new())?; + assert!(state.accounts.iter().any(|a| a.id() == account_id)); + + Ok(()) +} + +#[test] +fn parsing_native_faucet_from_file() -> TestResult { + use miden_protocol::account::{AccountBuilder, AccountFile, AccountStorageMode, AccountType}; + use miden_standards::account::auth::AuthFalcon512Rpo; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a faucet account and save it to a .mac file + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + ); + let auth = AuthFalcon512Rpo::new(secret_key.public_key().into()); + 
+ let faucet_component = + BasicFungibleFaucet::new(TokenSymbol::new("MIDEN").unwrap(), 6, Felt::new(1_000_000_000))?; + + let faucet_account = AccountBuilder::new(init_seed) + .account_type(AccountType::FungibleFaucet) + .storage_mode(AccountStorageMode::Public) + .with_auth_component(auth) + .with_component(faucet_component) + .build()?; + + let faucet_id = faucet_account.id(); + + // Save to file + let faucet_file_path = config_dir.join("native_faucet.mac"); + let account_file = AccountFile::new(faucet_account, vec![]); + account_file.write(&faucet_file_path)?; + + // Create a genesis config TOML that references the faucet file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +native_faucet = "native_faucet.mac" + +[fee_parameters] +verification_base_fee = 0 +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parse the config + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // Convert to state and verify the native faucet is included + let (state, secrets) = gcfg.into_state(SecretKey::new())?; + assert!(state.accounts.iter().any(|a| a.id() == faucet_id)); + + // No secrets should be generated for file-loaded native faucet + assert!(secrets.secrets.is_empty()); + + Ok(()) +} + +#[test] +fn native_faucet_from_file_must_be_faucet_type() -> TestResult { + use miden_protocol::account::{AccountFile, AccountStorageMode, AccountType}; + use miden_standards::AuthScheme; + use miden_standards::account::wallets::create_basic_wallet; + use tempfile::tempdir; + + // Create a temporary directory for our test files + let temp_dir = tempdir()?; + let config_dir = temp_dir.path(); + + // Create a regular wallet account (not a faucet) and try to use it as native faucet + let init_seed: [u8; 32] = rand::random(); + let mut rng = rand_chacha::ChaCha20Rng::from_seed(rand::random()); + let secret_key = miden_protocol::crypto::dsa::falcon512_rpo::SecretKey::with_rng( + &mut miden_node_utils::crypto::get_rpo_random_coin(&mut rng), + 
); + let auth = AuthScheme::Falcon512Rpo { pub_key: secret_key.public_key().into() }; + + let regular_account = create_basic_wallet( + init_seed, + auth, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + )?; + + // Save to file + let account_file_path = config_dir.join("not_a_faucet.mac"); + let account_file = AccountFile::new(regular_account, vec![]); + account_file.write(&account_file_path)?; + + // Create a genesis config TOML that tries to use a non-faucet as native faucet + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +native_faucet = "not_a_faucet.mac" + +[fee_parameters] +verification_base_fee = 0 +"#; + let config_path = write_toml_file(config_dir, toml_content); + + // Parsing should succeed + let gcfg = GenesisConfig::read_toml_file(&config_path)?; + + // into_state should fail with NativeFaucetNotFungible error when loading the file + let result = gcfg.into_state(SecretKey::new()); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + matches!(err, GenesisConfigError::NativeFaucetNotFungible { .. 
}), + "Expected NativeFaucetNotFungible error, got: {err:?}" + ); + + Ok(()) +} + +#[test] +fn missing_account_file_returns_error() { + // Create a genesis config TOML that references a non-existent file + let toml_content = r#" +timestamp = 1717344256 +version = 1 + +[fee_parameters] +verification_base_fee = 0 + +[[account]] +path = "does_not_exist.mac" +"#; + + // Use temp dir as config dir + let temp_dir = tempfile::tempdir().unwrap(); + let config_path = write_toml_file(temp_dir.path(), toml_content); + + // Parsing should succeed + let gcfg = GenesisConfig::read_toml_file(&config_path).unwrap(); + + // into_state should fail with AccountFileRead error when loading the file + let result = gcfg.into_state(SecretKey::new()); + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + matches!(err, GenesisConfigError::AccountFileRead(..)), + "Expected AccountFileRead error, got: {err:?}" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn parsing_agglayer_sample_with_account_files() -> TestResult { + use miden_protocol::account::AccountType; + + // Use the actual sample file path since it references relative .mac files + let sample_path = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("src/genesis/config/samples/02-with-account-files.toml"); + + let gcfg = GenesisConfig::read_toml_file(&sample_path)?; + let (state, secrets) = gcfg.into_state(SecretKey::new())?; + + // Should have 4 accounts: + // 1. Native faucet (MIDEN) - built from parameters + // 2. Bridge account (bridge.mac) - loaded from file + // 3. ETH faucet (agglayer_faucet_eth.mac) - loaded from file + // 4. 
USDC faucet (agglayer_faucet_usdc.mac) - loaded from file + assert_eq!(state.accounts.len(), 4, "Expected 4 accounts in genesis state"); + + // Verify account types + let native_faucet = &state.accounts[0]; + let bridge_account = &state.accounts[1]; + let eth_faucet = &state.accounts[2]; + let usdc_faucet = &state.accounts[3]; + + // Native faucet should be a fungible faucet (built from parameters) + assert_eq!( + native_faucet.id().account_type(), + AccountType::FungibleFaucet, + "Native faucet should be a FungibleFaucet" + ); + + // Verify native faucet symbol + { + let faucet = BasicFungibleFaucet::try_from(native_faucet.clone()).unwrap(); + assert_eq!(faucet.symbol(), TokenSymbol::new("MIDEN").unwrap()); + } + + // Bridge account is a regular account (not a faucet) + assert!( + bridge_account.is_regular_account(), + "Bridge account should be a regular account" + ); + + // ETH faucet should be a fungible faucet (AggLayer faucet loaded from file) + assert_eq!( + eth_faucet.id().account_type(), + AccountType::FungibleFaucet, + "ETH faucet should be a FungibleFaucet" + ); + + // USDC faucet should be a fungible faucet (AggLayer faucet loaded from file) + assert_eq!( + usdc_faucet.id().account_type(), + AccountType::FungibleFaucet, + "USDC faucet should be a FungibleFaucet" + ); + + // Only the native faucet generates a secret (built from parameters) + assert_eq!(secrets.secrets.len(), 1, "Only native faucet should generate a secret"); + + // Verify the genesis state can be converted to a block + let _block = state.into_block()?; + + Ok(()) +} diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 0429864067..3c22684e73 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -290,7 +290,7 @@ impl InnerForest { /// account, returns an empty SMT root. 
fn get_latest_vault_root(&self, account_id: AccountId) -> Word { self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::MAX)) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) } @@ -449,7 +449,7 @@ impl InnerForest { self.storage_map_roots .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..=(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) @@ -465,7 +465,7 @@ impl InnerForest { self.storage_entries .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map(|(_, entries)| entries.clone()) @@ -612,4 +612,6 @@ impl InnerForest { ); } } + + // TODO: tie in-memory forest retention to DB pruning policy once forest queries rely on it. 
} diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 1d345dcf01..519f8504b9 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -10,7 +10,11 @@ pub mod state; #[cfg(feature = "rocksdb")] pub use accounts::PersistentAccountTree; pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; +pub use db::Db; +pub use db::models::conv::SqlTypeConvert; +pub use errors::DatabaseError; pub use genesis::GenesisState; +pub use server::block_prover_client::BlockProver; pub use server::{DataDirectory, Store}; // CONSTANTS diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 292842e778..56bfcafb49 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -6,13 +6,15 @@ use miden_node_proto::generated as proto; use miden_node_utils::ErrorReport; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::BlockNumber; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockInputs, BlockNumber}; use miden_protocol::note::Nullifier; use tonic::{Request, Response, Status}; use tracing::{info, instrument}; -use crate::COMPONENT; +use crate::errors::GetBlockInputsError; use crate::state::State; +use crate::{BlockProver, COMPONENT}; // STORE API // ================================================================================================ @@ -20,6 +22,7 @@ use crate::state::State; #[derive(Clone)] pub struct StoreApi { pub(super) state: Arc, + pub(super) block_prover: Arc, } impl StoreApi { @@ -43,6 +46,40 @@ impl StoreApi { mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)), })) } + + /// Retrieves block inputs from state based on the contents of the supplied ordered batches. + pub(crate) async fn block_inputs_from_ordered_batches( + &self, + batches: &OrderedBatches, + ) -> Result { + // Construct fields required to retrieve block inputs. 
+ let mut account_ids = BTreeSet::new(); + let mut nullifiers = Vec::new(); + let mut unauthenticated_note_commitments = BTreeSet::new(); + let mut reference_blocks = BTreeSet::new(); + + for batch in batches.as_slice() { + account_ids.extend(batch.updated_accounts()); + nullifiers.extend(batch.created_nullifiers()); + reference_blocks.insert(batch.reference_block_num()); + + for note in batch.input_notes().iter() { + if let Some(header) = note.header() { + unauthenticated_note_commitments.insert(header.commitment()); + } + } + } + + // Retrieve block inputs from the store. + self.state + .get_block_inputs( + account_ids.into_iter().collect(), + nullifiers, + unauthenticated_note_commitments, + reference_blocks, + ) + .await + } } // UTILITIES @@ -138,8 +175,13 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(nullifiers = nullifiers.len()), + err +)] pub fn validate_nullifiers(nullifiers: &[proto::primitives::Digest]) -> Result, E> where E: From + std::fmt::Display, @@ -152,8 +194,13 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(notes = notes.len()), + err +)] pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result, Status> { notes .iter() @@ -162,7 +209,12 @@ pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result< .map_err(|_| invalid_argument("Digest field is not in the modulus range")) } -#[instrument(level = "debug",target = COMPONENT, skip_all)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(block_numbers = block_numbers.len()) +)] pub fn read_block_numbers(block_numbers: &[u32]) -> BTreeSet { BTreeSet::from_iter(block_numbers.iter().map(|raw_number| 
BlockNumber::from(*raw_number))) } diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 9dd2b39c4d..25f6b05f60 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -1,12 +1,16 @@ use std::convert::Infallible; +use futures::TryFutureExt; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_node_proto::errors::MissingFieldHelper; use miden_node_proto::generated::store::block_producer_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_node_utils::ErrorReport; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; -use miden_protocol::block::{BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockBody, BlockHeader, BlockNumber, SignedBlock}; use miden_protocol::utils::Deserializable; use tonic::{Request, Response, Status}; use tracing::Instrument; @@ -40,33 +44,69 @@ impl block_producer_server::BlockProducer for StoreApi { /// Updates the local DB by inserting a new block header and the related data. async fn apply_block( &self, - request: Request, + request: Request, ) -> Result, Status> { let request = request.into_inner(); - - let block = ProvenBlock::read_from_bytes(&request.block).map_err(|err| { - Status::invalid_argument(err.as_report_context("block deserialization error")) - })?; + // Read ordered batches. + let ordered_batches = + OrderedBatches::read_from_bytes(&request.ordered_batches).map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to deserialize ordered batches"), + ) + })?; + // Read block. + let block = request + .block + .ok_or(proto::store::ApplyBlockRequest::missing_field(stringify!(block)))?; + // Read block header. + let header: BlockHeader = block + .header + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))? + .try_into()?; + // Read block body. 
+ let body: BlockBody = block + .body + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))? + .try_into()?; + // Read signature. + let signature: Signature = block + .signature + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))? + .try_into()?; + + // Get block inputs from ordered batches. + let block_inputs = + self.block_inputs_from_ordered_batches(&ordered_batches).await.map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to get block inputs from ordered batches"), + ) + })?; let span = tracing::Span::current(); - span.set_attribute("block.number", block.header().block_num()); - span.set_attribute("block.commitment", block.header().commitment()); - span.set_attribute("block.accounts.count", block.body().updated_accounts().len()); - span.set_attribute("block.output_notes.count", block.body().output_notes().count()); - span.set_attribute("block.nullifiers.count", block.body().created_nullifiers().len()); - - // We perform the apply_block work in a separate task. This prevents the caller cancelling - // the request and thereby cancelling the task at an arbitrary point of execution. + span.set_attribute("block.number", header.block_num()); + span.set_attribute("block.commitment", header.commitment()); + span.set_attribute("block.accounts.count", body.updated_accounts().len()); + span.set_attribute("block.output_notes.count", body.output_notes().count()); + span.set_attribute("block.nullifiers.count", body.created_nullifiers().len()); + + // We perform the apply/prove block work in a separate task. This prevents the caller + // cancelling the request and thereby cancelling the task at an arbitrary point of + // execution. // // Normally this shouldn't be a problem, however our apply_block isn't quite ACID compliant // so things get a bit messy. This is more a temporary hack-around to minimize this risk. let this = self.clone(); - tokio::spawn( + // TODO(sergerad): Use block proof. 
+ let _block_proof = tokio::spawn( async move { + // SAFETY: The header, body, and signature are assumed to + // correspond to each other because they are provided by the Block + // Producer. + let signed_block = SignedBlock::new_unchecked(header.clone(), body, signature); // TODO(sergerad): Use `SignedBlock::new()` when available. + // Note: This is an internal endpoint, so its safe to expose the full error + // report. this.state - .apply_block(block) - .await - .map(Response::new) + .apply_block(signed_block) .inspect_err(|err| { span.set_error(err); }) @@ -75,11 +115,15 @@ impl block_producer_server::BlockProducer for StoreApi { ApplyBlockError::InvalidBlockError(_) => tonic::Code::InvalidArgument, _ => tonic::Code::Internal, }; - - // This is an internal endpoint, so its safe to expose the full error - // report. Status::new(code, err.as_report()) }) + .and_then(|_| { + this.block_prover + .prove(ordered_batches, block_inputs, &header) + .map_err(|err| Status::new(tonic::Code::Internal, err.as_report())) + }) + .await + .map(Response::new) } .in_current_span(), ) @@ -87,7 +131,8 @@ impl block_producer_server::BlockProducer for StoreApi { .map_err(|err| { tonic::Status::internal(err.as_report_context("joining apply_block task failed")) }) - .flatten() + .flatten()?; + Ok(Response::new(())) } /// Returns data needed by the block producer to construct and prove the next block. 
diff --git a/crates/store/src/server/block_prover_client.rs b/crates/store/src/server/block_prover_client.rs new file mode 100644 index 0000000000..5af15ac433 --- /dev/null +++ b/crates/store/src/server/block_prover_client.rs @@ -0,0 +1,55 @@ +use miden_block_prover::{BlockProverError, LocalBlockProver}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockProof}; +use miden_remote_prover_client::{RemoteBlockProver, RemoteProverClientError}; +use tracing::instrument; + +use crate::COMPONENT; + +#[derive(Debug, thiserror::Error)] +pub enum StoreProverError { + #[error("local proving failed")] + LocalProvingFailed(#[source] BlockProverError), + #[error("remote proving failed")] + RemoteProvingFailed(#[source] RemoteProverClientError), +} + +// BLOCK PROVER +// ================================================================================================ + +/// Block prover which allows for proving via either local or remote backend. +/// +/// The local proving variant is intended for development and testing purposes. +/// The remote proving variant is intended for production use. 
+pub enum BlockProver { + Local(LocalBlockProver), + Remote(RemoteBlockProver), +} + +impl BlockProver { + pub fn local() -> Self { + Self::Local(LocalBlockProver::new(0)) + } + + pub fn remote(endpoint: impl Into) -> Self { + Self::Remote(RemoteBlockProver::new(endpoint)) + } + + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn prove( + &self, + tx_batches: OrderedBatches, + block_inputs: BlockInputs, + block_header: &BlockHeader, + ) -> Result { + match self { + Self::Local(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .map_err(StoreProverError::LocalProvingFailed)?), + Self::Remote(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .await + .map_err(StoreProverError::RemoteProvingFailed)?), + } + } +} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index b4b5798db9..3a284ceff4 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -18,15 +18,17 @@ use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; use tower_http::trace::TraceLayer; use tracing::{info, instrument}; +use url::Url; use crate::blocks::BlockStore; use crate::db::Db; use crate::errors::ApplyBlockError; use crate::state::State; -use crate::{COMPONENT, GenesisState}; +use crate::{BlockProver, COMPONENT, GenesisState}; mod api; mod block_producer; +pub mod block_prover_client; mod ntx_builder; mod rpc_api; @@ -35,6 +37,8 @@ pub struct Store { pub rpc_listener: TcpListener, pub ntx_builder_listener: TcpListener, pub block_producer_listener: TcpListener, + /// URL for the Block Prover client. Uses local prover if `None`. + pub block_prover_url: Option, pub data_directory: PathBuf, /// Server-side timeout for an individual gRPC request. /// @@ -100,14 +104,25 @@ impl Store { .context("failed to load state")?, ); - let rpc_service = - store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + // Initialize local or remote block prover. 
+ let block_prover = if let Some(url) = self.block_prover_url { + Arc::new(BlockProver::remote(url)) + } else { + Arc::new(BlockProver::local()) + }; + + let rpc_service = store::rpc_server::RpcServer::new(api::StoreApi { + state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), + }); let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let block_producer_service = store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index a0fefa0e7a..6a61b4daf6 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -16,7 +16,12 @@ use tracing::debug; use crate::COMPONENT; use crate::db::models::Page; -use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError, GetWitnessesError}; +use crate::errors::{ + GetAccountError, + GetNetworkAccountIdsError, + GetNoteScriptByRootError, + GetWitnessesError, +}; use crate::server::api::{ StoreApi, internal_error, @@ -167,7 +172,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let proof = self.state.get_account(account_request).await?; diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 67ef1df78e..829d543f32 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -1,4 +1,6 @@ use miden_node_proto::convert; +use 
miden_node_proto::domain::block::InvalidBlockRange; +use miden_node_proto::errors::MissingFieldHelper; use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; use miden_node_utils::limiter::{ @@ -10,6 +12,7 @@ use miden_node_utils::limiter::{ }; use miden_protocol::Word; use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; use miden_protocol::note::NoteId; use tonic::{Request, Response, Status}; use tracing::{debug, info}; @@ -17,12 +20,14 @@ use tracing::{debug, info}; use crate::COMPONENT; use crate::errors::{ CheckNullifiersError, + GetAccountError, GetBlockByNumberError, GetNoteScriptByRootError, GetNotesByIdError, NoteSyncError, SyncAccountStorageMapsError, SyncAccountVaultError, + SyncChainMmrError, SyncNullifiersError, SyncTransactionsError, }; @@ -117,54 +122,6 @@ impl rpc_server::Rpc for StoreApi { })) } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects the client is interested in. 
- async fn sync_state( - &self, - request: Request, - ) -> Result, Status> { - let request = request.into_inner(); - - let account_ids: Vec = read_account_ids::(&request.account_ids)?; - - let (state, delta) = self - .state - .sync_state(request.block_num.into(), account_ids, request.note_tags) - .await - .map_err(internal_error)?; - - let accounts = state - .account_updates - .into_iter() - .map(|account_info| proto::account::AccountSummary { - account_id: Some(account_info.account_id.into()), - account_commitment: Some(account_info.account_commitment.into()), - block_num: account_info.block_num.as_u32(), - }) - .collect(); - - let transactions = state - .transactions - .into_iter() - .map(|transaction_summary| proto::transaction::TransactionSummary { - account_id: Some(transaction_summary.account_id.into()), - block_num: transaction_summary.block_num.as_u32(), - transaction_id: Some(transaction_summary.transaction_id.into()), - }) - .collect(); - - let notes = state.notes.into_iter().map(Into::into).collect(); - - Ok(Response::new(proto::rpc::SyncStateResponse { - chain_tip: self.state.latest_block_num().await.as_u32(), - block_header: Some(state.block_header.into()), - mmr_delta: Some(delta.into()), - accounts, - transactions, - notes, - })) - } - /// Returns info which can be used by the client to sync note state. async fn sync_notes( &self, @@ -196,6 +153,45 @@ impl rpc_server::Rpc for StoreApi { })) } + /// Returns chain MMR updates within a block range. 
+ async fn sync_chain_mmr( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + let chain_tip = self.state.latest_block_num().await; + + let block_range = request + .block_range + .ok_or_else(|| proto::rpc::SyncChainMmrRequest::missing_field(stringify!(block_range))) + .map_err(SyncChainMmrError::DeserializationFailed)?; + + let block_from = BlockNumber::from(block_range.block_from); + if block_from > chain_tip { + Err(SyncChainMmrError::FutureBlock { chain_tip, block_from })?; + } + + let block_to = block_range.block_to.map_or(chain_tip, BlockNumber::from).min(chain_tip); + + if block_from > block_to { + Err(SyncChainMmrError::InvalidBlockRange(InvalidBlockRange::StartGreaterThanEnd { + start: block_from, + end: block_to, + }))?; + } + let block_range = block_from..=block_to; + let mmr_delta = + self.state.sync_chain_mmr(block_range.clone()).await.map_err(internal_error)?; + + Ok(Response::new(proto::rpc::SyncChainMmrResponse { + block_range: Some(proto::rpc::BlockRange { + block_from: block_range.start().as_u32(), + block_to: Some(block_range.end().as_u32()), + }), + mmr_delta: Some(mmr_delta.into()), + })) + } + /// Returns a list of [`Note`]s for the specified [`NoteId`]s. 
/// /// If the list is empty or no [`Note`] matched the requested [`NoteId`] and empty list is @@ -250,7 +246,7 @@ impl rpc_server::Rpc for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let account_data = self.state.get_account(account_request).await?; @@ -327,7 +323,7 @@ impl rpc_server::Rpc for StoreApi { let storage_maps_page = self .state - .get_storage_map_sync_values(account_id, block_range) + .sync_account_storage_maps(account_id, block_range) .await .map_err(SyncAccountStorageMapsError::from)?; diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs new file mode 100644 index 0000000000..145432c97d --- /dev/null +++ b/crates/store/src/state/apply_block.rs @@ -0,0 +1,293 @@ +use std::sync::Arc; + +use miden_node_utils::ErrorReport; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::SignedBlock; +use miden_protocol::note::NoteDetails; +use miden_protocol::transaction::OutputNote; +use miden_protocol::utils::Serializable; +use tokio::sync::oneshot; +use tracing::{Instrument, info, info_span, instrument}; + +use crate::db::NoteRecord; +use crate::errors::{ApplyBlockError, InvalidBlockError}; +use crate::state::State; +use crate::{COMPONENT, HistoricalError}; + +impl State { + /// Apply changes of a new block to the DB and in-memory data structures. + /// + /// ## Note on state consistency + /// + /// The server contains in-memory representations of the existing trees, the in-memory + /// representation must be kept consistent with the committed data, this is necessary so to + /// provide consistent results for all endpoints. In order to achieve consistency, the + /// following steps are used: + /// + /// - the request data is validated, prior to starting any modifications. 
+ /// - block is being saved into the store in parallel with updating the DB, but before + /// committing. This block is considered as candidate and not yet available for reading + /// because the latest block pointer is not updated yet. + /// - a transaction is open in the DB and the writes are started. + /// - while the transaction is not committed, concurrent reads are allowed, both the DB and the + /// in-memory representations, which are consistent at this stage. + /// - prior to committing the changes to the DB, an exclusive lock to the in-memory data is + /// acquired, preventing concurrent reads to the in-memory data, since that will be + /// out-of-sync w.r.t. the DB. + /// - the DB transaction is committed, and requests that read only from the DB can proceed to + /// use the fresh data. + /// - the in-memory structures are updated, including the latest block pointer and the lock is + /// released. + // TODO: This span is logged in a root span, we should connect it to the parent span. + #[expect(clippy::too_many_lines)] + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn apply_block(&self, signed_block: SignedBlock) -> Result<(), ApplyBlockError> { + let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; + + let header = signed_block.header(); + let body = signed_block.body(); + + // Validate that header and body match. + let tx_commitment = body.transactions().commitment(); + if header.tx_commitment() != tx_commitment { + return Err(InvalidBlockError::InvalidBlockTxCommitment { + expected: tx_commitment, + actual: header.tx_commitment(), + } + .into()); + } + + let block_num = header.block_num(); + let block_commitment = header.commitment(); + + // Validate that the applied block is the next block in sequence. + let prev_block = self + .db + .select_block_header_by_block_num(None) + .await? 
+ .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; + let expected_block_num = prev_block.block_num().child(); + if block_num != expected_block_num { + return Err(InvalidBlockError::NewBlockInvalidBlockNum { + expected: expected_block_num, + submitted: block_num, + } + .into()); + } + if header.prev_block_commitment() != prev_block.commitment() { + return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); + } + + // Save the block to the block store. In a case of a rolled-back DB transaction, the + // in-memory state will be unchanged, but the block might still be written into the + // block store. Thus, such block should be considered as block candidates, but not + // finalized blocks. So we should check for the latest block when getting block from + // the store. + let signed_block_bytes = signed_block.to_bytes(); + let store = Arc::clone(&self.block_store); + let block_save_task = tokio::spawn( + async move { store.save_block(block_num, &signed_block_bytes).await }.in_current_span(), + ); + + // Scope to read in-memory data, compute mutations required for updating account + // and nullifier trees, and validate the request. 
+ let ( + nullifier_tree_old_root, + nullifier_tree_update, + account_tree_old_root, + account_tree_update, + ) = { + let inner = self.inner.read().await; + + let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); + + // nullifiers can be produced only once + let duplicate_nullifiers: Vec<_> = body + .created_nullifiers() + .iter() + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) + .copied() + .collect(); + if !duplicate_nullifiers.is_empty() { + return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); + } + + // compute updates for the in-memory data structures + + // new_block.chain_root must be equal to the chain MMR root prior to the update + let peaks = inner.blockchain.peaks(); + if peaks.hash_peaks() != header.chain_commitment() { + return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); + } + + // compute update for nullifier tree + let nullifier_tree_update = inner + .nullifier_tree + .compute_mutations( + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + ) + .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; + + if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { + // We do our best here to notify the serve routine, if it doesn't care (dropped the + // receiver) we can't do much. 
+ let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidNullifierRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); + } + + // compute update for account tree + let account_tree_update = inner + .account_tree + .compute_mutations( + body.updated_accounts() + .iter() + .map(|update| (update.account_id(), update.final_state_commitment())), + ) + .map_err(|e| match e { + HistoricalError::AccountTreeError(err) => { + InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) + }, + HistoricalError::MerkleError(_) => { + panic!("Unexpected MerkleError during account tree mutation computation") + }, + })?; + + if account_tree_update.as_mutation_set().root() != header.account_root() { + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidAccountRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); + } + + ( + inner.nullifier_tree.root(), + nullifier_tree_update, + inner.account_tree.root_latest(), + account_tree_update, + ) + }; + + // Build note tree. 
+ let note_tree = body.compute_block_note_tree(); + if note_tree.root() != header.note_root() { + return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); + } + + let notes = body + .output_notes() + .map(|(note_index, note)| { + let (details, nullifier) = match note { + OutputNote::Full(note) => { + (Some(NoteDetails::from(note)), Some(note.nullifier())) + }, + OutputNote::Header(_) => (None, None), + note @ OutputNote::Partial(_) => { + return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( + note.clone(), + ))); + }, + }; + + let inclusion_path = note_tree.open(note_index); + + let note_record = NoteRecord { + block_num, + note_index, + note_id: note.id().as_word(), + note_commitment: note.commitment(), + metadata: note.metadata().clone(), + details, + inclusion_path, + }; + + Ok((note_record, nullifier)) + }) + .collect::, InvalidBlockError>>()?; + + // Signals the transaction is ready to be committed, and the write lock can be acquired. + let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); + // Signals the write lock has been acquired, and the transaction can be committed. + let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); + + // Extract public account updates with deltas before block is moved into async task. + // Private accounts are filtered out since they don't expose their state changes. + let account_deltas = + Vec::from_iter(body.updated_accounts().iter().filter_map( + |update| match update.details() { + AccountUpdateDetails::Delta(delta) => Some(delta.clone()), + AccountUpdateDetails::Private => None, + }, + )); + + // The DB and in-memory state updates need to be synchronized and are partially + // overlapping. Namely, the DB transaction only proceeds after this task acquires the + // in-memory write lock. This requires the DB update to run concurrently, so a new task is + // spawned. 
+ let db = Arc::clone(&self.db); + let db_update_task = tokio::spawn( + async move { db.apply_block(allow_acquire, acquire_done, signed_block, notes).await } + .in_current_span(), + ); + + // Wait for the message from the DB update task, that we ready to commit the DB transaction. + acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; + + // Awaiting the block saving task to complete without errors. + block_save_task.await??; + + // Scope to update the in-memory data. + async move { + // We need to hold the write lock here to prevent inconsistency between the in-memory + // state and the DB state. Thus, we need to wait for the DB update task to complete + // successfully. + let mut inner = self.inner.write().await; + + // We need to check that neither the nullifier tree nor the account tree have changed + // while we were waiting for the DB preparation task to complete. If either of them + // did change, we do not proceed with in-memory and database updates, since it may + // lead to an inconsistent state. + if inner.nullifier_tree.root() != nullifier_tree_old_root + || inner.account_tree.root_latest() != account_tree_old_root + { + return Err(ApplyBlockError::ConcurrentWrite); + } + + // Notify the DB update task that the write lock has been acquired, so it can commit + // the DB transaction. + inform_acquire_done + .send(()) + .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; + + // TODO: shutdown #91 + // Await for successful commit of the DB transaction. If the commit fails, we mustn't + // change in-memory state, so we return a block applying error and don't proceed with + // in-memory updates. + db_update_task + .await? 
+ .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; + + // Update the in-memory data structures after successful commit of the DB transaction + inner + .nullifier_tree + .apply_mutations(nullifier_tree_update) + .expect("Unreachable: old nullifier tree root must be checked before this step"); + inner + .account_tree + .apply_mutations(account_tree_update) + .expect("Unreachable: old account tree root must be checked before this step"); + inner.blockchain.push(block_commitment); + + Ok(()) + } + .in_current_span() + .await?; + + self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + + info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); + + Ok(()) + } +} diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 504ea06313..c8c8861484 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -9,15 +9,17 @@ //! data exists, otherwise rebuilt from the database and persisted. 
use std::future::Future; +use std::num::NonZeroUsize; use std::path::Path; -use miden_protocol::Word; +use miden_crypto::merkle::mmr::Mmr; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; -use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; +use miden_protocol::block::{BlockNumber, Blockchain}; #[cfg(not(feature = "rocksdb"))] use miden_protocol::crypto::merkle::smt::MemoryStorage; use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; +use miden_protocol::{Felt, FieldElement, Word}; #[cfg(feature = "rocksdb")] use tracing::info; use tracing::instrument; @@ -29,6 +31,7 @@ use { use crate::COMPONENT; use crate::db::Db; +use crate::db::models::queries::BlockHeaderCommitment; use crate::errors::{DatabaseError, StateInitializationError}; use crate::inner_forest::InnerForest; @@ -41,6 +44,18 @@ pub const ACCOUNT_TREE_STORAGE_DIR: &str = "accounttree"; /// Directory name for the nullifier tree storage within the data directory. pub const NULLIFIER_TREE_STORAGE_DIR: &str = "nullifiertree"; +/// Page size for loading account commitments from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of accounts. +const ACCOUNT_COMMITMENTS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading nullifiers from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of nullifiers. +const NULLIFIERS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading public account IDs from the database during forest rebuilding. +/// This limits memory usage when rebuilding with millions of public accounts. 
+const PUBLIC_ACCOUNT_IDS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(1_000).unwrap(); + // STORAGE TYPE ALIAS // ================================================================================================ @@ -66,6 +81,14 @@ pub fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInit } } +/// Converts a block number to the leaf value format used in the nullifier tree. +/// +/// This matches the format used by `NullifierBlock::from(BlockNumber)::into()`, +/// which is `[Felt::from(block_num), 0, 0, 0]`. +fn block_num_to_nullifier_leaf(block_num: BlockNumber) -> Word { + Word::from([Felt::from(block_num), Felt::ZERO, Felt::ZERO, Felt::ZERO]) +} + // STORAGE LOADER TRAIT // ================================================================================================ @@ -103,27 +126,82 @@ impl StorageLoader for MemoryStorage { Ok(MemoryStorage::default()) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + 
.map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + // TODO: Make the loading methodology for account and nullifier trees consistent. + // Currently we use `NullifierTree::new_unchecked()` for nullifiers but `AccountTree::new()` + // for accounts. Consider using `NullifierTree::with_storage_from_entries()` for consistency. + #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, cursor).await?; + + cursor = page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -141,6 +219,7 @@ impl StorageLoader for RocksDbStorage { .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, @@ -156,15 +235,42 @@ impl StorageLoader for RocksDbStorage { } info!(target: COMPONENT, 
"RocksDB account tree storage is empty, populating from SQLite"); - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, @@ -179,10 +285,36 @@ impl StorageLoader for RocksDbStorage { } info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, 
cursor).await?; + + cursor = page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -201,45 +333,57 @@ pub fn load_smt(storage: S) -> Result, StateInitializ /// Loads the blockchain MMR from all block headers in the database. #[instrument(target = COMPONENT, skip_all)] pub async fn load_mmr(db: &mut Db) -> Result { - let block_commitments: Vec = db - .select_all_block_headers() - .await? - .iter() - .map(BlockHeader::commitment) - .collect(); + let block_commitments = db.select_all_block_header_commitments().await?; // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX // entries. - let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + let chain_mmr = Blockchain::from_mmr_unchecked(Mmr::from( + block_commitments.iter().copied().map(BlockHeaderCommitment::word), + )); Ok(chain_mmr) } /// Loads SMT forest with storage map and vault Merkle paths for all public accounts. 
-#[instrument(target = COMPONENT, skip_all, fields(block_num = %block_num))] +#[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] pub async fn load_smt_forest( db: &mut Db, block_num: BlockNumber, ) -> Result { use miden_protocol::account::delta::AccountDelta; - let public_account_ids = db.select_all_public_account_ids().await?; - - // Acquire write lock once for the entire initialization let mut forest = InnerForest::new(); + let mut cursor = None; + + loop { + let page = db.select_public_account_ids_paged(PUBLIC_ACCOUNT_IDS_PAGE_SIZE, cursor).await?; - // Process each account - for account_id in public_account_ids { - // Get the full account from the database - let account_info = db.select_account(account_id).await?; - let account = account_info.details.expect("public accounts always have details in DB"); + if page.account_ids.is_empty() { + break; + } - // Convert the full account to a full-state delta - let delta = - AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); + // Process each account in this page + for account_id in page.account_ids { + // TODO: Loading the full account from the database is inefficient and will need to + // go away. 
+ let account_info = db.select_account(account_id).await?; + let account = account_info + .details + .ok_or(StateInitializationError::PublicAccountMissingDetails(account_id))?; + + // Convert the full account to a full-state delta + let delta = AccountDelta::try_from(account).map_err(|e| { + StateInitializationError::AccountToDeltaConversionFailed(e.to_string()) + })?; + + // Use the unified update method (will recognize it's a full-state delta) + forest.update_account(block_num, &delta)?; + } - // Use the unified update method (will recognize it's a full-state delta) - forest.update_account(block_num, &delta)?; + cursor = page.next_cursor; + if cursor.is_none() { + break; + } } Ok(forest) diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index b584f37b4a..40f6f29e60 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -21,59 +21,51 @@ use miden_node_proto::domain::account::{ StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; -use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; use miden_protocol::Word; -use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; -use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain}; +use miden_protocol::crypto::merkle::mmr::{MmrPeaks, MmrProof, PartialMmr}; use miden_protocol::crypto::merkle::smt::{LargeSmt, SmtProof, SmtStorage}; -use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; -use miden_protocol::transaction::{OutputNote, 
PartialBlockchain}; -use miden_protocol::utils::Serializable; -use tokio::sync::{Mutex, RwLock, oneshot}; -use tracing::{Instrument, info, info_span, instrument}; +use miden_protocol::note::{NoteId, NoteScript, Nullifier}; +use miden_protocol::transaction::PartialBlockchain; +use tokio::sync::{Mutex, RwLock}; +use tracing::{info, instrument}; -use crate::accounts::{AccountTreeWithHistory, HistoricalError}; +use crate::accounts::AccountTreeWithHistory; use crate::blocks::BlockStore; use crate::db::models::Page; -use crate::db::models::queries::StorageMapValuesPage; -use crate::db::{ - AccountVaultValue, - Db, - NoteRecord, - NoteSyncUpdate, - NullifierInfo, - StateSyncUpdate, -}; +use crate::db::{Db, NoteRecord, NullifierInfo}; use crate::errors::{ ApplyBlockError, DatabaseError, + GetAccountError, GetBatchInputsError, GetBlockHeaderError, GetBlockInputsError, GetCurrentBlockchainDataError, - InvalidBlockError, - NoteSyncError, StateInitializationError, - StateSyncError, }; use crate::inner_forest::{InnerForest, WitnessError}; use crate::{COMPONENT, DataDirectory}; mod loader; -pub use loader::{ +use loader::{ ACCOUNT_TREE_STORAGE_DIR, NULLIFIER_TREE_STORAGE_DIR, StorageLoader, TreeStorage, + load_mmr, + load_smt_forest, + verify_tree_consistency, }; -use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; + +mod apply_block; +mod sync_state; // STRUCTURES // ================================================================================================ @@ -190,294 +182,6 @@ impl State { }) } - // STATE MUTATOR - // -------------------------------------------------------------------------------------------- - - /// Apply changes of a new block to the DB and in-memory data structures. 
- /// - /// ## Note on state consistency - /// - /// The server contains in-memory representations of the existing trees, the in-memory - /// representation must be kept consistent with the committed data, this is necessary so to - /// provide consistent results for all endpoints. In order to achieve consistency, the - /// following steps are used: - /// - /// - the request data is validated, prior to starting any modifications. - /// - block is being saved into the store in parallel with updating the DB, but before - /// committing. This block is considered as candidate and not yet available for reading - /// because the latest block pointer is not updated yet. - /// - a transaction is open in the DB and the writes are started. - /// - while the transaction is not committed, concurrent reads are allowed, both the DB and the - /// in-memory representations, which are consistent at this stage. - /// - prior to committing the changes to the DB, an exclusive lock to the in-memory data is - /// acquired, preventing concurrent reads to the in-memory data, since that will be - /// out-of-sync w.r.t. the DB. - /// - the DB transaction is committed, and requests that read only from the DB can proceed to - /// use the fresh data. - /// - the in-memory structures are updated, including the latest block pointer and the lock is - /// released. - // TODO: This span is logged in a root span, we should connect it to the parent span. 
- #[allow(clippy::too_many_lines)] - #[instrument(target = COMPONENT, skip_all, err)] - pub async fn apply_block(&self, block: ProvenBlock) -> Result<(), ApplyBlockError> { - let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; - - let header = block.header(); - - let tx_commitment = block.body().transactions().commitment(); - - if header.tx_commitment() != tx_commitment { - return Err(InvalidBlockError::InvalidBlockTxCommitment { - expected: tx_commitment, - actual: header.tx_commitment(), - } - .into()); - } - - let block_num = header.block_num(); - let block_commitment = header.commitment(); - - // ensures the right block header is being processed - let prev_block = self - .db - .select_block_header_by_block_num(None) - .await? - .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; - - let expected_block_num = prev_block.block_num().child(); - if block_num != expected_block_num { - return Err(InvalidBlockError::NewBlockInvalidBlockNum { - expected: expected_block_num, - submitted: block_num, - } - .into()); - } - if header.prev_block_commitment() != prev_block.commitment() { - return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); - } - - let block_data = block.to_bytes(); - - // Save the block to the block store. In a case of a rolled-back DB transaction, the - // in-memory state will be unchanged, but the block might still be written into the - // block store. Thus, such block should be considered as block candidates, but not - // finalized blocks. So we should check for the latest block when getting block from - // the store. 
- let store = Arc::clone(&self.block_store); - let block_save_task = tokio::spawn( - async move { store.save_block(block_num, &block_data).await }.in_current_span(), - ); - - // scope to read in-memory data, compute mutations required for updating account - // and nullifier trees, and validate the request - let ( - nullifier_tree_old_root, - nullifier_tree_update, - account_tree_old_root, - account_tree_update, - ) = { - let inner = self.inner.read().await; - - let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); - - // nullifiers can be produced only once - let duplicate_nullifiers: Vec<_> = block - .body() - .created_nullifiers() - .iter() - .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) - .copied() - .collect(); - if !duplicate_nullifiers.is_empty() { - return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); - } - - // compute updates for the in-memory data structures - - // new_block.chain_root must be equal to the chain MMR root prior to the update - let peaks = inner.blockchain.peaks(); - if peaks.hash_peaks() != header.chain_commitment() { - return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); - } - - // compute update for nullifier tree - let nullifier_tree_update = inner - .nullifier_tree - .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), - ) - .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; - - if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { - // We do our best here to notify the serve routine, if it doesn't care (dropped the - // receiver) we can't do much. 
- let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidNullifierRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); - } - - // compute update for account tree - let account_tree_update = inner - .account_tree - .compute_mutations( - block - .body() - .updated_accounts() - .iter() - .map(|update| (update.account_id(), update.final_state_commitment())), - ) - .map_err(|e| match e { - HistoricalError::AccountTreeError(err) => { - InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) - }, - HistoricalError::MerkleError(_) => { - panic!("Unexpected MerkleError during account tree mutation computation") - }, - })?; - - if account_tree_update.as_mutation_set().root() != header.account_root() { - let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidAccountRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); - } - - ( - inner.nullifier_tree.root(), - nullifier_tree_update, - inner.account_tree.root_latest(), - account_tree_update, - ) - }; - - // build note tree - let note_tree = block.body().compute_block_note_tree(); - if note_tree.root() != header.note_root() { - return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); - } - - let notes = block - .body() - .output_notes() - .map(|(note_index, note)| { - let (details, nullifier) = match note { - OutputNote::Full(note) => { - (Some(NoteDetails::from(note)), Some(note.nullifier())) - }, - OutputNote::Header(_) => (None, None), - note @ OutputNote::Partial(_) => { - return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( - note.clone(), - ))); - }, - }; - - let inclusion_path = note_tree.open(note_index); - - let note_record = NoteRecord { - block_num, - note_index, - note_id: note.id().as_word(), - note_commitment: note.commitment(), - metadata: note.metadata().clone(), - details, - inclusion_path, - }; - - Ok((note_record, nullifier)) - }) 
- .collect::, InvalidBlockError>>()?; - - // Signals the transaction is ready to be committed, and the write lock can be acquired - let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); - // Signals the write lock has been acquired, and the transaction can be committed - let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); - - // Extract public account updates with deltas before block is moved into async task. - // Private accounts are filtered out since they don't expose their state changes. - let account_deltas = - Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { - match update.details() { - AccountUpdateDetails::Delta(delta) => Some(delta.clone()), - AccountUpdateDetails::Private => None, - } - })); - - // The DB and in-memory state updates need to be synchronized and are partially - // overlapping. Namely, the DB transaction only proceeds after this task acquires the - // in-memory write lock. This requires the DB update to run concurrently, so a new task is - // spawned. - let db = Arc::clone(&self.db); - let db_update_task = tokio::spawn( - async move { db.apply_block(allow_acquire, acquire_done, block, notes).await } - .in_current_span(), - ); - - // Wait for the message from the DB update task, that we ready to commit the DB transaction - acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; - - // Awaiting the block saving task to complete without errors - block_save_task.await??; - - // Scope to update the in-memory data - async move { - // We need to hold the write lock here to prevent inconsistency between the in-memory - // state and the DB state. Thus, we need to wait for the DB update task to complete - // successfully. - let mut inner = self.inner.write().await; - - // We need to check that neither the nullifier tree nor the account tree have changed - // while we were waiting for the DB preparation task to complete. 
If either of them - // did change, we do not proceed with in-memory and database updates, since it may - // lead to an inconsistent state. - if inner.nullifier_tree.root() != nullifier_tree_old_root - || inner.account_tree.root_latest() != account_tree_old_root - { - return Err(ApplyBlockError::ConcurrentWrite); - } - - // Notify the DB update task that the write lock has been acquired, so it can commit - // the DB transaction - inform_acquire_done - .send(()) - .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; - - // TODO: shutdown #91 - // Await for successful commit of the DB transaction. If the commit fails, we mustn't - // change in-memory state, so we return a block applying error and don't proceed with - // in-memory updates. - db_update_task - .await? - .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; - - // Update the in-memory data structures after successful commit of the DB transaction - inner - .nullifier_tree - .apply_mutations(nullifier_tree_update) - .expect("Unreachable: old nullifier tree root must be checked before this step"); - inner - .account_tree - .apply_mutations(account_tree_update) - .expect("Unreachable: old account tree root must be checked before this step"); - inner.blockchain.push(block_commitment); - - Ok(()) - } - .in_current_span() - .await?; - - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; - - info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); - - Ok(()) - } - // STATE ACCESSORS // -------------------------------------------------------------------------------------------- @@ -506,17 +210,6 @@ impl State { } } - pub async fn sync_nullifiers( - &self, - prefix_len: u32, - nullifier_prefixes: Vec, - block_range: RangeInclusive, - ) -> Result<(Vec, BlockNumber), DatabaseError> { - self.db - .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) - .await - } - /// Generates membership 
proofs for each one of the `nullifiers` against the latest nullifier /// tree. /// @@ -689,85 +382,6 @@ impl State { }) } - /// Loads data to synchronize a client. - /// - /// The client's request contains a list of note tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filtered based on this - /// block range. - /// - /// # Arguments - /// - /// - `block_num`: The last block *known* by the client, updates start from the next block. - /// - `account_ids`: Include the account's commitment if their _last change_ was in the result's - /// block range. - /// - `note_tags`: The tags the client is interested in, result is restricted to the first block - /// with any matches tags. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_state( - &self, - block_num: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> { - let inner = self.inner.read().await; - - let state_sync = self.db.get_state_sync(block_num, account_ids, note_tags).await?; - - let delta = if block_num == state_sync.block_header.block_num() { - // The client is in sync with the chain tip. - MmrDelta { - forest: Forest::new(block_num.as_usize()), - data: vec![], - } - } else { - // Important notes about the boundary conditions: - // - // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root - // contained in the block header always lag behind by one block, this is because the Mmr - // leaves are hashes of block headers, and we can't have self-referential hashes. These - // two points cancel out and don't require adjusting. 
- // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined to - // be - // exclusive, so the from_forest has to be adjusted with a +1 - let from_forest = (block_num + 1).as_usize(); - let to_forest = state_sync.block_header.block_num().as_usize(); - inner - .blockchain - .as_mmr() - .get_delta(Forest::new(from_forest), Forest::new(to_forest)) - .map_err(StateSyncError::FailedToBuildMmrDelta)? - }; - - Ok((state_sync, delta)) - } - - /// Loads data to synchronize a client's notes. - /// - /// The client's request contains a list of tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filter based on this - /// block range. - /// - /// # Arguments - /// - /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the - /// first block containing a matching note. - /// - `block_range`: The range of blocks from which to synchronize notes. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_notes( - &self, - note_tags: Vec, - block_range: RangeInclusive, - ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { - let inner = self.inner.read().await; - - let (note_sync, last_included_block) = - self.db.get_note_sync(block_range, note_tags).await?; - - let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; - - Ok((note_sync, mmr_proof, last_included_block)) - } - /// Returns data needed by the block producer to construct and prove the next block. 
pub async fn get_block_inputs( &self, @@ -996,11 +610,11 @@ impl State { pub async fn get_account( &self, account_request: AccountRequest, - ) -> Result { + ) -> Result { let AccountRequest { block_num, account_id, details } = account_request; if details.is_some() && !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return Err(GetAccountError::AccountNotPublic(account_id)); } let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; @@ -1022,19 +636,20 @@ impl State { &self, block_num: Option, account_id: AccountId, - ) -> Result<(BlockNumber, AccountWitness), DatabaseError> { + ) -> Result<(BlockNumber, AccountWitness), GetAccountError> { let inner_state = self.inner.read().await; // Determine which block to query let (block_num, witness) = if let Some(requested_block) = block_num { // Historical query: use the account tree with history - let witness = inner_state - .account_tree - .open_at(account_id, requested_block) - .ok_or_else(|| DatabaseError::HistoricalBlockNotAvailable { - block_num: requested_block, - reason: "Block is either in the future or has been pruned from history" - .to_string(), + let witness = + inner_state.account_tree.open_at(account_id, requested_block).ok_or_else(|| { + let latest_block = inner_state.account_tree.block_number_latest(); + if requested_block > latest_block { + GetAccountError::UnknownBlock(requested_block) + } else { + GetAccountError::BlockPruned(requested_block) + } })?; (requested_block, witness) } else { @@ -1061,7 +676,7 @@ impl State { account_id: AccountId, block_num: BlockNumber, detail_request: AccountDetailRequest, - ) -> Result { + ) -> Result { let AccountDetailRequest { code_commitment, asset_vault_commitment, @@ -1069,18 +684,25 @@ impl State { } = detail_request; if !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return Err(GetAccountError::AccountNotPublic(account_id)); } // Validate block exists 
in the blockchain before querying the database - self.validate_block_exists(block_num).await?; + { + let inner = self.inner.read().await; + let latest_block_num = inner.latest_block_num(); + + if block_num > latest_block_num { + return Err(GetAccountError::UnknownBlock(block_num)); + } + } // Query account header and storage header together in a single DB call let (account_header, storage_header) = self .db .select_account_header_with_storage_header_at_block(account_id, block_num) .await? - .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; + .ok_or(GetAccountError::AccountNotFound(account_id, block_num))?; let account_code = match code_commitment { Some(commitment) if commitment == account_header.code_commitment() => None, @@ -1143,15 +765,6 @@ impl State { }) } - /// Returns storage map values for syncing within a block range. - pub(crate) async fn get_storage_map_sync_values( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range).await - } - /// Loads a block from the block store. Return `Ok(None)` if the block is not found. pub async fn load_block( &self, @@ -1168,39 +781,11 @@ impl State { self.inner.read().await.latest_block_num() } - /// Validates that a block exists in the blockchain - /// - /// # Attention - /// - /// Acquires a *read lock** on `self.inner`. - /// - /// # Errors - /// - /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain. - async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> { - let inner = self.inner.read().await; - let latest_block_num = inner.latest_block_num(); - - if block_num > latest_block_num { - return Err(DatabaseError::BlockNotFound(block_num)); - } - - Ok(()) - } - /// Emits metrics for each database table's size. 
pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { self.db.analyze_table_sizes().await } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.get_account_vault_sync(account_id, block_range).await - } /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( @@ -1220,16 +805,6 @@ impl State { self.db.select_note_script_by_root(root).await } - /// Returns the complete transaction records for the specified accounts within the specified - /// block range, including state commitments and note IDs. - pub async fn sync_transactions( - &self, - account_ids: Vec, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.select_transactions_records(account_ids, block_range).await - } - /// Returns vault asset witnesses for the specified account and block number. 
pub async fn get_vault_asset_witnesses( &self, diff --git a/crates/store/src/state/sync_state.rs b/crates/store/src/state/sync_state.rs new file mode 100644 index 0000000000..6568f31e61 --- /dev/null +++ b/crates/store/src/state/sync_state.rs @@ -0,0 +1,123 @@ +use std::ops::RangeInclusive; + +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrProof}; +use tracing::instrument; + +use super::State; +use crate::COMPONENT; +use crate::db::models::queries::StorageMapValuesPage; +use crate::db::{AccountVaultValue, NoteSyncUpdate, NullifierInfo}; +use crate::errors::{DatabaseError, NoteSyncError, StateSyncError}; + +// STATE SYNCHRONIZATION ENDPOINTS +// ================================================================================================ + +impl State { + /// Returns the complete transaction records for the specified accounts within the specified + /// block range, including state commitments and note IDs. + pub async fn sync_transactions( + &self, + account_ids: Vec, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.select_transactions_records(account_ids, block_range).await + } + + /// Returns the chain MMR delta for the specified block range. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_chain_mmr( + &self, + block_range: RangeInclusive, + ) -> Result { + let inner = self.inner.read().await; + + let block_from = *block_range.start(); + let block_to = *block_range.end(); + + if block_from == block_to { + return Ok(MmrDelta { + forest: Forest::new(block_from.as_usize()), + data: vec![], + }); + } + + // Important notes about the boundary conditions: + // + // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. 
The Mmr root + // contained in the block header always lag behind by one block, this is because the Mmr + // leaves are hashes of block headers, and we can't have self-referential hashes. These + // two points cancel out and don't require adjusting. + // - Mmr::get_delta is inclusive, whereas the sync request block_from is defined to be the + // last block already present in the caller's MMR. The delta should therefore start at the + // next block, so the from_forest has to be adjusted with a +1. + let from_forest = (block_from + 1).as_usize(); + let to_forest = block_to.as_usize(); + + inner + .blockchain + .as_mmr() + .get_delta(Forest::new(from_forest), Forest::new(to_forest)) + .map_err(StateSyncError::FailedToBuildMmrDelta) + } + + /// Loads data to synchronize a client's notes. + /// + /// The client's request contains a list of tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filter based on this + /// block range. + /// + /// # Arguments + /// + /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the + /// first block containing a matching note. + /// - `block_range`: The range of blocks from which to synchronize notes. 
+ #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_notes( + &self, + note_tags: Vec, + block_range: RangeInclusive, + ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { + let inner = self.inner.read().await; + + let (note_sync, last_included_block) = + self.db.get_note_sync(block_range, note_tags).await?; + + let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; + + Ok((note_sync, mmr_proof, last_included_block)) + } + + pub async fn sync_nullifiers( + &self, + prefix_len: u32, + nullifier_prefixes: Vec, + block_range: RangeInclusive, + ) -> Result<(Vec, BlockNumber), DatabaseError> { + self.db + .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) + .await + } + + // ACCOUNT STATE SYNCHRONIZATION + // -------------------------------------------------------------------------------------------- + + /// Returns account vault updates for specified account within a block range. + pub async fn sync_account_vault( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.get_account_vault_sync(account_id, block_range).await + } + + /// Returns storage map values for syncing within a block range. 
+ pub async fn sync_account_storage_maps( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result { + self.db.select_storage_map_sync_values(account_id, block_range).await + } +} diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index e61930937e..f2817c6049 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -21,7 +21,6 @@ testing = ["miden-protocol/testing"] [dependencies] anyhow = { workspace = true } bytes = { version = "1.10" } -figment = { features = ["env", "toml"], version = "0.10" } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } @@ -31,16 +30,18 @@ opentelemetry = { version = "0.31" } opentelemetry-otlp = { default-features = false, features = ["grpc-tonic", "tls-roots", "trace"], version = "0.31" } opentelemetry_sdk = { features = ["rt-tokio", "testing"], version = "0.31" } rand = { workspace = true } -serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } tokio = { workspace = true } tonic = { default-features = true, workspace = true } tower-http = { features = ["catch-panic"], workspace = true } tracing = { workspace = true } -tracing-forest = { features = ["chrono"], optional = true, version = "0.2" } +tracing-forest = { features = ["chrono"], optional = true, version = "0.3" } tracing-opentelemetry = { version = "0.32" } tracing-subscriber = { workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] thiserror = { workspace = true } diff --git a/crates/utils/build.rs b/crates/utils/build.rs new file mode 100644 index 0000000000..ed4038d06e --- /dev/null +++ b/crates/utils/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/crates/utils/src/config.rs b/crates/utils/src/config.rs deleted file mode 100644 index e0fc1a0a6b..0000000000 --- a/crates/utils/src/config.rs +++ /dev/null @@ 
-1,23 +0,0 @@ -use std::path::Path; - -use figment::Figment; -use figment::providers::{Format, Toml}; -use serde::Deserialize; - -pub const DEFAULT_NODE_RPC_PORT: u16 = 57291; -pub const DEFAULT_BLOCK_PRODUCER_PORT: u16 = 48046; -pub const DEFAULT_STORE_PORT: u16 = 28943; -pub const DEFAULT_FAUCET_SERVER_PORT: u16 = 8080; - -/// Loads the user configuration. -/// -/// This function will look for the configuration file at the provided path. If the path is -/// relative, searches in parent directories all the way to the root as well. -/// -/// The above configuration options are indented to support easy of packaging and deployment. -#[allow(clippy::result_large_err, reason = "This error crashes the node")] -pub fn load_config Deserialize<'a>>( - config_file: impl AsRef, -) -> figment::Result { - Figment::from(Toml::file(config_file.as_ref())).extract() -} diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 530e971e49..abf7852631 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -1,4 +1,3 @@ -pub mod config; pub mod cors; pub mod crypto; #[cfg(feature = "testing")] diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 1adf5be411..993b3be689 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -13,7 +13,7 @@ /// Basic request limit. pub const GENERAL_REQUEST_LIMIT: usize = 1000; -#[allow(missing_docs)] +#[expect(missing_docs)] #[derive(Debug, thiserror::Error)] #[error("parameter {which} exceeded limit {limit}: {size}")] pub struct QueryLimitError { @@ -46,21 +46,21 @@ pub trait QueryParamLimiter { /// store. pub const MAX_RESPONSE_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; -/// Used for the following RPC endpoints -/// * `state_sync` +/// Used for the following RPC endpoints: +/// * `sync_transactions` /// /// Capped at 1000 account IDs to keep SQL `IN` clauses bounded and response payloads under the -/// 4 MB budget. +/// 4 MB budget. 
pub struct QueryParamAccountIdLimit; impl QueryParamLimiter for QueryParamAccountIdLimit { const PARAM_NAME: &str = "account_id"; const LIMIT: usize = GENERAL_REQUEST_LIMIT; } -/// Used for the following RPC endpoints +/// Used for the following RPC endpoints: /// * `select_nullifiers_by_prefix` /// -/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MB payload +/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MB payload /// budget and to avoid unbounded prefix scans. pub struct QueryParamNullifierPrefixLimit; impl QueryParamLimiter for QueryParamNullifierPrefixLimit { @@ -68,12 +68,11 @@ impl QueryParamLimiter for QueryParamNullifierPrefixLimit { const LIMIT: usize = GENERAL_REQUEST_LIMIT; } -/// Used for the following RPC endpoints +/// Used for the following RPC endpoints: /// * `select_nullifiers_by_prefix` /// * `sync_nullifiers` -/// * `sync_state` /// -/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MB budget. +/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MB budget. pub struct QueryParamNullifierLimit; impl QueryParamLimiter for QueryParamNullifierLimit { const PARAM_NAME: &str = "nullifier"; @@ -83,7 +82,7 @@ impl QueryParamLimiter for QueryParamNullifierLimit { /// Used for the following RPC endpoints /// * `get_note_sync` /// -/// Capped at 1000 tags so note sync responses remain within the 4 MB payload budget. +/// Capped at 1000 tags so note sync responses remain within the 4 MB payload budget. pub struct QueryParamNoteTagLimit; impl QueryParamLimiter for QueryParamNoteTagLimit { const PARAM_NAME: &str = "note_tag"; @@ -103,7 +102,7 @@ impl QueryParamLimiter for QueryParamNoteIdLimit { /// Used for internal queries retrieving note inclusion proofs by commitment. 
/// -/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MB +/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MB /// payload cap. pub struct QueryParamNoteCommitmentLimit; impl QueryParamLimiter for QueryParamNoteCommitmentLimit { @@ -114,7 +113,7 @@ impl QueryParamLimiter for QueryParamNoteCommitmentLimit { /// Only used internally, not exposed via public RPC. /// /// Capped at 1000 block headers to bound internal batch operations and keep payloads below the -/// 4 MB limit. +/// 4 MB limit. pub struct QueryParamBlockLimit; impl QueryParamLimiter for QueryParamBlockLimit { const PARAM_NAME: &str = "block_header"; diff --git a/crates/utils/src/logging.rs b/crates/utils/src/logging.rs index 6593943f42..5893650303 100644 --- a/crates/utils/src/logging.rs +++ b/crates/utils/src/logging.rs @@ -10,6 +10,8 @@ use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::layer::{Filter, SubscriberExt}; use tracing_subscriber::{Layer, Registry}; +use crate::tracing::OpenTelemetrySpanExt; + /// Global tracer provider for flushing traces on panic. /// /// This is necessary because the panic hook needs access to the tracer provider to flush @@ -89,7 +91,12 @@ pub fn setup_tracing(otel: OpenTelemetry) -> anyhow::Result> { // This chains with the default panic hook to preserve backtrace printing. let default_hook = std::panic::take_hook(); std::panic::set_hook(Box::new(move |info| { - tracing::error!(panic = true, "{info}"); + tracing::error!(panic = true, info = %info, "panic"); + + // Mark the current span as failed for OpenTelemetry. + let info_str = info.to_string(); + let wrapped = anyhow::Error::msg(info_str); + tracing::Span::current().set_error(wrapped.as_ref()); // Flush traces before the program terminates. // This ensures the panic trace is exported even though the OtelGuard won't be dropped. 
diff --git a/crates/utils/src/panic.rs b/crates/utils/src/panic.rs index 1b899ee618..c330fe362a 100644 --- a/crates/utils/src/panic.rs +++ b/crates/utils/src/panic.rs @@ -4,14 +4,21 @@ use http::{Response, StatusCode, header}; use http_body_util::Full; pub use tower_http::catch_panic::CatchPanicLayer; +use crate::tracing::OpenTelemetrySpanExt; + /// Custom callback that is used by Tower to fulfill the /// [`tower_http::catch_panic::ResponseForPanic`] trait. /// /// This should be added to tonic server builder as a layer via [`CatchPanicLayer::custom()`]. +#[track_caller] pub fn catch_panic_layer_fn(err: Box) -> Response> { // Log the panic error details. let err = stringify_panic_error(err); - tracing::error!(panic = true, "{err}"); + tracing::error!(panic = true, error = %err, "panic"); + + // Mark the current span as failed for OpenTelemetry. + let wrapped = anyhow::Error::msg(err.clone()); + tracing::Span::current().set_error(wrapped.as_ref()); // Return generic error response. Response::builder() diff --git a/crates/utils/src/tracing/grpc.rs b/crates/utils/src/tracing/grpc.rs index f5d0951bfa..985a2e4ba8 100644 --- a/crates/utils/src/tracing/grpc.rs +++ b/crates/utils/src/tracing/grpc.rs @@ -9,6 +9,7 @@ use crate::tracing::OpenTelemetrySpanExt; /// The span name is dynamically set using the HTTP path via the `otel.name` field. /// Additionally also pulls in remote tracing context which allows the server trace to be connected /// to the client's origin trace. +#[track_caller] pub fn grpc_trace_fn(request: &http::Request) -> tracing::Span { // A gRPC request's path ends with `..//`. 
let mut path_segments = request.uri().path().rsplit('/'); diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 6115e7cff3..570f2a8d2d 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -18,6 +18,9 @@ workspace = true [dependencies] anyhow = { workspace = true } +diesel = { workspace = true } +diesel_migrations = { workspace = true } +miden-node-db = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } diff --git a/crates/validator/build.rs b/crates/validator/build.rs new file mode 100644 index 0000000000..b9f947e177 --- /dev/null +++ b/crates/validator/build.rs @@ -0,0 +1,9 @@ +// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in +// `validator/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . +fn main() { + println!("cargo:rerun-if-changed=./src/db/migrations"); + // If we do one re-write, the default rules are disabled, + // hence we need to trigger explicitly on `Cargo.toml`. 
+ // + println!("cargo:rerun-if-changed=Cargo.toml"); +} diff --git a/crates/validator/diesel.toml b/crates/validator/diesel.toml new file mode 100644 index 0000000000..bdce9175fa --- /dev/null +++ b/crates/validator/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/db/schema.rs" diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs index c1cab190bd..954d043b8c 100644 --- a/crates/validator/src/block_validation/mod.rs +++ b/crates/validator/src/block_validation/mod.rs @@ -1,22 +1,24 @@ -use std::sync::Arc; - -use miden_protocol::block::{BlockNumber, BlockSigner, ProposedBlock}; +use miden_node_db::{DatabaseError, Db}; +use miden_protocol::block::{BlockSigner, ProposedBlock}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::errors::ProposedBlockError; -use miden_protocol::transaction::TransactionId; -use tracing::{Instrument, info_span}; +use miden_protocol::transaction::{TransactionHeader, TransactionId}; +use tracing::{info_span, instrument}; -use crate::server::ValidatedTransactions; +use crate::COMPONENT; +use crate::db::find_unvalidated_transactions; // BLOCK VALIDATION ERROR // ================================================================================================ #[derive(thiserror::Error, Debug)] pub enum BlockValidationError { - #[error("transaction {0} in block {1} has not been validated")] - TransactionNotValidated(TransactionId, BlockNumber), + #[error("block contains unvalidated transactions {0:?}")] + UnvalidatedTransactions(Vec), #[error("failed to build block")] - BlockBuildingFailed(#[from] ProposedBlockError), + BlockBuildingFailed(#[source] ProposedBlockError), + #[error("failed to select transactions")] + DatabaseError(#[source] DatabaseError), } // BLOCK VALIDATION @@ -24,33 +26,31 @@ pub enum BlockValidationError { /// Validates a block 
by checking that all transactions in the proposed block have been processed by /// the validator in the past. -/// -/// Removes the validated transactions from the cache upon success. +#[instrument(target = COMPONENT, skip_all, err)] pub async fn validate_block( proposed_block: ProposedBlock, signer: &S, - validated_transactions: Arc, + db: &Db, ) -> Result { - // Check that all transactions in the proposed block have been validated - let verify_span = info_span!("verify_transactions"); - for tx_header in proposed_block.transactions() { - let tx_id = tx_header.id(); - // TODO: LruCache is a poor abstraction since it locks many times. - if validated_transactions - .get(&tx_id) - .instrument(verify_span.clone()) - .await - .is_none() - { - return Err(BlockValidationError::TransactionNotValidated( - tx_id, - proposed_block.block_num(), - )); - } + // Search for any proposed transactions that have not previously been validated. + let proposed_tx_ids = + proposed_block.transactions().map(TransactionHeader::id).collect::>(); + let unvalidated_txs = db + .transact("find_unvalidated_transactions", move |conn| { + find_unvalidated_transactions(conn, &proposed_tx_ids) + }) + .await + .map_err(BlockValidationError::DatabaseError)?; + + // All proposed transactions must have been validated. + if !unvalidated_txs.is_empty() { + return Err(BlockValidationError::UnvalidatedTransactions(unvalidated_txs)); } // Build the block header. - let (header, _) = proposed_block.into_header_and_body()?; + let (header, _) = proposed_block + .into_header_and_body() + .map_err(BlockValidationError::BlockBuildingFailed)?; // Sign the header. 
let signature = info_span!("sign_block").in_scope(|| signer.sign(&header)); diff --git a/crates/validator/src/db/migrations.rs b/crates/validator/src/db/migrations.rs new file mode 100644 index 0000000000..240c29033b --- /dev/null +++ b/crates/validator/src/db/migrations.rs @@ -0,0 +1,25 @@ +use diesel::SqliteConnection; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use miden_node_db::DatabaseError; +use tracing::instrument; + +use crate::COMPONENT; + +// The rebuild is automatically triggered by `build.rs` as described in +// . +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); + +#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +pub fn apply_migrations(conn: &mut SqliteConnection) -> std::result::Result<(), DatabaseError> { + let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); + tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); + + let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + return Ok(()); + }; + tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); + conn.revert_last_migration(MIGRATIONS) + .expect("Duality is maintained by the developer"); + + Ok(()) +} diff --git a/crates/validator/src/db/migrations/2025062000000_setup/down.sql b/crates/validator/src/db/migrations/2025062000000_setup/down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/crates/validator/src/db/migrations/2025062000000_setup/up.sql b/crates/validator/src/db/migrations/2025062000000_setup/up.sql new file mode 100644 index 0000000000..06297a9700 --- /dev/null +++ b/crates/validator/src/db/migrations/2025062000000_setup/up.sql @@ -0,0 +1,10 @@ +CREATE TABLE validated_transactions ( + id BLOB NOT NULL, + block_num INTEGER NOT NULL, + account_id BLOB NOT NULL, + "transaction" BLOB NOT NULL, -- Binary encoded ExecutedTransaction. 
+ PRIMARY KEY (id) +) WITHOUT ROWID; + +CREATE INDEX idx_validated_transactions_account_id ON validated_transactions(account_id); +CREATE INDEX idx_validated_transactions_block_num ON validated_transactions(block_num); diff --git a/crates/validator/src/db/mod.rs b/crates/validator/src/db/mod.rs new file mode 100644 index 0000000000..4c8fe665be --- /dev/null +++ b/crates/validator/src/db/mod.rs @@ -0,0 +1,80 @@ +mod migrations; +mod models; +mod schema; + +use std::path::PathBuf; + +use diesel::SqliteConnection; +use diesel::dsl::exists; +use diesel::prelude::*; +use miden_node_db::{DatabaseError, Db}; +use miden_protocol::transaction::TransactionId; +use miden_protocol::utils::Serializable; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::migrations::apply_migrations; +use crate::db::models::ValidatedTransactionRowInsert; +use crate::tx_validation::ValidatedTransaction; + +/// Open a connection to the DB and apply any pending migrations. +#[instrument(target = COMPONENT, skip_all)] +pub async fn load(database_filepath: PathBuf) -> Result { + let db = Db::new(&database_filepath)?; + tracing::info!( + target: COMPONENT, + sqlite= %database_filepath.display(), + "Connected to the database" + ); + + db.query("migrations", apply_migrations).await?; + Ok(db) +} + +/// Inserts a new validated transaction into the database. +#[instrument(target = COMPONENT, skip_all, fields(tx_id = %tx_info.tx_id()), err)] +pub(crate) fn insert_transaction( + conn: &mut SqliteConnection, + tx_info: &ValidatedTransaction, +) -> Result { + let row = ValidatedTransactionRowInsert::new(tx_info); + let count = diesel::insert_into(schema::validated_transactions::table) + .values(row) + .on_conflict_do_nothing() + .execute(conn)?; + Ok(count) +} + +/// Scans the database for transaction Ids that do not exist. +/// +/// If the resulting vector is empty, all supplied transaction ids have been validated in the past. 
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT EXISTS( +/// SELECT 1 +/// FROM validated_transactions +/// WHERE id = ? +/// ); +/// ``` +#[instrument(target = COMPONENT, skip(conn), err)] +pub(crate) fn find_unvalidated_transactions( + conn: &mut SqliteConnection, + tx_ids: &[TransactionId], +) -> Result, DatabaseError> { + let mut unvalidated_tx_ids = Vec::new(); + for tx_id in tx_ids { + // Check whether each transaction id exists in the database. + let exists = diesel::select(exists( + schema::validated_transactions::table + .filter(schema::validated_transactions::id.eq(tx_id.to_bytes())), + )) + .get_result::(conn)?; + // Record any transaction ids that do not exist. + if !exists { + unvalidated_tx_ids.push(*tx_id); + } + } + Ok(unvalidated_tx_ids) +} diff --git a/crates/validator/src/db/models.rs b/crates/validator/src/db/models.rs new file mode 100644 index 0000000000..9a50b7a393 --- /dev/null +++ b/crates/validator/src/db/models.rs @@ -0,0 +1,27 @@ +use diesel::prelude::*; +use miden_node_db::SqlTypeConvert; +use miden_tx::utils::Serializable; + +use crate::db::schema; +use crate::tx_validation::ValidatedTransaction; + +#[derive(Debug, Clone, PartialEq, Insertable)] +#[diesel(table_name = schema::validated_transactions)] +#[diesel(check_for_backend(diesel::sqlite::Sqlite))] +pub struct ValidatedTransactionRowInsert { + pub id: Vec, + pub block_num: i64, + pub account_id: Vec, + pub transaction: Vec, +} + +impl ValidatedTransactionRowInsert { + pub fn new(tx: &ValidatedTransaction) -> Self { + Self { + id: tx.tx_id().to_bytes(), + block_num: tx.block_num().to_raw_sql(), + account_id: tx.account_id().to_bytes(), + transaction: tx.to_bytes(), + } + } +} diff --git a/crates/validator/src/db/schema.rs b/crates/validator/src/db/schema.rs new file mode 100644 index 0000000000..0d299dbfdb --- /dev/null +++ b/crates/validator/src/db/schema.rs @@ -0,0 +1,8 @@ +diesel::table! 
{ + validated_transactions (id, block_num, account_id, transaction) { + id -> Binary, + block_num -> BigInt, + account_id -> Binary, + transaction -> Binary, + } +} diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index a45112d275..a987304c3e 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -1,4 +1,5 @@ mod block_validation; +mod db; mod server; mod tx_validation; diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index 89d28d25de..7f71161a23 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -1,42 +1,32 @@ use std::net::SocketAddr; -use std::num::NonZeroUsize; +use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use anyhow::Context; +use miden_node_db::Db; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; use miden_node_utils::ErrorReport; -use miden_node_utils::lru_cache::LruCache; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_node_utils::tracing::grpc::grpc_trace_fn; use miden_protocol::block::{BlockSigner, ProposedBlock}; -use miden_protocol::transaction::{ - ProvenTransaction, - TransactionHeader, - TransactionId, - TransactionInputs, -}; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; use miden_tx::utils::{Deserializable, Serializable}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tonic::Status; use tower_http::catch_panic::CatchPanicLayer; use tower_http::trace::TraceLayer; -use tracing::{Instrument, info_span}; +use tracing::{info_span, instrument}; use crate::COMPONENT; use crate::block_validation::validate_block; +use crate::db::{insert_transaction, load}; use crate::tx_validation::validate_transaction; -/// Number of transactions to keep in the validated transactions cache. 
-const NUM_VALIDATED_TRANSACTIONS: NonZeroUsize = NonZeroUsize::new(10000).unwrap(); - -/// A type alias for a LRU cache that stores validated transactions. -pub type ValidatedTransactions = LruCache; - // VALIDATOR // ================================================================================ @@ -53,6 +43,9 @@ pub struct Validator { /// The signer used to sign blocks. pub signer: S, + + /// The data directory for the validator component's database files. + pub data_directory: PathBuf, } impl Validator { @@ -63,6 +56,11 @@ impl Validator { pub async fn serve(self) -> anyhow::Result<()> { tracing::info!(target: COMPONENT, endpoint=?self.address, "Initializing server"); + // Initialize database connection. + let db = load(self.data_directory.join("validator.sqlite3")) + .await + .context("failed to initialize validator database")?; + let listener = TcpListener::bind(self.address) .await .context("failed to bind to block producer address")?; @@ -86,7 +84,7 @@ impl Validator { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .timeout(self.grpc_timeout) - .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer))) + .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer, db))) .add_service(reflection_service) .add_service(reflection_service_alpha) .serve_with_incoming(TcpListenerStream::new(listener)) @@ -103,14 +101,12 @@ impl Validator { /// Implements the gRPC API for the validator. struct ValidatorServer { signer: S, - validated_transactions: Arc, + db: Arc, } impl ValidatorServer { - fn new(signer: S) -> Self { - let validated_transactions = - Arc::new(ValidatedTransactions::new(NUM_VALIDATED_TRANSACTIONS)); - Self { signer, validated_transactions } + fn new(signer: S, db: Db) -> Self { + Self { signer, db: db.into() } } } @@ -128,6 +124,7 @@ impl api_server::Api for ValidatorServer } /// Receives a proven transaction, then validates and stores it. 
+ #[instrument(target = COMPONENT, skip_all, err)] async fn submit_proven_transaction( &self, request: tonic::Request, @@ -150,17 +147,17 @@ impl api_server::Api for ValidatorServer tracing::Span::current().set_attribute("transaction.id", tx.id()); // Validate the transaction. - let validated_tx_header = validate_transaction(tx, inputs).await.map_err(|err| { + let tx_info = validate_transaction(tx, inputs).await.map_err(|err| { Status::invalid_argument(err.as_report_context("Invalid transaction")) })?; - // Register the validated transaction. - let tx_id = validated_tx_header.id(); - self.validated_transactions - .put(tx_id, validated_tx_header) - .instrument(info_span!("validated_txs.insert")) - .await; - + // Store the validated transaction. + self.db + .transact("insert_transaction", move |conn| insert_transaction(conn, &tx_info)) + .await + .map_err(|err| { + Status::internal(err.as_report_context("Failed to insert transaction")) + })?; Ok(tonic::Response::new(())) } @@ -181,11 +178,12 @@ impl api_server::Api for ValidatorServer // Validate the block. let signature = - validate_block(proposed_block, &self.signer, self.validated_transactions.clone()) - .await - .map_err(|err| { - tonic::Status::invalid_argument(format!("Failed to validate block: {err}",)) - })?; + validate_block(proposed_block, &self.signer, &self.db).await.map_err(|err| { + tonic::Status::invalid_argument(format!( + "Failed to validate block: {}", + err.as_report() + )) + })?; // Send the signature. 
info_span!("serialize").in_scope(|| { diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs index 20d610acaa..f2d1250a20 100644 --- a/crates/validator/src/tx_validation/mod.rs +++ b/crates/validator/src/tx_validation/mod.rs @@ -1,11 +1,15 @@ mod data_store; +mod validated_tx; pub use data_store::TransactionInputsDataStore; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs}; use miden_tx::auth::UnreachableAuth; use miden_tx::{TransactionExecutor, TransactionExecutorError, TransactionVerifier}; -use tracing::{Instrument, info_span}; +use tracing::{Instrument, info_span, instrument}; +pub use validated_tx::ValidatedTransaction; + +use crate::COMPONENT; // TRANSACTION VALIDATION ERROR // ================================================================================================ @@ -30,10 +34,11 @@ pub enum TransactionValidationError { /// provided proven transaction. /// /// Returns the header of the executed transaction if successful. 
+#[instrument(target = COMPONENT, skip_all, err)] pub async fn validate_transaction( proven_tx: ProvenTransaction, tx_inputs: TransactionInputs, -) -> Result<TransactionHeader, TransactionValidationError> { +) -> Result<ValidatedTransaction, TransactionValidationError> { // First, verify the transaction proof info_span!("verify").in_scope(|| { let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); @@ -56,7 +61,7 @@ pub async fn validate_transaction( let executed_tx_header: TransactionHeader = (&executed_tx).into(); let proven_tx_header: TransactionHeader = (&proven_tx).into(); if executed_tx_header == proven_tx_header { - Ok(executed_tx_header) + Ok(ValidatedTransaction::new(executed_tx)) } else { Err(TransactionValidationError::Mismatch { proven_tx_header: proven_tx_header.into(), diff --git a/crates/validator/src/tx_validation/validated_tx.rs b/crates/validator/src/tx_validation/validated_tx.rs new file mode 100644 index 0000000000..3ee7dfa458 --- /dev/null +++ b/crates/validator/src/tx_validation/validated_tx.rs @@ -0,0 +1,38 @@ +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::{ExecutedTransaction, TransactionId}; +use miden_tx::utils::Serializable; + +/// Re-executed and validated transaction that the Validator, or some ad-hoc +/// auditing procedure, might need to analyze. +/// +/// Constructed from an [`ExecutedTransaction`] that the Validator would have created while +/// re-executing and validating a [`miden_protocol::transaction::ProvenTransaction`]. +pub struct ValidatedTransaction(ExecutedTransaction); + +impl ValidatedTransaction { + /// Creates a new instance of [`ValidatedTransaction`]. + pub fn new(tx: ExecutedTransaction) -> Self { + Self(tx) + } + + /// Returns ID of the transaction. + pub fn tx_id(&self) -> TransactionId { + self.0.id() + } + + /// Returns the block number in which the transaction was executed. 
+ pub fn block_num(&self) -> BlockNumber { + self.0.block_header().block_num() + } + + /// Returns ID of the account against which this transaction was executed. + pub fn account_id(&self) -> AccountId { + self.0.account_delta().id() + } + + /// Returns the binary representation of the transaction info. + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/docs/external/src/operator/installation.md b/docs/external/src/operator/installation.md index 1f27c639d0..662d76851d 100644 --- a/docs/external/src/operator/installation.md +++ b/docs/external/src/operator/installation.md @@ -39,6 +39,18 @@ command ensures that all required libraries are installed. sudo apt install llvm clang bindgen pkg-config libssl-dev libsqlite3-dev ``` +On macOS, ensure the Xcode Command Line Tools are installed: + +```sh +xcode-select --install +``` + +If you still see `'cstdint' file not found` errors after installing the Command Line Tools (common after a macOS upgrade), try setting the SDK root explicitly: + +```sh +export SDKROOT="$(xcrun --sdk macosx --show-sdk-path)" +``` + Install the latest node binary: ```sh diff --git a/docs/external/src/operator/usage.md b/docs/external/src/operator/usage.md index fa48617231..e8bd377bbd 100644 --- a/docs/external/src/operator/usage.md +++ b/docs/external/src/operator/usage.md @@ -50,8 +50,8 @@ miden-node bundled bootstrap \ --genesis-config-file genesis.toml ``` -The genesis configuration file should contain fee parameters, the native faucet, optionally other -fungible faucets, and also optionally, wallet definitions with assets, for example: +The genesis configuration file should contain fee parameters, optionally a custom native faucet, +optionally other fungible faucets, and also optionally, wallet definitions with assets, for example: ```toml # The UNIX timestamp of the genesis block. It will influence the hash of the genesis block. 
@@ -59,11 +59,13 @@ timestamp = 1717344256 # Defines the format of the block protocol to use for the genesis block. version = 1 -# The native faucet to use for fees. -[native_faucet] -symbol = "MIDEN" -decimals = 6 -max_supply = 100_000_000_000_000_000 +# The native faucet defaults to a MIDEN token (symbol="MIDEN", decimals=6, +# max_supply=100_000_000_000_000_000). To override it with a pre-built account +# file, specify the path: +# +# native_faucet = "path/to/faucet.mac" +# +# The path is relative to this configuration file. # The fee parameters to use for the genesis block. [fee_parameters] @@ -95,6 +97,17 @@ storage_mode = "private" # has_updatable_code = false # default value ``` +To include pre-built accounts (e.g. bridge or wrapped-asset faucets) in the genesis block, use +`[[account]]` entries with paths to `.mac` files: + +```toml +[[account]] +path = "bridge.mac" + +[[account]] +path = "eth_faucet.mac" +``` + ## Operation Start the node with the desired public gRPC server address. diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index b26e881313..69b7224062 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -22,8 +22,8 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] - [SyncNullifiers](#syncnullifiers) - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) -- [SyncState](#syncstate) - [SyncAccountStorageMaps](#syncaccountstoragemaps) +- [SyncChainMmr](#syncchainmmr) - [SyncTransactions](#synctransactions) - [Status](#status) @@ -107,6 +107,19 @@ The witness proves the account's state commitment in the account tree. If detail If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. +#### Error Codes + +When the request fails, detailed error information is provided through gRPC status details. 
The following error codes may be returned: + +| Error Code | Value | gRPC Status | Description | +|---------------------------|-------|--------------------|------------------------------------------------------| +| `INTERNAL_ERROR` | 0 | `INTERNAL` | Internal server error occurred | +| `DESERIALIZATION_FAILED` | 1 | `INVALID_ARGUMENT` | Request could not be deserialized | +| `ACCOUNT_NOT_FOUND` | 2 | `INVALID_ARGUMENT` | Account not found at the requested block | +| `ACCOUNT_NOT_PUBLIC` | 3 | `INVALID_ARGUMENT` | Account details requested for a non-public account | +| `UNKNOWN_BLOCK` | 4 | `INVALID_ARGUMENT` | Requested block number is unknown | +| `BLOCK_PRUNED` | 5 | `INVALID_ARGUMENT` | Requested block has been pruned | + ### GetBlockByNumber Request the raw data for a specific block. @@ -128,7 +141,9 @@ This endpoint allows clients to discover the maximum number of items that can be "endpoints": { "CheckNullifiers": { "parameters": { "nullifier": 1000 } }, "SyncNullifiers": { "parameters": { "nullifier": 1000 } }, - "SyncState": { "parameters": { "account_id": 1000, "note_tag": 1000 } }, + "SyncTransactions": { "parameters": { "account_id": 1000 } }, + "SyncAccountVault": { "parameters": { "account_id": 1000 } }, + "SyncAccountStorageMaps": { "parameters": { "account_id": 1000 } }, "SyncNotes": { "parameters": { "note_tag": 1000 } }, "GetNotesById": { "parameters": { "note_id": 100 } } } @@ -194,18 +209,6 @@ A basic note sync can be implemented by repeatedly requesting the previous respo **Limits:** `note_tag` (1000) -### SyncState - -Iteratively sync data for specific notes and accounts. - -This request returns the next block containing data of interest. Client is expected to repeat these requests in a loop until the response reaches the head of the chain, at which point the data is fully synced. - -Each update response also contains info about new notes, accounts etc. created. It also returns Chain MMR delta that can be used to update the state of Chain MMR. 
This includes both chain MMR peaks and chain MMR nodes. - -The low part of note tags are redacted to preserve some degree of privacy. Returned data therefore contains additional notes which should be filtered out by the client. - -**Limits:** `account_id` (1000), `note_tag` (1000) - ### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. @@ -214,6 +217,12 @@ Caller specifies the `account_id` of the public account and the block range (`bl This endpoint enables clients to maintain an updated view of account storage. +### SyncChainMmr + +Returns MMR delta information needed to synchronize the chain MMR within a block range. + +Caller specifies the `block_range`, starting from the last block already represented in its local MMR. The response contains the MMR delta for the requested range, but at most up to (and including) the chain tip. + ### SyncTransactions Returns transaction records for specific accounts within a block range. 
diff --git a/packaging/node/miden-validator.service b/packaging/node/miden-validator.service new file mode 100644 index 0000000000..7b6c5de874 --- /dev/null +++ b/packaging/node/miden-validator.service @@ -0,0 +1,16 @@ +[Unit] +Description=Miden validator +Wants=network-online.target + +[Install] +WantedBy=multi-user.target + +[Service] +Type=exec +Environment="OTEL_SERVICE_NAME=miden-validator" +EnvironmentFile=/lib/systemd/system/miden-validator.env +ExecStart=/usr/bin/miden-node validator start +WorkingDirectory=/opt/miden-validator +User=miden-validator +RestartSec=5 +Restart=always diff --git a/packaging/node/postinst b/packaging/node/postinst index 8967f9e54e..036b2d112a 100644 --- a/packaging/node/postinst +++ b/packaging/node/postinst @@ -2,25 +2,28 @@ # # This is a postinstallation script so the service can be configured and started when requested. -# user is expected by the systemd service file and `/opt/` is its working directory, -sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-node +for svc in miden-node miden-validator; do + # user is expected by the systemd service file and `/opt/` is its working directory, + sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent "$svc" -# Working folder. -if [ -d "/opt/miden-node" ] -then - echo "Directory /opt/miden-node exists." -else - mkdir -p /opt/miden-node -fi -sudo chown -R miden-node /opt/miden-node + # Working folder. + if [ -d "/opt/$svc" ] + then + echo "Directory /opt/$svc exists." + else + mkdir -p "/opt/$svc" + fi + sudo chown -R "$svc" "/opt/$svc" -# Configuration folder -if [ -d "/etc/opt/miden-node" ] -then - echo "Directory /etc/opt/miden-node exists." -else - mkdir -p /etc/opt/miden-node -fi -sudo chown -R miden-node /etc/opt/miden-node + # Configuration folder + if [ -d "/etc/opt/$svc" ] + then + echo "Directory /etc/opt/$svc exists." 
+ else + mkdir -p "/etc/opt/$svc" + fi + sudo chown -R "$svc" "/etc/opt/$svc" + +done sudo systemctl daemon-reload diff --git a/packaging/node/postrm b/packaging/node/postrm index 893a535881..86a9846a25 100644 --- a/packaging/node/postrm +++ b/packaging/node/postrm @@ -3,7 +3,10 @@ ############### # Remove miden-node installs ############## -sudo rm -rf /lib/systemd/system/miden-node.service -sudo rm -rf /etc/opt/miden-node -sudo deluser miden-node +for svc in miden-node miden-validator; do + sudo rm -rf "/lib/systemd/system/$svc.service" + sudo rm -rf "/etc/opt/$svc" + sudo deluser "$svc" +done + sudo systemctl daemon-reload diff --git a/packaging/prover-proxy/miden-prover-proxy.service b/packaging/prover-proxy/miden-prover-proxy.service deleted file mode 100644 index 90a34c9d0f..0000000000 --- a/packaging/prover-proxy/miden-prover-proxy.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Miden delegated prover proxy -Wants=network-online.target - -[Install] -WantedBy=multi-user.target - -[Service] -Type=exec -Environment="OTEL_SERVICE_NAME=miden-prover-proxy" -EnvironmentFile=/lib/systemd/system/miden-prover-proxy.env -ExecStart=/usr/bin/miden-remote-prover start-proxy -WorkingDirectory=/opt/miden-prover-proxy -User=miden-prover-proxy -RestartSec=5 -Restart=always -LimitCORE=infinity diff --git a/packaging/prover-proxy/postinst b/packaging/prover-proxy/postinst deleted file mode 100644 index 275c8f2c7a..0000000000 --- a/packaging/prover-proxy/postinst +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# -# This is a postinstallation script so the service can be configured and started when requested. - -# User is expected by the systemd service file and `/opt/` is its working directory, -sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-prover-proxy - -# Working folder. -if [ -d "/opt/miden-prover-proxy" ] -then - echo "Directory /opt/miden-prover-proxy exists." 
-else - mkdir -p /opt/miden-prover-proxy -fi -sudo chown -R miden-prover-proxy /opt/miden-prover-proxy - -# Configuration folder -if [ -d "/etc/opt/miden-prover-proxy" ] -then - echo "Directory /etc/opt/miden-prover-proxy exists." -else - mkdir -p /etc/opt/miden-prover-proxy -fi -sudo chown -R miden-prover-proxy /etc/opt/miden-prover-proxy - -sudo systemctl daemon-reload -sudo systemctl enable miden-prover-proxy -sudo systemctl start miden-prover-proxy diff --git a/packaging/prover-proxy/postrm b/packaging/prover-proxy/postrm deleted file mode 100644 index 001360b5c6..0000000000 --- a/packaging/prover-proxy/postrm +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# -############### -# Remove miden-prover-proxy installs -############## -sudo rm -f /lib/systemd/system/miden-prover-proxy.* -sudo rm -rf /opt/miden-prover-proxy/ -sudo deluser miden-prover-proxy -sudo systemctl daemon-reload diff --git a/packaging/prover/miden-prover.service b/packaging/prover/miden-prover.service index a34eb26afb..4aafc09ca0 100644 --- a/packaging/prover/miden-prover.service +++ b/packaging/prover/miden-prover.service @@ -9,8 +9,7 @@ WantedBy=multi-user.target Type=exec Environment="OTEL_SERVICE_NAME=miden-prover" EnvironmentFile=/lib/systemd/system/miden-prover.env -ExecStart=/usr/bin/miden-remote-prover start-worker -WorkingDirectory=/opt/miden-prover +ExecStart=/usr/bin/miden-remote-prover User=miden-prover RestartSec=5 Restart=always diff --git a/packaging/prover/postinst b/packaging/prover/postinst index 9976ba33bf..2069a4cb69 100644 --- a/packaging/prover/postinst +++ b/packaging/prover/postinst @@ -2,27 +2,9 @@ # # This is a postinstallation script so the service can be configured and started when requested. 
-# User is expected by the systemd service file and `/opt/` is its working directory, +# User is expected by the systemd service file sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-prover -# Working folder. -if [ -d "/opt/miden-prover" ] -then - echo "Directory /opt/miden-prover exists." -else - mkdir -p /opt/miden-prover -fi -sudo chown -R miden-prover /opt/miden-prover - -# Configuration folder -if [ -d "/etc/opt/miden-prover" ] -then - echo "Directory /etc/opt/miden-prover exists." -else - mkdir -p /etc/opt/miden-prover -fi -sudo chown -R miden-prover /etc/opt/miden-prover - sudo systemctl daemon-reload sudo systemctl enable miden-prover sudo systemctl start miden-prover diff --git a/packaging/prover/postrm b/packaging/prover/postrm index d57bf2efcc..a633574388 100644 --- a/packaging/prover/postrm +++ b/packaging/prover/postrm @@ -3,7 +3,5 @@ ############### # Remove miden-prover installs ############## -sudo rm -f /lib/systemd/system/miden-prover.* -sudo rm -rf /opt/miden-prover/ sudo deluser miden-prover sudo systemctl daemon-reload diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 001dc40986..1012476d18 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -63,22 +63,8 @@ service Rpc { // tip of the chain. rpc SyncNotes(rpc.SyncNotesRequest) returns (rpc.SyncNotesResponse) {} - // Returns info which can be used by the requester to sync up to the latest state of the chain - // for the objects (accounts, notes, nullifiers) the requester is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. requester is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the requester is fully synchronized with the chain. 
- // - // Each request also returns info about new notes, nullifiers etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. - // - // For preserving some degree of privacy, note tags and nullifiers filters contain only high - // part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - // additional filtering of that data on its side. - rpc SyncState(rpc.SyncStateRequest) returns (rpc.SyncStateResponse) {} + // Returns chain MMR updates within a block range. + rpc SyncChainMmr(rpc.SyncChainMmrRequest) returns (rpc.SyncChainMmrResponse) {} // Returns account vault updates for specified account within a block range. rpc SyncAccountVault(rpc.SyncAccountVaultRequest) returns (rpc.SyncAccountVaultResponse) {} @@ -96,7 +82,7 @@ service Rpc { // Store API for the BlockProducer component service BlockProducer { // Applies changes of a new block to the DB and in-memory data structures. - rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + rpc ApplyBlock(ApplyBlockRequest) returns (google.protobuf.Empty) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. @@ -112,6 +98,18 @@ service BlockProducer { rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} } +// APPLY BLOCK REQUEST +// ================================================================================================ + +// Applies a block to the state. +message ApplyBlockRequest { + // Ordered batches encoded using [winter_utils::Serializable] implementation for + // [miden_objects::batch::OrderedBatches]. + bytes ordered_batches = 1; + // Block signed by the Validator. 
+ blockchain.SignedBlock block = 2; +} + // GET BLOCK INPUTS // ================================================================================================ diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index f521fc1c5f..59f587f67e 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -17,6 +17,13 @@ service Api { // Returns the status info of the node. rpc Status(google.protobuf.Empty) returns (RpcStatus) {} + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. + rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + // Returns a Sparse Merkle Tree opening proof for each requested nullifier // // Each proof demonstrates either: @@ -46,6 +53,9 @@ service Api { // Returns the script for a note by its root. rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} + // TRANSACTION SUBMISSION ENDPOINTS + // -------------------------------------------------------------------------------------------- + // Submits proven transaction to the Miden network. Returns the node's current block height. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} @@ -63,54 +73,38 @@ service Api { // Returns the node's current block height. rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} - // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - // - // Note that only 16-bit prefixes are supported at this time. 
- rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} + // STATE SYNCHRONIZATION ENDPOINTS + // -------------------------------------------------------------------------------------------- - // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} + // Returns transactions records for specific accounts within a block range. + rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} - // Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. + // Returns info which can be used by the client to sync up to the tip of chain for the notes + // they are interested in. // - // Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - // matching notes for. The request will then return the next block containing any note matching the provided tags. + // Client specifies the `note_tags` they are interested in, and the block height from which to + // search for new for matching notes for. The request will then return the next block containing + // any note matching the provided tags. // // The response includes each note's metadata and inclusion proof. // - // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - // tip of the chain. + // A basic note sync can be implemented by repeatedly requesting the previous response's block + // until reaching the tip of the chain. rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} - // Returns info which can be used by the client to sync up to the latest state of the chain - // for the objects (accounts and notes) the client is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. 
Client is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the client is fully synchronized with the chain. - // - // Each update response also contains info about new notes, accounts etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. + // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. // - // For preserving some degree of privacy, note tags contain only high - // part of hashes. Thus, returned data contains excessive notes, client can make - // additional filtering of that data on its side. - rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} + // Note that only 16-bit prefixes are supported at this time. + rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} + + // Returns account vault updates for specified account within a block range. + rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} // Returns storage map updates for specified account and storage slots within a block range. rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} - - // Returns the query parameter limits configured for RPC methods. - // - // These define the maximum number of each parameter a method will accept. - // Exceeding the limit will result in the request being rejected and you should instead send - // multiple smaller requests. - rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + // Returns MMR delta needed to synchronize the chain MMR within the requested block range. 
+ rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} } // RPC STATUS @@ -486,51 +480,26 @@ message SyncNotesResponse { repeated note.NoteSyncRecord notes = 4; } -// SYNC STATE +// SYNC CHAIN MMR // ================================================================================================ -// State synchronization request. -// -// Specifies state updates the requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -// `account_ids` for that block range. -message SyncStateRequest { - // Last block known by the requester. The response will contain data starting from the next block, - // until the first block which contains a note of matching the requested tag, or the chain tip - // if there are no notes. - fixed32 block_num = 1; - - // Accounts' commitment to include in the response. +// Chain MMR synchronization request. +message SyncChainMmrRequest { + // Block range from which to synchronize the chain MMR. // - // An account commitment will be included if-and-only-if it is the latest update. Meaning it is - // possible there was an update to the account for the given range, but if it is not the latest, - // it won't be included in the response. - repeated account.AccountId account_ids = 2; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 3; + // The response will contain MMR delta starting after `block_range.block_from` up to + // `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last + // block already present in the caller's MMR so the delta begins at the next block. + BlockRange block_range = 1; } -// Represents the result of syncing state request. -message SyncStateResponse { - // Number of the latest block in the chain. - fixed32 chain_tip = 1; - - // Block header of the block with the first note matching the specified criteria. 
- blockchain.BlockHeader block_header = 2; - - // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - primitives.MmrDelta mmr_delta = 3; - - // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - repeated account.AccountSummary accounts = 5; - - // List of transactions executed against requested accounts between `request.block_num + 1` and - // `response.block_header.block_num`. - repeated transaction.TransactionSummary transactions = 6; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 7; +// Represents the result of syncing chain MMR. +message SyncChainMmrResponse { + // For which block range the MMR delta is returned. + BlockRange block_range = 1; + // Data needed to update the partial MMR from `request.block_range.block_from + 1` to + // `response.block_range.block_to` or the chain tip. + primitives.MmrDelta mmr_delta = 2; } // SYNC ACCOUNT STORAGE MAP @@ -650,7 +619,7 @@ message TransactionRecord { // Represents the query parameter limits for RPC endpoints. message RpcLimits { // Maps RPC endpoint names to their parameter limits. - // Key: endpoint name (e.g., "CheckNullifiers", "SyncState") + // Key: endpoint name (e.g., "CheckNullifiers") // Value: map of parameter names to their limit values map endpoints = 1; } diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto index 6f53cd4f33..43828d4dc9 100644 --- a/proto/proto/types/blockchain.proto +++ b/proto/proto/types/blockchain.proto @@ -7,11 +7,11 @@ import "types/primitives.proto"; // BLOCK // ================================================================================================ -// Represents a block. -message Block { - // Block data encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::block::Block]. 
- bytes block = 1; +// Represents a signed block. +message SignedBlock { + BlockHeader header = 1; + BlockBody body = 2; + BlockSignature signature = 3; } // Represents a proposed block. diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto index ac125daa06..ebaa64ed61 100644 --- a/proto/proto/types/note.proto +++ b/proto/proto/types/note.proto @@ -7,6 +7,16 @@ import "types/account.proto"; // NOTES // ================================================================================================ +// The type of a note. +enum NoteType { + // Unspecified note type (default value, should not be used). + NOTE_TYPE_UNSPECIFIED = 0; + // Public note - details are visible on-chain. + NOTE_TYPE_PUBLIC = 1; + // Private note - details are not visible on-chain. + NOTE_TYPE_PRIVATE = 2; +} + // Represents a note's ID. message NoteId { // A unique identifier of the note which is a 32-byte commitment to the underlying note data. @@ -24,8 +34,8 @@ message NoteMetadata { // The account which sent the note. account.AccountId sender = 1; - // The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted). - uint32 note_type = 2; + // The type of the note. + NoteType note_type = 2; // A value which can be used by the recipient(s) to identify notes intended for them. 
// diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 6744e56e15..d9a424cef9 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.90" +channel = "1.91" components = ["clippy", "rust-src", "rustfmt"] profile = "minimal" targets = ["wasm32-unknown-unknown"] diff --git a/scripts/check-msrv.sh b/scripts/check-msrv.sh deleted file mode 100755 index 6058a0ace2..0000000000 --- a/scripts/check-msrv.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -set -e -set -o pipefail - -# Enhanced MSRV checking script for workspace repository -# Checks MSRV for each workspace member and provides helpful error messages - -# ---- utilities -------------------------------------------------------------- - -check_command() { - if ! command -v "$1" >/dev/null 2>&1; then - echo "ERROR: Required command '$1' is not installed or not in PATH" - exit 1 - fi -} - -# Check required commands -check_command "cargo" -check_command "jq" -check_command "rustup" -check_command "sed" -check_command "grep" -check_command "awk" - -# Portable in-place sed (GNU/macOS); usage: sed_i 's/foo/bar/' file -# shellcheck disable=SC2329 # used quoted -sed_i() { - if sed --version >/dev/null 2>&1; then - sed -i "$@" - else - sed -i '' "$@" - fi -} - -# ---- repo root -------------------------------------------------------------- - -# Get the directory where this script is located and change to the parent directory -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$DIR/.." - -echo "Checking MSRV for workspace members..." 
- -# ---- metadata -------------------------------------------------------------- - -metadata_json="$(cargo metadata --no-deps --format-version 1)" -workspace_root="$(printf '%s' "$metadata_json" | jq -r '.workspace_root')" - -failed_packages="" - -# Iterate actual workspace packages with manifest paths and (maybe) rust_version -# Fields per line (TSV): id name manifest_path rust_version_or_empty -while IFS=$'\t' read -r pkg_id package_name manifest_path rust_version; do - # Derive package directory (avoid external dirname for portability) - package_dir="${manifest_path%/*}" - if [[ -z "$package_dir" || "$package_dir" == "$manifest_path" ]]; then - package_dir="." - fi - - echo "Checking $package_name ($pkg_id) in $package_dir" - - if [[ ! -f "$package_dir/Cargo.toml" ]]; then - echo "WARNING: No Cargo.toml found in $package_dir, skipping..." - continue - fi - - # Prefer cargo metadata's effective rust_version if present - current_msrv="$rust_version" - if [[ -z "$current_msrv" ]]; then - # If the crate inherits: rust-version.workspace = true - if grep -Eq '^\s*rust-version\.workspace\s*=\s*true\b' "$package_dir/Cargo.toml"; then - # Read from workspace root [workspace.package] - current_msrv="$(grep -Eo '^\s*rust-version\s*=\s*"[^"]+"' "$workspace_root/Cargo.toml" | head -n1 | sed -E 's/.*"([^"]+)".*/\1/')" - if [[ -n "$current_msrv" ]]; then - echo " Using workspace MSRV: $current_msrv" - fi - fi - fi - - if [[ -z "$current_msrv" ]]; then - echo "WARNING: No rust-version found (package or workspace) for $package_name" - continue - fi - - echo " Current MSRV: $current_msrv" - - # Try to verify the MSRV - if ! cargo msrv verify --manifest-path "$package_dir/Cargo.toml" >/dev/null 2>&1; then - echo "ERROR: MSRV check failed for $package_name" - failed_packages="$failed_packages $package_name" - - echo "Searching for correct MSRV for $package_name..." 
- - # Determine the currently-installed stable toolchain version (e.g., "1.91.1") - latest_stable="$(rustup run stable rustc --version 2>/dev/null | awk '{print $2}')" - if [[ -z "$latest_stable" ]]; then latest_stable="1.91.1"; fi - - # Search for the actual MSRV starting from the current one - if actual_msrv=$(cargo msrv find \ - --manifest-path "$package_dir/Cargo.toml" \ - --min "$current_msrv" \ - --max "$latest_stable" \ - --output-format minimal 2>/dev/null); then - echo " Found actual MSRV: $actual_msrv" - echo "" - echo "ERROR SUMMARY for $package_name:" - echo " Package: $package_name" - echo " Directory: $package_dir" - echo " Current (incorrect) MSRV: $current_msrv" - echo " Correct MSRV: $actual_msrv" - echo "" - echo "TO FIX:" - echo " Update rust-version in $package_dir/Cargo.toml from \"$current_msrv\" to \"$actual_msrv\"" - echo "" - echo " Or run this command (portable in-place edit):" - echo " sed_i 's/^\\s*rust-version\\s*=\\s*\"$current_msrv\"/rust-version = \"$actual_msrv\"/' \"$package_dir/Cargo.toml\"" - else - echo " Could not determine correct MSRV automatically" - echo "" - echo "ERROR SUMMARY for $package_name:" - echo " Package: $package_name" - echo " Directory: $package_dir" - echo " Current (incorrect) MSRV: $current_msrv" - echo " Could not automatically determine correct MSRV" - echo "" - echo "TO FIX:" - echo " Run manually: cargo msrv find --manifest-path \"$package_dir/Cargo.toml\"" - fi - echo "-------------------------------------------------------------------------------" - else - echo "OK: MSRV check passed for $package_name" - fi - echo "" - -done < <( - printf '%s' "$metadata_json" \ - | jq -r '. as $m - | $m.workspace_members[] - | . 
as $id - | ($m.packages[] | select(.id == $id) - | [ .id, .name, .manifest_path, (.rust_version // "") ] | @tsv)' -) - -if [[ -n "$failed_packages" ]]; then - echo "MSRV CHECK FAILED" - echo "" - echo "The following packages have incorrect MSRV settings:$failed_packages" - echo "" - echo "Please fix the rust-version fields in the affected Cargo.toml files as shown above." - exit 1 -else - echo "ALL WORKSPACE MEMBERS PASSED MSRV CHECKS!" - exit 0 -fi