diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7a4bba2603b..8748e040b17 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -328,6 +328,7 @@ jobs: gcr.io/flow-container-registry/collection:latest \ gcr.io/flow-container-registry/consensus:latest \ gcr.io/flow-container-registry/execution:latest \ + gcr.io/flow-container-registry/execution-ledger:latest \ gcr.io/flow-container-registry/ghost:latest \ gcr.io/flow-container-registry/observer:latest \ gcr.io/flow-container-registry/verification:latest \ diff --git a/.github/workflows/image_builds.yml b/.github/workflows/image_builds.yml index ea320a14c70..bda034e0fc3 100644 --- a/.github/workflows/image_builds.yml +++ b/.github/workflows/image_builds.yml @@ -1,4 +1,4 @@ -name: Build & Promote Docker Images to Public Registry +name: Build & Promote Docker Images to Public Registry on: workflow_dispatch: inputs: @@ -25,53 +25,59 @@ jobs: # The environment is set to 'container builds' that provides the necessary secrets for pushing to the pirvate registry. public-build: if: ${{ github.event.inputs.secure-build == 'false' }} - name: Execute public repo build & push to private artifact registry + name: Execute public repo build & push to private artifact registry runs-on: ubuntu-latest strategy: fail-fast: false matrix: - # We specify all of the potential build commands for each role. + # We specify all of the potential build commands for each role. # This allows us to build and push all images in parallel, reducing the overall build time. # The matrix is defined to include all roles & image types that we want to build and push. # These commands are targets defined in the Makefile of the repository. 
build_command: # access Build Commands - - docker-build-access-with-adx docker-push-access-with-adx - - docker-build-access-without-adx docker-push-access-without-adx - - docker-build-access-without-netgo-without-adx docker-push-access-without-netgo-without-adx - - docker-cross-build-access-arm docker-push-access-arm + - docker-build-access-with-adx docker-push-access-with-adx + - docker-build-access-without-adx docker-push-access-without-adx + - docker-build-access-without-netgo-without-adx docker-push-access-without-netgo-without-adx + - docker-cross-build-access-arm docker-push-access-arm # collection Build Commands - - docker-build-collection-with-adx docker-push-collection-with-adx - - docker-build-collection-without-adx docker-push-collection-without-adx - - docker-build-collection-without-netgo-without-adx docker-push-collection-without-netgo-without-adx - - docker-cross-build-collection-arm docker-push-collection-arm + - docker-build-collection-with-adx docker-push-collection-with-adx + - docker-build-collection-without-adx docker-push-collection-without-adx + - docker-build-collection-without-netgo-without-adx docker-push-collection-without-netgo-without-adx + - docker-cross-build-collection-arm docker-push-collection-arm # consensus Build Commands - - docker-build-consensus-with-adx docker-push-consensus-with-adx - - docker-build-consensus-without-adx docker-push-consensus-without-adx - - docker-build-consensus-without-netgo-without-adx docker-push-consensus-without-netgo-without-adx - - docker-cross-build-consensus-arm docker-push-consensus-arm + - docker-build-consensus-with-adx docker-push-consensus-with-adx + - docker-build-consensus-without-adx docker-push-consensus-without-adx + - docker-build-consensus-without-netgo-without-adx docker-push-consensus-without-netgo-without-adx + - docker-cross-build-consensus-arm docker-push-consensus-arm # execution Build Commands - - docker-build-execution-with-adx docker-push-execution-with-adx - - 
docker-build-execution-without-adx docker-push-execution-without-adx - - docker-build-execution-without-netgo-without-adx docker-push-execution-without-netgo-without-adx - - docker-cross-build-execution-arm docker-push-execution-arm + - docker-build-execution-with-adx docker-push-execution-with-adx + - docker-build-execution-without-adx docker-push-execution-without-adx + - docker-build-execution-without-netgo-without-adx docker-push-execution-without-netgo-without-adx + - docker-cross-build-execution-arm docker-push-execution-arm + + # execution Ledger Service Build Commands + - docker-build-execution-ledger-with-adx docker-push-execution-ledger-with-adx + - docker-build-execution-ledger-without-adx docker-push-execution-ledger-without-adx + - docker-build-execution-ledger-without-netgo-without-adx docker-push-execution-ledger-without-netgo-without-adx + - docker-cross-build-execution-ledger-arm docker-push-execution-ledger-arm # observer Build Commands - - docker-build-observer-with-adx docker-push-observer-with-adx - - docker-build-observer-without-adx docker-push-observer-without-adx - - docker-build-observer-without-netgo-without-adx docker-push-observer-without-netgo-without-adx - - docker-cross-build-observer-arm docker-push-observer-arm + - docker-build-observer-with-adx docker-push-observer-with-adx + - docker-build-observer-without-adx docker-push-observer-without-adx + - docker-build-observer-without-netgo-without-adx docker-push-observer-without-netgo-without-adx + - docker-cross-build-observer-arm docker-push-observer-arm # verification Build Commands - - docker-build-verification-with-adx docker-push-verification-with-adx - - docker-build-verification-without-adx docker-push-verification-without-adx - - docker-build-verification-without-netgo-without-adx docker-push-verification-without-netgo-without-adx - - docker-cross-build-verification-arm docker-push-verification-arm + - docker-build-verification-with-adx docker-push-verification-with-adx + - 
docker-build-verification-without-adx docker-push-verification-without-adx + - docker-build-verification-without-netgo-without-adx docker-push-verification-without-netgo-without-adx + - docker-cross-build-verification-arm docker-push-verification-arm - environment: container builds + environment: container builds steps: - name: Setup Go uses: actions/setup-go@v4 @@ -119,7 +125,7 @@ jobs: fail-fast: false matrix: role: [access, collection, consensus, execution, observer, verification] - environment: secure builds + environment: secure builds steps: - uses: actions/create-github-app-token@v2 id: app-token @@ -133,8 +139,8 @@ jobs: client_payload: '{"role": "${{ matrix.role }}", "tag": "${{ inputs.tag }}"}' github_token: ${{ steps.app-token.outputs.token }} owner: 'onflow' - repo: ${{ secrets.SECURE_BUILDS_REPO }} - ref: master-private + repo: ${{ secrets.SECURE_BUILDS_REPO }} + ref: master-private workflow_file_name: 'secure_build.yml' promote-to-partner-registry: @@ -155,19 +161,19 @@ jobs: fail-fast: false matrix: role: [access] - environment: ${{ matrix.role }} image promotion to partner registry + environment: ${{ matrix.role }} image promotion to partner registry steps: - name: Checkout repo uses: actions/checkout@v3 - - name: Promote ${{ matrix.role }} + - name: Promote ${{ matrix.role }} uses: ./actions/promote-images with: gcp_credentials: ${{ secrets.PARTNER_REGISTRY_PROMOTION_SECRET }} private_registry: ${{ vars.PRIVATE_REGISTRY }} private_registry_host: ${{ env.PRIVATE_REGISTRY_HOST }} promotion_registry: ${{ vars.PARTNER_REGISTRY }} - role: ${{ matrix.role }} + role: ${{ matrix.role }} tags: "${{ inputs.tag }},${{ inputs.tag }}-without-adx,${{ inputs.tag }}-without-netgo-without-adx,${{ inputs.tag }}-arm" promote-to-public-registry: @@ -187,18 +193,18 @@ jobs: fail-fast: false matrix: role: [access, collection, consensus, execution, observer, verification] - environment: ${{ matrix.role }} image promotion to public registry + environment: ${{ matrix.role 
}} image promotion to public registry steps: - name: Checkout repo uses: actions/checkout@v3 - - name: Promote ${{ matrix.role }} + - name: Promote ${{ matrix.role }} uses: ./actions/promote-images with: gcp_credentials: ${{ secrets.PUBLIC_REGISTRY_PROMOTION_SECRET }} private_registry: ${{ vars.PRIVATE_REGISTRY }} private_registry_host: ${{ env.PRIVATE_REGISTRY_HOST }} promotion_registry: ${{ vars.PUBLIC_REGISTRY }} - role: ${{ matrix.role }} + role: ${{ matrix.role }} tags: "${{ inputs.tag }},${{ inputs.tag }}-without-adx,${{ inputs.tag }}-without-netgo-without-adx,${{ inputs.tag }}-arm" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d7c51008df6..6aa00c5fcd3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -109,17 +109,11 @@ following when creating your pull request: A reviewer will be assigned automatically when your PR is created. -We use [bors](https://github.com/bors-ng/bors-ng) merge bot to ensure that the `master` branch never breaks. -Once a PR is approved, you can comment on it with the following to add your PR to the merge queue: +We use GitHub Actions to ensure that the `master` branch never breaks. +Once a PR is approved and CI passes, you can add it to the merge queue. +If the PR fails in the merge queue, you will need to fix it and try again. -``` -bors merge -``` - -If the PR passes CI, it will automatically be pushed to the `master` branch. If it fails, bors will comment -on the PR so you can fix it. - -See the [documentation](https://bors.tech/documentation/) for a more comprehensive list of bors commands. +See GitHub's [merge queue documentation](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/configuring-pull-request-merges/managing-a-merge-queue) for more details. 
## Style Guide diff --git a/Makefile b/Makefile index c6a1008442c..02f4f5899c3 100644 --- a/Makefile +++ b/Makefile @@ -145,7 +145,7 @@ generate: generate-proto generate-mocks generate-fvm-env-wrappers .PHONY: generate-proto generate-proto: - prototool generate protobuf + cd ledger/protobuf && buf generate .PHONY: generate-fvm-env-wrappers generate-fvm-env-wrappers: @@ -371,6 +371,42 @@ docker-cross-build-execution-arm: --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG_ARM}" \ -t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_ARM)" . +.PHONY: docker-build-execution-ledger-with-adx +docker-build-execution-ledger-with-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ledger --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=amd64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG)" \ + -t "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG)" . + +.PHONY: docker-build-execution-ledger-without-adx +docker-build-execution-ledger-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ledger --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_ADX) --build-arg GOARCH=amd64 --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG_NO_ADX)" . 
+ +.PHONY: docker-build-execution-ledger-without-netgo-without-adx +docker-build-execution-ledger-without-netgo-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ledger --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO_NO_ADX) --build-arg GOARCH=amd64 --build-arg TAGS="" --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG_NO_NETGO_NO_ADX)" . + +.PHONY: docker-cross-build-execution-ledger-arm +docker-cross-build-execution-ledger-arm: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ledger --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_ARM) --build-arg GOARCH=arm64 --build-arg CC=aarch64-linux-gnu-gcc --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG_ARM}" \ + -t "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG_ARM)" . + +.PHONY: docker-native-build-execution-ledger +docker-native-build-execution-ledger: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ledger --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ + -t "$(CONTAINER_REGISTRY)/execution-ledger:latest" \ + -t "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG)" . 
+ .PHONY: docker-native-build-execution-debug docker-native-build-execution-debug: docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target debug \ @@ -558,7 +594,7 @@ docker-native-build-ghost-debug: -t "$(CONTAINER_REGISTRY)/ghost-debug:latest" \ -t "$(CONTAINER_REGISTRY)/ghost-debug:$(IMAGE_TAG)" . -PHONY: docker-build-bootstrap +.PHONY: docker-build-bootstrap docker-build-bootstrap: docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/bootstrap --build-arg GOARCH=$(GOARCH) --build-arg VERSION=$(IMAGE_TAG) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY \ @@ -590,7 +626,7 @@ docker-native-build-loader: -t "$(CONTAINER_REGISTRY)/loader:$(IMAGE_TAG)" . .PHONY: docker-native-build-flow -docker-native-build-flow: docker-native-build-collection docker-native-build-consensus docker-native-build-execution docker-native-build-verification docker-native-build-access docker-native-build-observer docker-native-build-ghost +docker-native-build-flow: docker-native-build-collection docker-native-build-consensus docker-native-build-execution docker-native-build-execution-ledger docker-native-build-verification docker-native-build-access docker-native-build-observer docker-native-build-ghost .PHONY: docker-build-flow-with-adx docker-build-flow-with-adx: docker-build-collection-with-adx docker-build-consensus-with-adx docker-build-execution-with-adx docker-build-verification-with-adx docker-build-access-with-adx docker-build-observer-with-adx @@ -676,6 +712,22 @@ docker-push-execution-arm: docker-push-execution-latest: docker-push-execution docker push "$(CONTAINER_REGISTRY)/execution:latest" +.PHONY: docker-push-execution-ledger-with-adx +docker-push-execution-ledger-with-adx: + docker push "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG)" + +.PHONY: 
docker-push-execution-ledger-without-adx +docker-push-execution-ledger-without-adx: + docker push "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG_NO_ADX)" + +.PHONY: docker-push-execution-ledger-without-netgo-without-adx +docker-push-execution-ledger-without-netgo-without-adx: + docker push "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG_NO_NETGO_NO_ADX)" + +.PHONY: docker-push-execution-ledger-arm +docker-push-execution-ledger-arm: + docker push "$(CONTAINER_REGISTRY)/execution-ledger:$(IMAGE_TAG_ARM)" + .PHONY: docker-push-verification-with-adx docker-push-verification-with-adx: docker push "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG)" diff --git a/admin/buf.lock b/admin/buf.lock index 7d3579dcf64..188ba61c36c 100644 --- a/admin/buf.lock +++ b/admin/buf.lock @@ -1,9 +1,8 @@ # Generated by buf. DO NOT EDIT. +version: v1beta1 deps: - remote: buf.build owner: googleapis repository: googleapis - branch: main - commit: 04ad98c82478417784639b43e71c6b4c - digest: b1-8nhYmpcJRqI1lyfXpbPH_nQjQfzgGoVHXq_gA7E4mjg= - create_time: 2021-09-07T16:08:38.569839Z + commit: 004180b77378443887d3b55cabc00384 + digest: shake256:d26c7c2fd95f0873761af33ca4a0c0d92c8577122b6feb74eb3b0a57ebe47a98ab24a209a0e91945ac4c77204e9da0c2de0020b2cedc27bdbcdea6c431eec69b diff --git a/admin/command_runner.go b/admin/command_runner.go index a16c0085ff0..2ca260cb93a 100644 --- a/admin/command_runner.go +++ b/admin/command_runner.go @@ -195,6 +195,13 @@ func (r *CommandRunner) runAdminServer(ctx irrecoverable.SignalerContext) error r.logger.Info().Msg("admin server starting up") + // Remove stale socket file from previous run (e.g. 
after container/process restart) + if _, err := os.Stat(r.grpcAddress); err == nil { + if removeErr := os.Remove(r.grpcAddress); removeErr != nil { + r.logger.Warn().Err(removeErr).Str("socket", r.grpcAddress).Msg("failed to remove stale admin socket") + } + } + listener, err := net.Listen("unix", r.grpcAddress) if err != nil { return fmt.Errorf("failed to listen on admin server address: %w", err) diff --git a/admin/commands/execution/checkpoint_trigger.go b/admin/commands/execution/checkpoint_trigger.go index 481b3fa3199..0a396ec23e4 100644 --- a/admin/commands/execution/checkpoint_trigger.go +++ b/admin/commands/execution/checkpoint_trigger.go @@ -13,14 +13,25 @@ import ( var _ commands.AdminCommand = (*TriggerCheckpointCommand)(nil) // TriggerCheckpointCommand will send a signal to compactor to trigger checkpoint -// once finishing writing the current WAL segment file +// once finishing writing the current WAL segment file. +// When running in remote ledger mode (ledgerServiceAddr is non-empty), this command +// returns an error directing users to the ledger service's admin endpoint. type TriggerCheckpointCommand struct { - trigger *atomic.Bool + trigger *atomic.Bool + ledgerServiceAddr string // non-empty when using remote ledger service + ledgerServiceAdminAddr string // admin HTTP address for remote ledger service } -func NewTriggerCheckpointCommand(trigger *atomic.Bool) *TriggerCheckpointCommand { +// NewTriggerCheckpointCommand creates a new TriggerCheckpointCommand. 
+// Parameters: +// - trigger: atomic bool to signal the compactor (used only in local ledger mode) +// - ledgerServiceAddr: gRPC address of the remote ledger service (empty string for local mode) +// - ledgerServiceAdminAddr: admin HTTP address of the remote ledger service (for error messages) +func NewTriggerCheckpointCommand(trigger *atomic.Bool, ledgerServiceAddr, ledgerServiceAdminAddr string) *TriggerCheckpointCommand { return &TriggerCheckpointCommand{ - trigger: trigger, + trigger: trigger, + ledgerServiceAddr: ledgerServiceAddr, + ledgerServiceAdminAddr: ledgerServiceAdminAddr, } } @@ -35,5 +46,23 @@ func (s *TriggerCheckpointCommand) Handler(_ context.Context, _ *admin.CommandRe } func (s *TriggerCheckpointCommand) Validator(_ *admin.CommandRequest) error { + // When using remote ledger service, checkpointing is handled by the ledger service + if s.ledgerServiceAddr != "" { + if s.ledgerServiceAdminAddr == "" { + return admin.NewInvalidAdminReqErrorf( + "trigger-checkpoint is not available when using remote ledger service (connected to %s). "+ + "Please use the ledger service's admin endpoint instead. "+ + "The admin address was not configured - check if the ledger service was started with --admin-addr", + s.ledgerServiceAddr, + ) + } + return admin.NewInvalidAdminReqErrorf( + "trigger-checkpoint is not available when using remote ledger service (connected to %s). 
"+ + "Please use the ledger service's admin endpoint instead: "+ + "curl -X POST http://%s/admin/run_command -H 'Content-Type: application/json' -d '{\"commandName\": \"trigger-checkpoint\", \"data\": {}}'", + s.ledgerServiceAddr, + s.ledgerServiceAdminAddr, + ) + } return nil } diff --git a/cmd/bootstrap/run/execution_state.go b/cmd/bootstrap/run/execution_state.go index ab76c4e036b..257f088f8f6 100644 --- a/cmd/bootstrap/run/execution_state.go +++ b/cmd/bootstrap/run/execution_state.go @@ -10,7 +10,7 @@ import ( "github.com/onflow/flow-go/engine/execution/state/bootstrap" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/ledger/common/pathfinder" - ledger "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/ledger/complete/wal" bootstrapFilenames "github.com/onflow/flow-go/model/bootstrap" @@ -37,12 +37,12 @@ func GenerateExecutionState( return flow.DummyStateCommitment, err } - ledgerStorage, err := ledger.NewLedger(diskWal, capacity, metricsCollector, zerolog.Nop(), ledger.DefaultPathFinderVersion) + ledgerStorage, err := complete.NewLedger(diskWal, capacity, metricsCollector, zerolog.Nop(), complete.DefaultPathFinderVersion) if err != nil { return flow.DummyStateCommitment, err } - compactor, err := ledger.NewCompactor(ledgerStorage, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metricsCollector) + compactor, err := complete.NewCompactor(ledgerStorage, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metricsCollector) if err != nil { return flow.DummyStateCommitment, err } diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 6fe7a0d569a..c35a2d42a05 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -63,10 +63,9 @@ import ( "github.com/onflow/flow-go/fvm" 
"github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/systemcontracts" - ledgerpkg "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/pathfinder" - ledger "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/complete/wal" + ledgerfactory "github.com/onflow/flow-go/ledger/factory" modelbootstrap "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -130,7 +129,7 @@ type ExecutionNode struct { executionState state.ExecutionState followerState protocol.FollowerState committee hotstuff.DynamicCommittee - ledgerStorage *ledger.Ledger + ledgerStorage ledger.Ledger registerStore *storehouse.RegisterStore // storage @@ -160,7 +159,6 @@ type ExecutionNode struct { scriptsEng *scripts.Engine followerDistributor *pubsub.FollowerDistributor checkAuthorizedAtBlock func(blockID flow.Identifier) (bool, error) - diskWAL *wal.DiskWAL blockDataUploader *uploader.Manager executionDataStore execution_data.ExecutionDataStore toTriggerCheckpoint *atomic.Bool // create the checkpoint trigger to be controlled by admin tool, and listened by the compactor @@ -195,7 +193,7 @@ func (builder *ExecutionNodeBuilder) LoadComponentsAndModules() { return stateSyncCommands.NewReadExecutionDataCommand(exeNode.executionDataStore) }). AdminCommand("trigger-checkpoint", func(config *NodeConfig) commands.AdminCommand { - return executionCommands.NewTriggerCheckpointCommand(exeNode.toTriggerCheckpoint) + return executionCommands.NewTriggerCheckpointCommand(exeNode.toTriggerCheckpoint, exeNode.exeConf.ledgerServiceAddr, exeNode.exeConf.ledgerServiceAdminAddr) }). 
AdminCommand("stop-at-height", func(config *NodeConfig) commands.AdminCommand { return executionCommands.NewStopAtHeightCommand(exeNode.stopControl) @@ -244,7 +242,6 @@ func (builder *ExecutionNodeBuilder) LoadComponentsAndModules() { "chunk_data_pack", exeNode.chunkDataPackDB) }). Component("stop control", exeNode.LoadStopControl). - Component("execution state ledger WAL compactor", exeNode.LoadExecutionStateLedgerWALCompactor). // disable execution data pruner for now, since storehouse is going to need the execution data // for recovery, // TODO: will re-visit this once storehouse has implemented new WAL for checkpoint file of @@ -678,14 +675,8 @@ func (exeNode *ExecutionNode) LoadProviderEngine( blockSnapshot, _, err := exeNode.executionState.CreateStorageSnapshot(blockID) if err != nil { - tries, _ := exeNode.ledgerStorage.Tries() - trieInfo := "empty" - if len(tries) > 0 { - trieInfo = fmt.Sprintf("length: %v, 1st: %v, last: %v", len(tries), tries[0].RootHash(), tries[len(tries)-1].RootHash()) - } - - return nil, fmt.Errorf("cannot create a storage snapshot at block %v at height %v, trie: %s: %w", blockID, - height, trieInfo, err) + return nil, fmt.Errorf("cannot create a storage snapshot at block %v at height %v : %w", blockID, + height, err) } // Get the epoch counter from the smart contract at the last executed block. @@ -919,36 +910,27 @@ func (exeNode *ExecutionNode) LoadExecutionStateLedger( module.ReadyDoneAware, error, ) { - // DiskWal is a dependent component because we need to ensure - // that all WAL updates are completed before closing opened WAL segment. 
- var err error - exeNode.diskWAL, err = wal.NewDiskWAL(node.Logger.With().Str("subcomponent", "wal").Logger(), - node.MetricsRegisterer, exeNode.collector, exeNode.exeConf.triedir, int(exeNode.exeConf.mTrieCacheSize), pathfinder.PathByteSize, wal.SegmentSize) + // Create ledger using factory + ledgerStorage, err := ledgerfactory.NewLedger(ledgerfactory.Config{ + LedgerServiceAddr: exeNode.exeConf.ledgerServiceAddr, + LedgerMaxRequestSize: exeNode.exeConf.ledgerMaxRequestSize, + LedgerMaxResponseSize: exeNode.exeConf.ledgerMaxResponseSize, + Triedir: exeNode.exeConf.triedir, + MTrieCacheSize: exeNode.exeConf.mTrieCacheSize, + CheckpointDistance: exeNode.exeConf.checkpointDistance, + CheckpointsToKeep: exeNode.exeConf.checkpointsToKeep, + MetricsRegisterer: node.MetricsRegisterer, + WALMetrics: exeNode.collector, + LedgerMetrics: exeNode.collector, + Logger: node.Logger, + }, exeNode.toTriggerCheckpoint) if err != nil { - return nil, fmt.Errorf("failed to initialize wal: %w", err) + return nil, err } - exeNode.ledgerStorage, err = ledger.NewLedger(exeNode.diskWAL, int(exeNode.exeConf.mTrieCacheSize), exeNode.collector, node.Logger.With().Str("subcomponent", - "ledger").Logger(), ledger.DefaultPathFinderVersion) - return exeNode.ledgerStorage, err -} + exeNode.ledgerStorage = ledgerStorage -func (exeNode *ExecutionNode) LoadExecutionStateLedgerWALCompactor( - node *NodeConfig, -) ( - module.ReadyDoneAware, - error, -) { - return ledger.NewCompactor( - exeNode.ledgerStorage, - exeNode.diskWAL, - node.Logger.With().Str("subcomponent", "checkpointer").Logger(), - uint(exeNode.exeConf.mTrieCacheSize), - exeNode.exeConf.checkpointDistance, - exeNode.exeConf.checkpointsToKeep, - exeNode.toTriggerCheckpoint, // compactor will listen to the signal from admin tool for force triggering checkpointing - exeNode.collector, - ) + return exeNode.ledgerStorage, nil } func (exeNode *ExecutionNode) LoadExecutionDataPruner( @@ -1430,7 +1412,7 @@ func (exeNode *ExecutionNode) 
LoadBootstrapper(node *NodeConfig) error { node.Logger, path.Join(node.BootstrapDir, modelbootstrap.DirnameExecutionState), modelbootstrap.FilenameWALRootCheckpoint, - ledgerpkg.RootHash(node.RootSeal.FinalState), + ledger.RootHash(node.RootSeal.FinalState), ) if err != nil { return err diff --git a/cmd/execution_config.go b/cmd/execution_config.go index 56a40f95e33..91fc503782b 100644 --- a/cmd/execution_config.go +++ b/cmd/execution_config.go @@ -20,6 +20,7 @@ import ( "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/mempool" @@ -81,6 +82,11 @@ type ExecutionConfig struct { pruningConfigBatchSize uint pruningConfigSleepAfterCommit time.Duration pruningConfigSleepAfterIteration time.Duration + + ledgerServiceAddr string // gRPC address for remote ledger service (empty means use local ledger) + ledgerServiceAdminAddr string // Admin HTTP address for remote ledger service (for trigger-checkpoint command) + ledgerMaxRequestSize uint // Maximum request message size in bytes for remote ledger client (0 = default 1 GiB) + ledgerMaxResponseSize uint // Maximum response message size in bytes for remote ledger client (0 = default 1 GiB) } func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { @@ -97,9 +103,9 @@ func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { flags.StringVar(&exeConf.triedir, "triedir", filepath.Join(datadir, "trie"), "directory to store the execution State") flags.StringVar(&exeConf.executionDataDir, "execution-data-dir", filepath.Join(datadir, "execution_data"), "directory to use for storing Execution Data") flags.StringVar(&exeConf.registerDir, "register-dir", filepath.Join(datadir, "register"), "directory to use for storing registers Data") - 
flags.Uint32Var(&exeConf.mTrieCacheSize, "mtrie-cache-size", 500, "cache size for MTrie") - flags.UintVar(&exeConf.checkpointDistance, "checkpoint-distance", 20, "number of WAL segments between checkpoints") - flags.UintVar(&exeConf.checkpointsToKeep, "checkpoints-to-keep", 5, "number of recent checkpoints to keep (0 to keep all)") + flags.Uint32Var(&exeConf.mTrieCacheSize, "mtrie-cache-size", ledger.DefaultMTrieCacheSize, "cache size for MTrie") + flags.UintVar(&exeConf.checkpointDistance, "checkpoint-distance", ledger.DefaultCheckpointDistance, "number of WAL segments between checkpoints") + flags.UintVar(&exeConf.checkpointsToKeep, "checkpoints-to-keep", ledger.DefaultCheckpointsToKeep, "number of recent checkpoints to keep (0 to keep all)") flags.UintVar(&exeConf.computationConfig.DerivedDataCacheSize, "cadence-execution-cache", derived.DefaultDerivedDataCacheSize, "cache size for Cadence execution") flags.BoolVar(&exeConf.computationConfig.ExtensiveTracing, "extensive-tracing", false, "adds high-overhead tracing to execution") @@ -160,6 +166,10 @@ func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { flags.UintVar(&exeConf.pruningConfigBatchSize, "pruning-config-batch-size", exepruner.DefaultConfig.BatchSize, "the batch size is the number of blocks that we want to delete in one batch, default 1200") flags.DurationVar(&exeConf.pruningConfigSleepAfterCommit, "pruning-config-sleep-after-commit", exepruner.DefaultConfig.SleepAfterEachBatchCommit, "sleep time after each batch commit, default 1s") flags.DurationVar(&exeConf.pruningConfigSleepAfterIteration, "pruning-config-sleep-after-iteration", exepruner.DefaultConfig.SleepAfterEachIteration, "sleep time after each iteration, default max int64") + flags.StringVar(&exeConf.ledgerServiceAddr, "ledger-service-addr", "", "gRPC address for remote ledger service (TCP: e.g., localhost:9000, or Unix socket: unix:///path/to/socket). 
If empty, uses local ledger") + flags.StringVar(&exeConf.ledgerServiceAdminAddr, "ledger-service-admin-addr", "", "admin HTTP address for remote ledger service (e.g., localhost:9003). Used to provide helpful error messages when trigger-checkpoint is called in remote mode") + flags.UintVar(&exeConf.ledgerMaxRequestSize, "ledger-max-request-size", 0, "maximum request message size in bytes for remote ledger client (0 = default 1 GiB)") + flags.UintVar(&exeConf.ledgerMaxResponseSize, "ledger-max-response-size", 0, "maximum response message size in bytes for remote ledger client (0 = default 1 GiB)") } func (exeConf *ExecutionConfig) ValidateFlags() error { diff --git a/cmd/ledger/README.md b/cmd/ledger/README.md new file mode 100644 index 00000000000..b71e7573c14 --- /dev/null +++ b/cmd/ledger/README.md @@ -0,0 +1,103 @@ +# Ledger Service + +A standalone gRPC service that provides remote access to ledger operations. + +## Building + +The protobuf code must be generated first: + +```bash +cd ledger/protobuf +buf generate +``` + +Then build the service: + +```bash +go build -o flow-ledger-service ./cmd/ledger +``` + +## Running + +```bash +# Listen on TCP only +./flow-ledger-service \ + -triedir /path/to/trie \ + -ledger-service-tcp 0.0.0.0:9000 + +# Listen on Unix socket only +./flow-ledger-service \ + -triedir /path/to/trie \ + -ledger-service-socket /sockets/ledger.sock + +# Listen on both TCP and Unix socket +./flow-ledger-service \ + -triedir /path/to/trie \ + -ledger-service-tcp 0.0.0.0:9000 \ + -ledger-service-socket /sockets/ledger.sock \ + -mtrie-cache-size 500 \ + -checkpoint-distance 100 \ + -checkpoints-to-keep 3 + +# With admin server enabled (use port 9003 to avoid conflict with execution node's 9002) +./flow-ledger-service \ + -triedir /path/to/trie \ + -ledger-service-tcp 0.0.0.0:9000 \ + -admin-addr 0.0.0.0:9003 +``` + +## Flags + +- `-triedir`: Directory for trie files (required) +- `-ledger-service-tcp`: TCP listen address (e.g., 0.0.0.0:9000). 
If provided, server accepts TCP connections. +- `-ledger-service-socket`: Unix socket path (e.g., /sockets/ledger.sock). If provided, server accepts Unix socket connections. Can specify multiple sockets separated by commas. +- **Note**: At least one of `-ledger-service-tcp` or `-ledger-service-socket` must be provided. +- `-admin-addr`: Address to bind on for admin HTTP server (e.g., 0.0.0.0:9003). If provided, enables admin commands. Use a different port than the execution node's admin server (default 9002). Optional. +- `-mtrie-cache-size`: MTrie cache size - number of tries (default: 500) +- `-checkpoint-distance`: Checkpoint distance (default: 100) +- `-checkpoints-to-keep`: Number of checkpoints to keep (default: 3) +- `-max-request-size`: Maximum request message size in bytes (default: 1 GiB) +- `-max-response-size`: Maximum response message size in bytes (default: 1 GiB) +- `-loglevel`: Log level (panic, fatal, error, warn, info, debug) (default: info)
+ +**Examples:** +```bash +# Trigger a checkpoint +curl -X POST http://localhost:9003/admin/run_command \ + -H "Content-Type: application/json" \ + -d '{"commandName": "trigger-checkpoint", "data": {}}' + +# Ping the admin server +curl -X POST http://localhost:9003/admin/run_command \ + -H "Content-Type: application/json" \ + -d '{"commandName": "ping", "data": {}}' + +# List all available commands +curl -X POST http://localhost:9003/admin/run_command \ + -H "Content-Type: application/json" \ + -d '{"commandName": "list-commands", "data": {}}' +``` + +**Note:** When running an execution node with a remote ledger service (using `--ledger-service-addr`), the `trigger-checkpoint` command on the execution node is disabled. You must use the ledger service's admin endpoint to trigger checkpoints. + +## API + +The service implements the `LedgerService` gRPC interface defined in `ledger/protobuf/ledger.proto`: + +- `InitialState()` - Returns the initial state of the ledger +- `HasState()` - Checks if a state exists +- `GetSingleValue()` - Gets a single value for a key +- `Get()` - Gets multiple values for keys +- `Set()` - Updates keys with new values +- `Prove()` - Generates proofs for keys + diff --git a/cmd/ledger/admin.go b/cmd/ledger/admin.go new file mode 100644 index 00000000000..607d9968b7a --- /dev/null +++ b/cmd/ledger/admin.go @@ -0,0 +1,96 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/rs/zerolog" + "go.uber.org/atomic" +) + +// adminRequest represents the JSON request body for admin commands. +// This matches the format used by the execution node's admin framework. +type adminRequest struct { + CommandName string `json:"commandName"` + Data json.RawMessage `json:"data,omitempty"` +} + +// adminResponse represents the JSON response for admin commands. +// This matches the format used by the execution node's admin framework. 
+type adminResponse struct { + Output any `json:"output,omitempty"` + Error string `json:"error,omitempty"` +} + +// adminHandler is a simple HTTP-only admin server for the ledger service. +// Unlike the execution node's admin framework (which uses gRPC + HTTP gateway), +// this directly handles HTTP requests without the gRPC proxy layer. +type adminHandler struct { + logger zerolog.Logger + triggerCheckpoint *atomic.Bool + commands []string +} + +// newAdminHandler creates a new admin HTTP handler. +func newAdminHandler(logger zerolog.Logger, triggerCheckpoint *atomic.Bool) http.Handler { + h := &adminHandler{ + logger: logger.With().Str("component", "admin").Logger(), + triggerCheckpoint: triggerCheckpoint, + commands: []string{"ping", "list-commands", "trigger-checkpoint"}, + } + + mux := http.NewServeMux() + mux.HandleFunc("/admin/run_command", h.handleCommand) + return mux +} + +func (h *adminHandler) handleCommand(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + if r.Method != http.MethodPost { + h.writeError(w, http.StatusMethodNotAllowed, "method not allowed, use POST") + return + } + + var req adminRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, http.StatusBadRequest, fmt.Sprintf("invalid JSON: %v", err)) + return + } + + h.logger.Info().Str("command", req.CommandName).Msg("received admin command") + + var result any + + switch req.CommandName { + case "ping": + result = "pong" + + case "list-commands": + result = h.commands + + case "trigger-checkpoint": + if h.triggerCheckpoint.CompareAndSwap(false, true) { + h.logger.Info().Msg("trigger checkpoint as soon as finishing writing the current segment file") + result = "ok" + } else { + result = "checkpoint already triggered" + } + + default: + h.writeError(w, http.StatusBadRequest, fmt.Sprintf("unknown command: %s", req.CommandName)) + return + } + + h.writeSuccess(w, result) +} + +func (h *adminHandler) writeError(w 
http.ResponseWriter, status int, msg string) { + w.WriteHeader(status) + _ = json.NewEncoder(w).Encode(adminResponse{Error: msg}) +} + +func (h *adminHandler) writeSuccess(w http.ResponseWriter, output any) { + _ = json.NewEncoder(w).Encode(adminResponse{Output: output}) +} diff --git a/cmd/ledger/main.go b/cmd/ledger/main.go new file mode 100644 index 00000000000..eb54d735317 --- /dev/null +++ b/cmd/ledger/main.go @@ -0,0 +1,306 @@ +package main + +import ( + "context" + "flag" + "fmt" + "net" + "net/http" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" + "go.uber.org/atomic" + "google.golang.org/grpc" + + ledgerfactory "github.com/onflow/flow-go/ledger/factory" + ledgerpb "github.com/onflow/flow-go/ledger/protobuf" + "github.com/onflow/flow-go/ledger/remote" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" +) + +var ( + triedir = flag.String("triedir", "", "Directory for trie files (required)") + ledgerServiceTCP = flag.String("ledger-service-tcp", "", "Ledger service TCP listen address (e.g., 0.0.0.0:9000). If provided, server accepts TCP connections.") + ledgerServiceSocket = flag.String("ledger-service-socket", "", "Ledger service Unix socket path (e.g., /sockets/ledger.sock). If provided, server accepts Unix socket connections. Can specify multiple sockets separated by comma.") + adminAddr = flag.String("admin-addr", "", "Address to bind on for admin HTTP server (e.g., 0.0.0.0:9003). If provided, enables admin commands. Use a different port than the execution node's admin server (default 9002).") + metricsPort = flag.Uint("metrics-port", 0, "Port for Prometheus metrics server (e.g., 8080). 
If 0, metrics server is disabled.") + mtrieCacheSize = flag.Int("mtrie-cache-size", 500, "MTrie cache size (number of tries)") + checkpointDist = flag.Uint("checkpoint-distance", 100, "Checkpoint distance") + checkpointsToKeep = flag.Uint("checkpoints-to-keep", 3, "Number of checkpoints to keep") + logLevel = flag.String("loglevel", "info", "Log level (panic, fatal, error, warn, info, debug)") + maxRequestSize = flag.Uint("max-request-size", 1<<30, "Maximum request message size in bytes (default: 1 GiB)") + maxResponseSize = flag.Uint("max-response-size", 1<<30, "Maximum response message size in bytes (default: 1 GiB)") +) + +func main() { + flag.Parse() + + if *triedir == "" { + fmt.Fprintf(os.Stderr, "error: --triedir is required\n") + os.Exit(1) + } + + // Parse and set log level + lvl, err := zerolog.ParseLevel(strings.ToLower(*logLevel)) + if err != nil { + fmt.Fprintf(os.Stderr, "error: invalid log level %q: %v\n", *logLevel, err) + os.Exit(1) + } + zerolog.SetGlobalLevel(lvl) + + logger := zerolog.New(os.Stderr).With(). + Timestamp(). + Str("service", "ledger"). + Logger() + + // Validate that at least one address is provided + if *ledgerServiceTCP == "" && *ledgerServiceSocket == "" { + logger.Fatal().Msg("at least one of --ledger-service-tcp or --ledger-service-socket must be provided") + } + + logger.Info(). + Str("triedir", *triedir). + Str("ledger_service_tcp", *ledgerServiceTCP). + Str("ledger_service_socket", *ledgerServiceSocket). + Str("admin_addr", *adminAddr). + Uint("metrics_port", *metricsPort). + Int("mtrie_cache_size", *mtrieCacheSize). 
+ Msg("starting ledger service") + + // Create trigger for manual checkpointing (used by admin command) + triggerCheckpointOnNextSegmentFinish := atomic.NewBool(false) + + // Create ledger using factory + metricsCollector := metrics.NewLedgerCollector("ledger", "wal") + ledgerStorage, err := ledgerfactory.NewLedger(ledgerfactory.Config{ + Triedir: *triedir, + MTrieCacheSize: uint32(*mtrieCacheSize), + CheckpointDistance: *checkpointDist, + CheckpointsToKeep: *checkpointsToKeep, + MetricsRegisterer: prometheus.DefaultRegisterer, + WALMetrics: metricsCollector, + LedgerMetrics: metricsCollector, + Logger: logger, + }, triggerCheckpointOnNextSegmentFinish) + if err != nil { + logger.Fatal().Err(err).Msg("failed to create ledger") + } + + // Wait for ledger to be ready (WAL replay) + logger.Info().Msg("waiting for ledger initialization...") + <-ledgerStorage.Ready() + logger.Info().Msg("ledger ready") + + // Check if any trie is loaded after startup + stateCount := ledgerStorage.StateCount() + if stateCount == 0 { + logger.Fatal().Msg("no trie loaded after startup - no states available") + } + + // Get the last trie state for logging + lastState, err := ledgerStorage.StateByIndex(-1) + if err != nil { + logger.Fatal().Err(err).Msg("failed to get last state for logging") + } + logger.Info(). + Int("state_count", stateCount). + Str("last_state", lastState.String()). + Msg("ledger health check passed") + + // Create gRPC server with max message size configuration. + // Default to 1 GiB for responses (instead of standard 4 MiB) to handle large proofs that can exceed 4MB. + // This was increased to fix "grpc: received message larger than max" errors when generating + // proofs for blocks with many state changes. 
+ grpcServer := grpc.NewServer( + grpc.MaxRecvMsgSize(int(*maxRequestSize)), + grpc.MaxSendMsgSize(int(*maxResponseSize)), + ) + + // Create and register ledger service + ledgerService := remote.NewService(ledgerStorage, logger) + ledgerpb.RegisterLedgerServiceServer(grpcServer, ledgerService) + + // Create listeners based on provided flags + type listenerInfo struct { + listener net.Listener + address string + socketPath string + isUnixSocket bool + } + var listeners []listenerInfo + var socketPaths []string + + // Create TCP listener if TCP address is provided + if *ledgerServiceTCP != "" { + lis, err := net.Listen("tcp", *ledgerServiceTCP) + if err != nil { + logger.Fatal().Err(err).Str("address", *ledgerServiceTCP).Msg("failed to listen on TCP") + } + + logger.Info().Str("address", *ledgerServiceTCP).Msg("gRPC server listening on TCP") + listeners = append(listeners, listenerInfo{ + listener: lis, + address: *ledgerServiceTCP, + socketPath: "", + isUnixSocket: false, + }) + } + + // Create Unix socket listeners if socket path(s) are provided + if *ledgerServiceSocket != "" { + // Support multiple socket paths separated by comma + socketPathsList := strings.Split(*ledgerServiceSocket, ",") + for _, socketPath := range socketPathsList { + socketPath = strings.TrimSpace(socketPath) + if socketPath == "" { + continue + } + + // Ensure the socket directory exists + socketDir := filepath.Dir(socketPath) + if socketDir != "" && socketDir != "." 
{ + if err := os.MkdirAll(socketDir, 0755); err != nil { + logger.Fatal().Err(err).Str("socket_dir", socketDir).Msg("failed to create socket directory") + } + } + + // Clean up any existing socket file + if _, err := os.Stat(socketPath); err == nil { + logger.Info().Str("socket_path", socketPath).Msg("removing existing socket file") + if err := os.Remove(socketPath); err != nil { + logger.Warn().Err(err).Str("socket_path", socketPath).Msg("failed to remove existing socket file") + } + } + + lis, err := net.Listen("unix", socketPath) + if err != nil { + logger.Fatal().Err(err).Str("socket_path", socketPath).Msg("failed to listen on Unix socket") + } + + // Set socket file permissions (readable/writable by owner and group) + if err := os.Chmod(socketPath, 0660); err != nil { + logger.Warn().Err(err).Str("socket_path", socketPath).Msg("failed to set socket file permissions") + } + + logger.Info().Str("socket_path", socketPath).Msg("gRPC server listening on Unix domain socket") + socketPaths = append(socketPaths, socketPath) + listeners = append(listeners, listenerInfo{ + listener: lis, + address: socketPath, + socketPath: socketPath, + isUnixSocket: true, + }) + } + } + + // Set up metrics server if metrics port is provided + var metricsServer *metrics.Server + var metricsCancel context.CancelFunc + if *metricsPort > 0 { + metricsServer = metrics.NewServer(logger, *metricsPort) + + metricsCtx, cancel := context.WithCancel(context.Background()) + metricsCancel = cancel + + signalerCtx, errChan := irrecoverable.WithSignaler(metricsCtx) + go func() { + metricsServer.Start(signalerCtx) + select { + case err := <-errChan: + if err != nil { + logger.Error().Err(err).Msg("metrics server encountered irrecoverable error") + } + case <-metricsCtx.Done(): + } + }() + + <-metricsServer.Ready() + logger.Info().Uint("metrics_port", *metricsPort).Msg("metrics server started") + } + + // Set up simple HTTP admin server if admin address is provided + // This is a lightweight HTTP-only 
server (no gRPC proxy layer) + var adminServer *http.Server + if *adminAddr != "" { + adminHandler := newAdminHandler(logger, triggerCheckpointOnNextSegmentFinish) + adminServer = &http.Server{ + Addr: *adminAddr, + Handler: adminHandler, + } + + go func() { + logger.Info().Str("admin_addr", *adminAddr).Msg("starting admin HTTP server") + if err := adminServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + logger.Error().Err(err).Msg("admin HTTP server error") + } + }() + } + + // Start server on all listeners in separate goroutines + errCh := make(chan error, len(listeners)) + for _, info := range listeners { + info := info // capture loop variable + go func() { + if err := grpcServer.Serve(info.listener); err != nil { + errCh <- fmt.Errorf("gRPC server error on %s: %w", info.address, err) + } + }() + } + + // Wait for interrupt signal or error + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) + + select { + case sig := <-sigCh: + logger.Info().Str("signal", sig.String()).Msg("received signal, shutting down") + case err := <-errCh: + logger.Error().Err(err).Msg("server error") + } + + // Graceful shutdown + logger.Info().Msg("shutting down gRPC server...") + grpcServer.GracefulStop() + + // Clean up Unix socket files + for _, socketPath := range socketPaths { + if socketPath != "" { + if err := os.Remove(socketPath); err != nil { + logger.Warn().Err(err).Str("socket_path", socketPath).Msg("failed to remove socket file") + } else { + logger.Info().Str("socket_path", socketPath).Msg("removed socket file") + } + } + } + + // Shutdown admin server if it was started + if adminServer != nil { + logger.Info().Msg("shutting down admin server...") + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer shutdownCancel() + if err := adminServer.Shutdown(shutdownCtx); err != nil { + logger.Warn().Err(err).Msg("admin server shutdown error") + } + logger.Info().Msg("admin server 
stopped") + } + + // Shutdown metrics server if it was started + if metricsServer != nil && metricsCancel != nil { + logger.Info().Msg("shutting down metrics server...") + metricsCancel() + <-metricsServer.Done() + logger.Info().Msg("metrics server stopped") + } + + logger.Info().Msg("waiting for ledger to stop...") + <-ledgerStorage.Done() + + logger.Info().Msg("ledger service stopped") +} diff --git a/fvm/errors/codes.go b/fvm/errors/codes.go index e73ee94dc2b..c5af9b56e34 100644 --- a/fvm/errors/codes.go +++ b/fvm/errors/codes.go @@ -70,7 +70,8 @@ const ( ErrCodeAccountAuthorizationError ErrorCode = 1055 ErrCodeOperationAuthorizationError ErrorCode = 1056 ErrCodeOperationNotSupportedError ErrorCode = 1057 - ErrCodeBlockHeightOutOfRangeError ErrorCode = 1058 + // Deprecated: No longer used. + ErrCodeBlockHeightOutOfRangeError ErrorCode = 1058 // execution errors 1100 - 1200 // Deprecated: No longer used. diff --git a/fvm/errors/execution.go b/fvm/errors/execution.go index ec2f1c3d3fd..eeb752d9692 100644 --- a/fvm/errors/execution.go +++ b/fvm/errors/execution.go @@ -248,17 +248,6 @@ func IsOperationNotSupportedError(err error) bool { return HasErrorCode(err, ErrCodeOperationNotSupportedError) } -func NewBlockHeightOutOfRangeError(height uint64) CodedError { - return NewCodedError( - ErrCodeBlockHeightOutOfRangeError, - "block height (%v) is out of queriable range", - height) -} - -func IsBlockHeightOutOfRangeError(err error) bool { - return HasErrorCode(err, ErrCodeBlockHeightOutOfRangeError) -} - // NewScriptExecutionCancelledError construct a new CodedError which indicates // that Cadence Script execution has been cancelled (e.g. 
request connection // has been droped) diff --git a/fvm/evm/impl/abi.go b/fvm/evm/impl/abi.go index f199255a714..1d034f84f1a 100644 --- a/fvm/evm/impl/abi.go +++ b/fvm/evm/impl/abi.go @@ -516,6 +516,7 @@ func gethABIType( } func goType( + context abiEncodingContext, staticType interpreter.StaticType, evmTypeIDs *evmSpecialTypeIDs, ) (reflect.Type, bool) { @@ -558,7 +559,7 @@ func goType( switch staticType := staticType.(type) { case *interpreter.ConstantSizedStaticType: - elementType, ok := goType(staticType.ElementType(), evmTypeIDs) + elementType, ok := goType(context, staticType.ElementType(), evmTypeIDs) if !ok { break } @@ -566,7 +567,7 @@ func goType( return reflect.ArrayOf(int(staticType.Size), elementType), true case *interpreter.VariableSizedStaticType: - elementType, ok := goType(staticType.ElementType(), evmTypeIDs) + elementType, ok := goType(context, staticType.ElementType(), evmTypeIDs) if !ok { break } @@ -585,6 +586,22 @@ func goType( return reflect.ArrayOf(stdlib.EVMBytes32Length, reflect.TypeOf(byte(0))), true } + gethABIType, ok := gethABIType( + context, + staticType, + evmTypeIDs, + ) + // All user-defined Cadence structs are ABI encoded/decoded as Solidity tuples. + // Except for the structs defined in the EVM system contract: + // - `EVM.EVMAddress` + // - `EVM.EVMBytes` + // - `EVM.EVMBytes4` + // - `EVM.EVMBytes32` + // These have their own ABI encoding/decoding format.
+ if ok && gethABIType.T == gethABI.TupleTy { + return gethABIType.TupleType, true + } + return nil, false } @@ -793,7 +810,7 @@ func encodeABI( elementStaticType := arrayStaticType.ElementType() - elementGoType, ok := goType(elementStaticType, evmTypeIDs) + elementGoType, ok := goType(context, elementStaticType, evmTypeIDs) if !ok { break } @@ -810,6 +827,9 @@ func encodeABI( result = reflect.MakeSlice(reflect.SliceOf(elementGoType), size, size) } + semaType := interpreter.MustConvertStaticToSemaType(elementStaticType, context) + isTuple := asTupleEncodableCompositeType(semaType) != nil + var index int value.Iterate( context, @@ -825,7 +845,16 @@ func encodeABI( panic(err) } - result.Index(index).Set(reflect.ValueOf(arrayElement)) + if isTuple { + // For tuples, the underlying `arrayElement` is a value of + // type *struct { X,Y,Z fields }, so we need to indirect + // the pointer + result.Index(index).Set( + reflect.Indirect(reflect.ValueOf(arrayElement)), + ) + } else { + result.Index(index).Set(reflect.ValueOf(arrayElement)) + } index++ diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go index 19329f53d60..6e03e8a2940 100644 --- a/fvm/evm/stdlib/contract_test.go +++ b/fvm/evm/stdlib/contract_test.go @@ -1289,6 +1289,74 @@ func TestEVMEncodeABIBytesRoundtrip(t *testing.T) { assert.Equal(t, uint64(64), gauge.TotalComputationUsed()) }) + + t.Run("ABI encode array of structs into tuple Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + struct S { + access(all) let x: UInt8 + access(all) let y: Int16 + + init(x: UInt8, y: Int16) { + self.x = x + self.y = y + } + + access(all) fun toString(): String { + return "S(x: \(self.x), y: \(self.y))" + } + } + + access(all) + fun main() { + let s1 = S(x: 4, y: 2) + let s2 = S(x: 5, y: 9) + let structArray = [s1, s2] + let encodedData = EVM.encodeABI([structArray]) + assert(encodedData == [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x20, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x4, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x5, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x9 + ], message: String.encodeHex(encodedData)) + + let values = EVM.decodeABI(types: [Type<[S]>()], data: encodedData) + assert(values.length == 1) + let decodedStructArray = values[0] as! [S] + assert(decodedStructArray.length == 2) + + assert(decodedStructArray[0].x == 4) + assert(decodedStructArray[0].y == 2) + assert(decodedStructArray[1].x == 5) + assert(decodedStructArray[1].y == 9) + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, uint64(192), gauge.TotalComputationUsed()) + }) } func TestEVMEncodeABIComputation(t *testing.T) { diff --git a/go.mod b/go.mod index c426d519cd7..8e62b195b8c 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/onflow/flow v0.4.15 github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.2 github.com/onflow/flow-core-contracts/lib/go/templates v1.9.2 - github.com/onflow/flow-go-sdk v1.9.13 + github.com/onflow/flow-go-sdk v1.9.14 github.com/onflow/flow/protobuf/go/flow v0.4.19 github.com/pbnjay/memory 
v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 @@ -68,20 +68,20 @@ require ( github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.11.1 github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel v1.39.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 - go.opentelemetry.io/otel/sdk v1.38.0 - go.opentelemetry.io/otel/trace v1.38.0 + go.opentelemetry.io/otel/sdk v1.39.0 + go.opentelemetry.io/otel/trace v1.39.0 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 - golang.org/x/crypto v0.46.0 + golang.org/x/crypto v0.47.0 golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 golang.org/x/sync v0.19.0 - golang.org/x/sys v0.39.0 - golang.org/x/text v0.32.0 + golang.org/x/sys v0.40.0 + golang.org/x/text v0.33.0 golang.org/x/time v0.14.0 - golang.org/x/tools v0.39.0 - google.golang.org/api v0.259.0 + golang.org/x/tools v0.40.0 + google.golang.org/api v0.264.0 google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 google.golang.org/grpc v1.78.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 @@ -115,7 +115,7 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 - google.golang.org/genproto/googleapis/bytestream v0.0.0-20251222181119-0a764e51fe1b + google.golang.org/genproto/googleapis/bytestream v0.0.0-20260122232226-8e98ce8d340d gopkg.in/yaml.v2 v2.4.0 ) @@ -127,13 +127,13 @@ require ( github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect github.com/ferranbt/fastssz v0.1.4 // indirect - golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect + golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect ) require ( cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.121.6 // indirect - cloud.google.com/go/auth 
v0.18.0 // indirect + cloud.google.com/go/auth v0.18.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/iam v1.5.3 // indirect cloud.google.com/go/monitoring v1.24.3 // indirect @@ -210,11 +210,11 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gofrs/flock v0.12.1 // indirect + github.com/gofrs/flock v0.12.1 github.com/golang/glog v1.2.5 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect github.com/googleapis/gax-go/v2 v2.16.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect @@ -342,20 +342,20 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/mock v0.5.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/term v0.38.0 // indirect + golang.org/x/term v0.39.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/appengine v1.6.8 // indirect - 
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.4.1 // indirect diff --git a/go.sum b/go.sum index ed309de09c2..1d2720ca7cd 100644 --- a/go.sum +++ b/go.sum @@ -35,8 +35,8 @@ cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2Z cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0= -cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo= +cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= +cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -580,8 +580,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/enterprise-certificate-proxy v0.3.11 
h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao= +github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -960,8 +960,8 @@ github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3 github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= -github.com/onflow/flow-go-sdk v1.9.13 h1:HdWhsheDkaUokC6+7eefP+v6cMKfN3/yU4O8ddC1YGc= -github.com/onflow/flow-go-sdk v1.9.13/go.mod h1:e5zVNLkpzYxVbusPUMvtrbsinwCyr1krPvxMD6dhW6M= +github.com/onflow/flow-go-sdk v1.9.14 h1:YIBb8XDt5ZW/oUKLBvMLcV/UEjJ8ez0FSvwhiMKSMtk= +github.com/onflow/flow-go-sdk v1.9.14/go.mod h1:Rn5UfGAwzme+OqPy54m0Q3pP0su19rBiQXT7PftoUOI= github.com/onflow/flow-nft/lib/go/contracts v1.3.0 h1:DmNop+O0EMyicZvhgdWboFG57xz5t9Qp81FKlfKyqJc= github.com/onflow/flow-nft/lib/go/contracts v1.3.0/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= github.com/onflow/flow-nft/lib/go/templates v1.3.0 h1:uGIBy4GEY6Z9hKP7sm5nA5kwvbvLWW4nWx5NN9Wg0II= @@ -1347,8 +1347,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod 
h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= @@ -1357,14 +1357,14 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1x go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0/go.mod h1:kB3ufRbfU+CQ4MlUcqtW8Z7YEOBeK2DJ6CmR5rYYF3E= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/metric v1.39.0 
h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= @@ -1421,8 +1421,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1464,8 +1464,8 @@ 
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1531,8 +1531,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1677,10 +1677,10 @@ 
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -1688,8 +1688,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod 
h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1704,8 +1704,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1776,8 +1776,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= 
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1828,8 +1828,8 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.259.0 h1:90TaGVIxScrh1Vn/XI2426kRpBqHwWIzVBzJsVZ5XrQ= -google.golang.org/api v0.259.0/go.mod h1:LC2ISWGWbRoyQVpxGntWwLWN/vLNxxKBK9KuJRI8Te4= +google.golang.org/api v0.264.0 h1:+Fo3DQXBK8gLdf8rFZ3uLu39JpOnhvzJrLMQSoSYZJM= +google.golang.org/api v0.264.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1929,10 +1929,10 @@ google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20251222181119-0a764e51fe1b h1:pcwUBl8sRRgljKGbSYn4Riy/iVzEiuNBRZnDyrBSHVE= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20251222181119-0a764e51fe1b/go.mod 
h1:Tej9lWiwVvQJP+b43pjJIsr/3mZycXWCIyoiXmbFf40= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260122232226-8e98ce8d340d h1:Q9v92SXbvCsk89QPHVik5fAtq93/x/R8/KNWeS3numk= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:Tej9lWiwVvQJP+b43pjJIsr/3mZycXWCIyoiXmbFf40= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= diff --git a/insecure/go.mod b/insecure/go.mod index e99710020aa..4d981c1eb64 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -23,7 +23,7 @@ require ( require ( cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.121.6 // indirect - cloud.google.com/go/auth v0.18.0 // indirect + cloud.google.com/go/auth v0.18.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.5.3 // indirect @@ -131,7 +131,7 @@ require ( github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect github.com/googleapis/gax-go/v2 v2.16.0 // indirect github.com/gorilla/mux 
v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect @@ -223,7 +223,7 @@ require ( github.com/onflow/flow-evm-bridge v0.1.0 // indirect github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect - github.com/onflow/flow-go-sdk v1.9.13 // indirect + github.com/onflow/flow-go-sdk v1.9.14 // indirect github.com/onflow/flow-nft/lib/go/contracts v1.3.0 // indirect github.com/onflow/flow-nft/lib/go/templates v1.3.0 // indirect github.com/onflow/flow/protobuf/go/flow v0.4.19 // indirect @@ -305,38 +305,38 @@ require ( go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.46.0 // indirect + golang.org/x/crypto v0.47.0 // indirect golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/mod 
v0.31.0 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect - golang.org/x/term v0.38.0 // indirect - golang.org/x/text v0.32.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.39.0 // indirect + golang.org/x/tools v0.40.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/api v0.259.0 // indirect + google.golang.org/api v0.264.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/insecure/go.sum b/insecure/go.sum index 924583b8167..4fa6d97b0af 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -23,8 +23,8 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0= -cloud.google.com/go/auth v0.18.0/go.mod 
h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo= +cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= +cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -540,8 +540,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao= +github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -908,8 +908,8 @@ github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3 github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= 
-github.com/onflow/flow-go-sdk v1.9.13 h1:HdWhsheDkaUokC6+7eefP+v6cMKfN3/yU4O8ddC1YGc= -github.com/onflow/flow-go-sdk v1.9.13/go.mod h1:e5zVNLkpzYxVbusPUMvtrbsinwCyr1krPvxMD6dhW6M= +github.com/onflow/flow-go-sdk v1.9.14 h1:YIBb8XDt5ZW/oUKLBvMLcV/UEjJ8ez0FSvwhiMKSMtk= +github.com/onflow/flow-go-sdk v1.9.14/go.mod h1:Rn5UfGAwzme+OqPy54m0Q3pP0su19rBiQXT7PftoUOI= github.com/onflow/flow-nft/lib/go/contracts v1.3.0 h1:DmNop+O0EMyicZvhgdWboFG57xz5t9Qp81FKlfKyqJc= github.com/onflow/flow-nft/lib/go/contracts v1.3.0/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= github.com/onflow/flow-nft/lib/go/templates v1.3.0 h1:uGIBy4GEY6Z9hKP7sm5nA5kwvbvLWW4nWx5NN9Wg0II= @@ -1290,22 +1290,22 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= 
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1361,8 +1361,8 @@ 
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1403,8 +1403,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1463,8 +1463,8 @@ 
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1579,10 +1579,10 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= +golang.org/x/telemetry 
v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -1590,8 +1590,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1605,8 +1605,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= 
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1673,8 +1673,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1706,8 +1706,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.259.0 h1:90TaGVIxScrh1Vn/XI2426kRpBqHwWIzVBzJsVZ5XrQ= -google.golang.org/api v0.259.0/go.mod h1:LC2ISWGWbRoyQVpxGntWwLWN/vLNxxKBK9KuJRI8Te4= +google.golang.org/api v0.264.0 h1:+Fo3DQXBK8gLdf8rFZ3uLu39JpOnhvzJrLMQSoSYZJM= +google.golang.org/api v0.264.0/go.mod 
h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1766,10 +1766,10 @@ google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20251222181119-0a764e51fe1b h1:pcwUBl8sRRgljKGbSYn4Riy/iVzEiuNBRZnDyrBSHVE= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Tej9lWiwVvQJP+b43pjJIsr/3mZycXWCIyoiXmbFf40= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260122232226-8e98ce8d340d h1:Q9v92SXbvCsk89QPHVik5fAtq93/x/R8/KNWeS3numk= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:Tej9lWiwVvQJP+b43pjJIsr/3mZycXWCIyoiXmbFf40= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod 
h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= diff --git a/integration/go.mod b/integration/go.mod index 28433e2b82e..334d4f671b0 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -10,7 +10,7 @@ require ( github.com/cockroachdb/pebble/v2 v2.0.6 github.com/coreos/go-semver v0.3.0 github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724 - github.com/docker/docker v25.0.6+incompatible + github.com/docker/docker v24.0.6+incompatible github.com/docker/go-connections v0.4.0 github.com/ethereum/go-ethereum v1.16.8 github.com/go-git/go-git/v5 v5.11.0 @@ -27,7 +27,7 @@ require ( github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.2 github.com/onflow/flow-core-contracts/lib/go/templates v1.9.2 github.com/onflow/flow-go v0.38.0-preview.0.0.20241021221952-af9cd6e99de1 - github.com/onflow/flow-go-sdk v1.9.13 + github.com/onflow/flow-go-sdk v1.9.14 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 github.com/onflow/flow/protobuf/go/flow v0.4.19 github.com/prometheus/client_golang v1.20.5 @@ -49,7 +49,7 @@ require ( require ( cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.121.6 // indirect - cloud.google.com/go/auth v0.18.0 // indirect + cloud.google.com/go/auth v0.18.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.5.3 // indirect @@ -104,14 +104,12 @@ require ( github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/containerd v1.7.30 // indirect github.com/containerd/fifo v1.1.0 // indirect - github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect github.com/crate-crypto/go-ipa 
v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/cskr/pubsub v1.0.2 // indirect - github.com/cyphar/filepath-securejoin v0.5.1 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect @@ -119,8 +117,9 @@ require ( github.com/dgraph-io/badger/v2 v2.2007.4 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/distribution/reference v0.6.0 // indirect + github.com/distribution/reference v0.5.0 // indirect github.com/docker/cli v24.0.6+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -173,7 +172,7 @@ require ( github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect github.com/googleapis/gax-go/v2 v2.16.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect @@ -244,7 +243,6 @@ require ( github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect @@ -273,7 +271,7 @@ require ( github.com/onflow/wal v1.0.2 // indirect github.com/onsi/ginkgo/v2 v2.22.0 // 
indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -311,6 +309,7 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/rootless-containers/rootlesskit v1.1.1 // indirect github.com/schollz/progressbar/v3 v3.18.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/sethvargo/go-retry v0.2.3 // indirect @@ -351,35 +350,35 @@ require ( go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.46.0 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net 
v0.48.0 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect - golang.org/x/term v0.38.0 // indirect - golang.org/x/text v0.32.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.39.0 // indirect + golang.org/x/tools v0.40.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/api v0.259.0 // indirect + google.golang.org/api v0.264.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/integration/go.sum b/integration/go.sum index 633299770c2..4aff08fd404 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -6,8 +6,8 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0= 
-cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo= +cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= +cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.72.0 h1:D/yLju+3Ens2IXx7ou1DJ62juBm+/coBInn4VVOg5Cw= @@ -193,12 +193,8 @@ github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.7.30 h1:/2vezDpLDVGGmkUXmlNPLCCNKHJ5BbC5tJB5JNzQhqE= -github.com/containerd/containerd v1.7.30/go.mod h1:fek494vwJClULlTpExsmOyKCMUAbuVjlFsJQc4/j44M= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -226,8 +222,8 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cskr/pubsub v1.0.2 
h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/cyphar/filepath-securejoin v0.5.1 h1:eYgfMq5yryL4fbWfkLpFFy2ukSELzaJOTaUTuh+oF48= -github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724 h1:zOOpPLu5VvH8ixyoDWHnQHWoEHtryT1ne31vwz0G7Fo= github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724/go.mod h1:U0cEcbf9hAwPSuuoPVqXKhcWV+IU4CStK75cJ52f2/A= @@ -253,15 +249,14 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= 
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= -github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -474,8 +469,8 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao= +github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= @@ -711,8 +706,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh 
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= -github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ= @@ -791,8 +784,8 @@ github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3 github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= -github.com/onflow/flow-go-sdk v1.9.13 h1:HdWhsheDkaUokC6+7eefP+v6cMKfN3/yU4O8ddC1YGc= -github.com/onflow/flow-go-sdk v1.9.13/go.mod h1:e5zVNLkpzYxVbusPUMvtrbsinwCyr1krPvxMD6dhW6M= +github.com/onflow/flow-go-sdk v1.9.14 h1:YIBb8XDt5ZW/oUKLBvMLcV/UEjJ8ez0FSvwhiMKSMtk= +github.com/onflow/flow-go-sdk v1.9.14/go.mod h1:Rn5UfGAwzme+OqPy54m0Q3pP0su19rBiQXT7PftoUOI= github.com/onflow/flow-nft/lib/go/contracts v1.3.0 h1:DmNop+O0EMyicZvhgdWboFG57xz5t9Qp81FKlfKyqJc= github.com/onflow/flow-nft/lib/go/contracts v1.3.0/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= github.com/onflow/flow-nft/lib/go/templates v1.3.0 h1:uGIBy4GEY6Z9hKP7sm5nA5kwvbvLWW4nWx5NN9Wg0II= @@ -822,9 +815,8 @@ github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -954,6 +946,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rootless-containers/rootlesskit v1.1.1 h1:F5psKWoWY9/VjZ3ifVcaosjvFZJOagX85U22M0/EQZE= github.com/rootless-containers/rootlesskit v1.1.1/go.mod h1:UD5GoA3dqKCJrnvnhVgQQnweMF2qZnf9KLw8EewcMZI= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= @@ -1140,24 +1133,22 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= 
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1206,8 +1197,8 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= golang.org/x/exp 
v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= @@ -1225,8 +1216,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1268,8 +1259,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1349,10 +1340,10 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1363,8 +1354,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= 
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1378,8 +1369,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1409,8 +1400,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= 
+golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1422,8 +1413,8 @@ gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.259.0 h1:90TaGVIxScrh1Vn/XI2426kRpBqHwWIzVBzJsVZ5XrQ= -google.golang.org/api v0.259.0/go.mod h1:LC2ISWGWbRoyQVpxGntWwLWN/vLNxxKBK9KuJRI8Te4= +google.golang.org/api v0.264.0 h1:+Fo3DQXBK8gLdf8rFZ3uLu39JpOnhvzJrLMQSoSYZJM= +google.golang.org/api v0.264.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1442,10 +1433,10 @@ google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20251222181119-0a764e51fe1b 
h1:pcwUBl8sRRgljKGbSYn4Riy/iVzEiuNBRZnDyrBSHVE= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Tej9lWiwVvQJP+b43pjJIsr/3mZycXWCIyoiXmbFf40= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260122232226-8e98ce8d340d h1:Q9v92SXbvCsk89QPHVik5fAtq93/x/R8/KNWeS3numk= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:Tej9lWiwVvQJP+b43pjJIsr/3mZycXWCIyoiXmbFf40= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= diff --git a/integration/localnet/Makefile b/integration/localnet/Makefile index 12a06fa29ab..4a2bb2a4413 100644 --- a/integration/localnet/Makefile +++ b/integration/localnet/Makefile @@ -3,6 +3,8 @@ CONSENSUS = 2 VALID_CONSENSUS := $(shell test $(CONSENSUS) -ge 2; echo $$?) EXECUTION = 2 VALID_EXECUTION := $(shell test $(EXECUTION) -ge 2; echo $$?) +LEDGER_EXECUTION = 0 +VALID_LEDGER_EXECUTION := $(shell test $(LEDGER_EXECUTION) -le $(EXECUTION); echo $$?) 
TEST_EXECUTION = 0 VERIFICATION = 1 ACCESS = 1 @@ -49,6 +51,8 @@ ifeq ($(strip $(VALID_EXECUTION)), 1) $(error Number of Execution nodes should be no less than 2) else ifeq ($(strip $(VALID_CONSENSUS)), 1) $(error Number of Consensus nodes should be no less than 2) +else ifeq ($(strip $(VALID_LEDGER_EXECUTION)), 1) + $(error LEDGER_EXECUTION ($(LEDGER_EXECUTION)) should not be greater than EXECUTION ($(EXECUTION))) else go run \ -ldflags="-X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' \ @@ -74,7 +78,8 @@ else -tracing=$(TRACING) \ -extensive-tracing=$(EXTENSIVE_TRACING) \ -consensus-delay=$(CONSENSUS_DELAY) \ - -collection-delay=$(COLLECTION_DELAY) + -collection-delay=$(COLLECTION_DELAY) \ + -ledger-execution=$(LEDGER_EXECUTION) endif # Creates a light version of the localnet with just 1 instance for each node type @@ -130,6 +135,10 @@ build-flow: stop: DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.metrics.yml -f docker-compose.nodes.yml down -v --remove-orphans +.PHONY: stop-flow +stop-flow: + DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml down -v + .PHONY: load load: go run ../benchmark/cmd/manual -log-level info -tps 1,1,1 -tps-durations 30s,30s @@ -152,6 +161,7 @@ clean-data: rm -rf ./bootstrap rm -rf ./trie rm -rf ./profiler + rm -rf ./sockets rm -f ./targets.nodes.json rm -f ./docker-compose.nodes.yml rm -f ./ports.nodes.json diff --git a/integration/localnet/builder/bootstrap.go b/integration/localnet/builder/bootstrap.go index 539eae6a892..77b7c7e3664 100644 --- a/integration/localnet/builder/bootstrap.go +++ b/integration/localnet/builder/bootstrap.go @@ -17,6 +17,8 @@ import ( "github.com/go-yaml/yaml" "github.com/onflow/flow-go/cmd/build" + "github.com/onflow/flow-go/ledger/complete/wal" + bootstrapFilenames "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" 
"github.com/onflow/flow-go/state/protocol/protocol_state" @@ -30,6 +32,7 @@ const ( ProfilerDir = "./profiler" DataDir = "./data" TrieDir = "./trie" + SocketDir = "./sockets" DockerComposeFile = "./docker-compose.nodes.yml" DockerComposeFileVersion = "3.7" PrometheusTargetsFile = "./targets.nodes.json" @@ -62,6 +65,7 @@ var ( accessCount int observerCount int testExecutionCount int + ledgerExecutionCount int nClusters uint numViewsInStakingPhase uint64 numViewsInDKGPhase uint64 @@ -104,6 +108,7 @@ func init() { flag.DurationVar(&consensusDelay, "consensus-delay", DefaultConsensusDelay, "delay on consensus node block proposals") flag.DurationVar(&collectionDelay, "collection-delay", DefaultCollectionDelay, "delay on collection node block proposals") flag.StringVar(&logLevel, "loglevel", DefaultLogLevel, "log level for all nodes") + flag.IntVar(&ledgerExecutionCount, "ledger-execution", 0, "number of execution nodes that use remote ledger service (0 = all use local ledger, max = execution count)") } func generateBootstrapData(flowNetworkConf testnet.NetworkConfig) []testnet.ContainerConfig { @@ -176,6 +181,14 @@ func main() { flowNetworkConf := testnet.NewNetworkConfig("localnet", flowNodes, flowNetworkOpts...) 
displayFlowNetworkConf(flowNetworkConf) + // Validate ledger execution count + if ledgerExecutionCount < 0 { + panic(fmt.Sprintf("ledger-execution must be >= 0, got %d", ledgerExecutionCount)) + } + if ledgerExecutionCount > executionCount { + panic(fmt.Sprintf("ledger-execution (%d) must not be greater than execution count (%d)", ledgerExecutionCount, executionCount)) + } + // Generate the Flow network bootstrap files for this localnet flowNodeContainerConfigs := generateBootstrapData(flowNetworkConf) @@ -188,6 +201,10 @@ func main() { panic(err) } + // Only create ledger service if at least one execution node uses remote ledger + if ledgerExecutionCount > 0 { + dockerServices = prepareLedgerService(dockerServices, flowNodeContainerConfigs) + } dockerServices = prepareObserverServices(dockerServices, flowNodeContainerConfigs) dockerServices = prepareTestExecutionService(dockerServices, flowNodeContainerConfigs) @@ -217,7 +234,7 @@ func displayFlowNetworkConf(flowNetworkConf testnet.NetworkConfig) { } func prepareCommonHostFolders() { - for _, dir := range []string{BootstrapDir, ProfilerDir, DataDir, TrieDir} { + for _, dir := range []string{BootstrapDir, ProfilerDir, DataDir, TrieDir, SocketDir} { if err := os.RemoveAll(dir); err != nil && !errors.Is(err, fs.ErrNotExist) { panic(err) } @@ -440,9 +457,30 @@ func prepareExecutionService(container testnet.ContainerConfig, i int, n int) Se "--scheduled-callbacks-enabled=true", ) - service.Volumes = append(service.Volumes, - fmt.Sprintf("%s:/trie:z", trieDir), - ) + // Configure ledger service: execution nodes with index < ledgerExecutionCount use remote ledger + if i < ledgerExecutionCount { + // This execution node uses remote ledger service via Unix socket; mount shared socket dir (absolute path) + absSocketDir, err := filepath.Abs(SocketDir) + if err != nil { + panic(fmt.Errorf("socket dir absolute path: %w", err)) + } + service.Volumes = append(service.Volumes, + fmt.Sprintf("%s:/sockets:z", absSocketDir), + ) + 
service.Command = append(service.Command, + "--ledger-service-addr=unix:///sockets/ledger.sock", + ) + // Execution node depends on ledger service + service.DependsOn = append(service.DependsOn, "ledger_service_1") + // Execution nodes using remote ledger should NOT mount the trie directory + // because the ledger service manages it + } else { + // Execution nodes with index >= ledgerExecutionCount use local ledger by default (no flag needed) + // These nodes need to mount the trie directory for their local ledger + service.Volumes = append(service.Volumes, + fmt.Sprintf("%s:/trie:z", trieDir), + ) + } service.AddExposedPorts(testnet.GRPCPort) @@ -670,18 +708,16 @@ func writePrometheusConfig(serviceDisc PrometheusServiceDiscovery) error { } func openAndTruncate(filename string) (*os.File, error) { - f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0755) - if err != nil { - return nil, err - } - - // overwrite current file contents - err = f.Truncate(0) - if err != nil { - return nil, err + // Check if path exists and is a directory, remove it if so + if fi, err := os.Stat(filename); err == nil { + if fi.IsDir() { + if err := os.RemoveAll(filename); err != nil { + return nil, fmt.Errorf("failed to remove existing directory %s: %w", filename, err) + } + } } - _, err = f.Seek(0, 0) + f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return nil, err } @@ -762,6 +798,117 @@ func prepareObserverServices(dockerServices Services, flowNodeContainerConfigs [ return dockerServices } +func prepareLedgerService(dockerServices Services, flowNodeContainerConfigs []testnet.ContainerConfig) Services { + // Find the first execution node that uses remote ledger (index 0) + // The ledger service will reuse its trie directory + var firstExecutionNode *testnet.ContainerConfig + executionIndex := 0 + for _, container := range flowNodeContainerConfigs { + if container.Role == flow.RoleExecution { + if executionIndex < ledgerExecutionCount { + 
firstExecutionNode = &container + break + } + executionIndex++ + } + } + + if firstExecutionNode == nil { + panic("failed to find first execution node for ledger service") + } + + // Use the same trie directory as the first execution node + trieDir := "./" + filepath.Join(TrieDir, firstExecutionNode.Role.String(), firstExecutionNode.NodeID.String()) + + // Ensure trie directory exists for the ledger service + err := os.MkdirAll(trieDir, 0755) + if err != nil && !errors.Is(err, fs.ErrExist) { + panic(err) + } + + // Copy root checkpoint from bootstrap directory to trie directory on the host + // The symlinks will work inside containers because: + // 1. Execution node has both /bootstrap and /trie mounted + // 2. Ledger service has /trie mounted and can follow symlinks to /bootstrap (via execution node's mount) + // 3. We create symlinks using relative paths that work in both host and container contexts + bootstrapExecutionStateDir := filepath.Join(BootstrapDir, bootstrapFilenames.DirnameExecutionState) + checkpointSource := filepath.Join(bootstrapExecutionStateDir, bootstrapFilenames.FilenameWALRootCheckpoint) + if _, err := os.Stat(checkpointSource); err == nil { + // Checkpoint exists, create symlinks on host + // The symlinks will use relative paths that resolve correctly inside containers + // because both /bootstrap and /trie are mounted in the containers + _, err = wal.SoftlinkCheckpointFile(bootstrapFilenames.FilenameWALRootCheckpoint, bootstrapExecutionStateDir, trieDir) + if err != nil { + panic(fmt.Errorf("failed to create checkpoint symlinks: %w", err)) + } + fmt.Printf("created checkpoint symlinks in trie directory: %s\n", trieDir) + } else { + // Checkpoint doesn't exist, this is expected for fresh bootstrap + // The execution node will create it when it initializes + fmt.Printf("root checkpoint not found in %s, ledger service will start with empty state\n", checkpointSource) + } + + // Allocate ports for ledger service + ledgerServiceName := 
"ledger_service_1" + err = ports.AllocatePorts(ledgerServiceName, "ledger") + if err != nil { + panic(err) + } + + // Shared socket directory: use absolute path so Docker mounts the same host dir in all containers + absSocketDir, err := filepath.Abs(SocketDir) + if err != nil { + panic(fmt.Errorf("socket dir absolute path: %w", err)) + } + + // Create ledger service + // Use Unix domain socket; ledger and execution nodes share absSocketDir mounted at /sockets + service := Service{ + name: ledgerServiceName, + Image: "localnet-ledger", + Command: []string{ + "--triedir=/trie", + "--ledger-service-socket=/sockets/ledger.sock", + "--mtrie-cache-size=100", + "--checkpoint-distance=100", + "--checkpoints-to-keep=3", + fmt.Sprintf("--loglevel=%s", logLevel), + }, + Volumes: []string{ + fmt.Sprintf("%s:/trie:z", trieDir), + fmt.Sprintf("%s:/bootstrap:z", BootstrapDir), + fmt.Sprintf("%s:/sockets:z", absSocketDir), + }, + Environment: []string{ + fmt.Sprintf("GOMAXPROCS=%d", DefaultGOMAXPROCS), + }, + Labels: map[string]string{ + "org.flowfoundation.role": "ledger", + "org.flowfoundation.num": "001", + }, + } + + // Build configuration for ledger service + service.Build = Build{ + Context: "../../", + Dockerfile: "cmd/Dockerfile", + Args: map[string]string{ + "TARGET": "./cmd/ledger", + "VERSION": build.Version(), + "COMMIT": build.Commit(), + "GOARCH": runtime.GOARCH, + }, + Target: "production", + } + + dockerServices[ledgerServiceName] = service + + fmt.Println() + fmt.Println("Ledger service bootstrapping data generated...") + + return dockerServices +} + func prepareTestExecutionService(dockerServices Services, flowNodeContainerConfigs []testnet.ContainerConfig) Services { if testExecutionCount == 0 { return dockerServices diff --git a/integration/localnet/builder/ports.go b/integration/localnet/builder/ports.go index f3b8b581004..1f5f2fc02be 100644 --- a/integration/localnet/builder/ports.go +++ b/integration/localnet/builder/ports.go @@ -53,6 +53,11 @@ var config = 
map[string]*portConfig{ end: 8000, portCount: 2, }, + "ledger": { + start: 8000, // 8000-8100 => 50 ledger services + end: 8100, + portCount: 2, + }, } // PortAllocator is responsible for allocating and tracking container-to-host port mappings for each node diff --git a/integration/tests/epochs/base_suite.go b/integration/tests/epochs/base_suite.go index 7fcae7a2a7b..842d8590df8 100644 --- a/integration/tests/epochs/base_suite.go +++ b/integration/tests/epochs/base_suite.go @@ -4,7 +4,7 @@ // and resource-heavy, we split them into several cohorts, which can be run in parallel. // // If a new cohort is added in the future, it must be added to: -// - ci.yml, flaky-test-monitor.yml, bors.toml (ensure new cohort of tests is run) +// - ci.yml, flaky-test-monitor.yml (ensure new cohort of tests is run) // - Makefile (include new cohort in integration-test directive, etc.) package epochs diff --git a/integration/tests/epochs/dynamic_epoch_transition_suite.go b/integration/tests/epochs/dynamic_epoch_transition_suite.go index 4791ca51bd8..d49b4f47a17 100644 --- a/integration/tests/epochs/dynamic_epoch_transition_suite.go +++ b/integration/tests/epochs/dynamic_epoch_transition_suite.go @@ -4,7 +4,7 @@ // and resource-heavy, we split them into several cohorts, which can be run in parallel. // // If a new cohort is added in the future, it must be added to: -// - ci.yml, flaky-test-monitor.yml, bors.toml (ensure new cohort of tests is run) +// - ci.yml, flaky-test-monitor.yml (ensure new cohort of tests is run) // - Makefile (include new cohort in integration-test directive, etc.) 
package epochs diff --git a/ledger/common/pathfinder/pathfinder.go b/ledger/common/pathfinder/pathfinder.go index 7849cf28256..9bcae15a1d7 100644 --- a/ledger/common/pathfinder/pathfinder.go +++ b/ledger/common/pathfinder/pathfinder.go @@ -114,7 +114,7 @@ func PathsFromPayloads(payloads []*ledger.Payload, version uint8) ([]ledger.Path return paths, nil } -// UpdateToPayloads constructs an slice of payloads given ledger update +// UpdateToPayloads constructs a slice of payloads given ledger update. func UpdateToPayloads(update *ledger.Update) ([]*ledger.Payload, error) { keys := update.Keys() values := update.Values() diff --git a/ledger/complete/factory.go b/ledger/complete/factory.go new file mode 100644 index 00000000000..2152a1143f2 --- /dev/null +++ b/ledger/complete/factory.go @@ -0,0 +1,59 @@ +package complete + +import ( + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/module" +) + +// LocalLedgerFactory creates in-process ledger instances with compactor. +type LocalLedgerFactory struct { + wal wal.LedgerWAL + capacity int + compactorConfig *ledger.CompactorConfig + triggerCheckpoint *atomic.Bool + metrics module.LedgerMetrics + logger zerolog.Logger + pathFinderVersion uint8 +} + +// NewLocalLedgerFactory creates a new factory for local ledger instances. +// triggerCheckpoint is a runtime control signal to trigger checkpoint on next segment finish.
+func NewLocalLedgerFactory( + ledgerWAL wal.LedgerWAL, + capacity int, + compactorConfig *ledger.CompactorConfig, + triggerCheckpoint *atomic.Bool, + metrics module.LedgerMetrics, + logger zerolog.Logger, + pathFinderVersion uint8, +) ledger.Factory { + return &LocalLedgerFactory{ + wal: ledgerWAL, + capacity: capacity, + compactorConfig: compactorConfig, + triggerCheckpoint: triggerCheckpoint, + metrics: metrics, + logger: logger, + pathFinderVersion: pathFinderVersion, + } +} + +func (f *LocalLedgerFactory) NewLedger() (ledger.Ledger, error) { + ledgerWithCompactor, err := NewLedgerWithCompactor( + f.wal, + f.capacity, + f.compactorConfig, + f.triggerCheckpoint, + f.metrics, + f.logger, + f.pathFinderVersion, + ) + if err != nil { + return nil, err + } + return ledgerWithCompactor, nil +} diff --git a/ledger/complete/ledger.go b/ledger/complete/ledger.go index 82ff8e7f477..6ceb65c7dd5 100644 --- a/ledger/complete/ledger.go +++ b/ledger/complete/ledger.go @@ -3,6 +3,7 @@ package complete import ( "fmt" "io" + "sync" "time" "github.com/rs/zerolog" @@ -40,6 +41,7 @@ type Ledger struct { metrics module.LedgerMetrics logger zerolog.Logger trieUpdateCh chan *WALTrieUpdate + closeTrieUpdateCh sync.Once pathFinderVersion uint8 } @@ -110,7 +112,9 @@ func (l *Ledger) Done() <-chan struct{} { // Ledger is responsible for closing trieUpdateCh channel, // so Compactor can drain and process remaining updates. 
- close(l.trieUpdateCh) + l.closeTrieUpdateCh.Do(func() { + close(l.trieUpdateCh) + }) }() return done } @@ -457,3 +461,36 @@ func (l *Ledger) FindTrieByStateCommit(commitment flow.StateCommitment) (*trie.M return nil, nil } + +// StateCount returns the number of states (tries) stored in the forest +func (l *Ledger) StateCount() int { + return l.ForestSize() +} + +// StateByIndex returns the state at the given index +// -1 is the last index +func (l *Ledger) StateByIndex(index int) (ledger.State, error) { + tries, err := l.Tries() + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to get tries: %w", err) + } + + count := len(tries) + if count == 0 { + return ledger.DummyState, fmt.Errorf("no states available") + } + + // Handle negative index (-1 means last index) + if index < 0 { + index = count + index + if index < 0 { + return ledger.DummyState, fmt.Errorf("index %d is out of range (count: %d)", index-count, count) + } + } + + if index >= count { + return ledger.DummyState, fmt.Errorf("index %d is out of range (count: %d)", index, count) + } + + return ledger.State(tries[index].RootHash()), nil +} diff --git a/ledger/complete/ledger_test.go b/ledger/complete/ledger_test.go index d7021516440..cbe0be529d6 100644 --- a/ledger/complete/ledger_test.go +++ b/ledger/complete/ledger_test.go @@ -813,3 +813,171 @@ func migrationByKey(p []ledger.Payload) ([]ledger.Payload, error) { return ret, nil } + +func TestLedger_StateCount(t *testing.T) { + wal := &fixtures.NoopWAL{} + l, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) + require.NoError(t, err) + + compactor := fixtures.NewNoopCompactor(l) + <-compactor.Ready() + defer func() { + <-l.Done() + <-compactor.Done() + }() + + // Initially should have at least the empty trie + initialCount := l.StateCount() + assert.GreaterOrEqual(t, initialCount, 1, "should have at least one state (empty trie)") + + // Create some updates to add more states + 
state := l.InitialState() + keys := testutils.RandomUniqueKeys(3, 2, 2, 4) + values := testutils.RandomValues(3, 1, 32) + + // First update + update1, err := ledger.NewUpdate(state, keys[0:1], values[0:1]) + require.NoError(t, err) + newState1, _, err := l.Set(update1) + require.NoError(t, err) + + // Second update from the first new state + update2, err := ledger.NewUpdate(newState1, keys[1:2], values[1:2]) + require.NoError(t, err) + newState2, _, err := l.Set(update2) + require.NoError(t, err) + + // State count should have increased + finalCount := l.StateCount() + assert.Greater(t, finalCount, initialCount, "state count should increase after updates") + _ = newState2 // avoid unused variable +} + +func TestLedger_StateByIndex(t *testing.T) { + wal := &fixtures.NoopWAL{} + l, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) + require.NoError(t, err) + + compactor := fixtures.NewNoopCompactor(l) + <-compactor.Ready() + defer func() { + <-l.Done() + <-compactor.Done() + }() + + // Get initial state + initialState := l.InitialState() + stateCount := l.StateCount() + require.Greater(t, stateCount, 0, "should have at least one state") + + // Test getting state at index 0 + state0, err := l.StateByIndex(0) + require.NoError(t, err) + assert.NotEqual(t, ledger.DummyState, state0, "state at index 0 should not be dummy state") + + // Test getting last state using -1 + lastState, err := l.StateByIndex(-1) + require.NoError(t, err) + assert.NotEqual(t, ledger.DummyState, lastState, "last state should not be dummy state") + + // Create some updates to add more states + state := initialState + keys := testutils.RandomUniqueKeys(3, 2, 2, 4) + values := testutils.RandomValues(3, 1, 32) + + // Create multiple updates + for i := 0; i < 3; i++ { + update, err := ledger.NewUpdate(state, keys[i:i+1], values[i:i+1]) + require.NoError(t, err) + newState, _, err := l.Set(update) + require.NoError(t, err) + state = 
newState + } + + // Verify we can get states by index + finalCount := l.StateCount() + require.GreaterOrEqual(t, finalCount, 1, "should have at least one state") + + // Test getting first state + firstState, err := l.StateByIndex(0) + require.NoError(t, err) + assert.NotEqual(t, ledger.DummyState, firstState) + + // Test getting last state with -1 + lastStateAfterUpdates, err := l.StateByIndex(-1) + require.NoError(t, err) + assert.NotEqual(t, ledger.DummyState, lastStateAfterUpdates) + + // Test getting last state with positive index + if finalCount > 0 { + lastStateByIndex, err := l.StateByIndex(finalCount - 1) + require.NoError(t, err) + assert.NotEqual(t, ledger.DummyState, lastStateByIndex) + // Last state by index should match last state by -1 + assert.Equal(t, lastStateAfterUpdates, lastStateByIndex, "last state by -1 should match last state by positive index") + } + + // Test out of range indices + _, err = l.StateByIndex(finalCount) + require.Error(t, err, "should error for index out of range") + + _, err = l.StateByIndex(-finalCount - 1) + require.Error(t, err, "should error for negative index out of range") +} + +func TestLedgerWithCompactor_StateCountAndStateByIndex(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + metricsCollector := &metrics.NoopCollector{} + diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metricsCollector, dir, 100, pathfinder.PathByteSize, wal.SegmentSize) + require.NoError(t, err) + + compactorConfig := ledger.DefaultCompactorConfig(metricsCollector) + lwc, err := complete.NewLedgerWithCompactor( + diskWal, + 100, + compactorConfig, + atomic.NewBool(false), + metricsCollector, + zerolog.Nop(), + complete.DefaultPathFinderVersion, + ) + require.NoError(t, err) + + <-lwc.Ready() + defer func() { + <-lwc.Done() + }() + + // Test StateCount + initialCount := lwc.StateCount() + assert.GreaterOrEqual(t, initialCount, 1, "should have at least one state") + + // Test StateByIndex + state0, err := lwc.StateByIndex(0) + 
require.NoError(t, err) + assert.NotEqual(t, ledger.DummyState, state0) + + lastState, err := lwc.StateByIndex(-1) + require.NoError(t, err) + assert.NotEqual(t, ledger.DummyState, lastState) + + // Create some updates + state := lwc.InitialState() + keys := testutils.RandomUniqueKeys(2, 2, 2, 4) + values := testutils.RandomValues(2, 1, 32) + + update, err := ledger.NewUpdate(state, keys, values) + require.NoError(t, err) + newState, _, err := lwc.Set(update) + require.NoError(t, err) + + // Verify StateCount increased + finalCount := lwc.StateCount() + assert.Greater(t, finalCount, initialCount, "state count should increase after update") + + // Verify we can get the new state + lastStateAfterUpdate, err := lwc.StateByIndex(-1) + require.NoError(t, err) + assert.Equal(t, ledger.State(newState), lastStateAfterUpdate, "last state should match the new state") + }) +} diff --git a/ledger/complete/ledger_with_compactor.go b/ledger/complete/ledger_with_compactor.go new file mode 100644 index 00000000000..7a07d65c8e1 --- /dev/null +++ b/ledger/complete/ledger_with_compactor.go @@ -0,0 +1,116 @@ +package complete + +import ( + "fmt" + + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/ledger" + realWAL "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/module" +) + +// LedgerWithCompactor wraps a Ledger and its internal Compactor, +// managing both as a single component. This hides the compactor +// as an implementation detail. +// Embedding *Ledger allows automatic delegation of Ledger methods. +type LedgerWithCompactor struct { + *Ledger + compactor *Compactor + logger zerolog.Logger +} + +// NewLedgerWithCompactor creates a new ledger with an internal compactor. +// The compactor lifecycle is managed by this wrapper. +// Use Ready() to wait for the ledger and compactor to be ready. +// triggerCheckpoint is a runtime control signal to trigger checkpoint on next segment finish. 
+func NewLedgerWithCompactor( + diskWAL realWAL.LedgerWAL, + ledgerCapacity int, + compactorConfig *ledger.CompactorConfig, + triggerCheckpoint *atomic.Bool, + metrics module.LedgerMetrics, + logger zerolog.Logger, + pathFinderVersion uint8, +) (*LedgerWithCompactor, error) { + logger = logger.With().Str("ledger_mod", "complete").Logger() + + // Create the ledger + l, err := NewLedger(diskWAL, ledgerCapacity, metrics, logger, pathFinderVersion) + if err != nil { + return nil, fmt.Errorf("failed to create ledger: %w", err) + } + + // Create the compactor (internal to ledger) + compactor, err := NewCompactor( + l, + diskWAL, + logger.With().Str("subcomponent", "compactor").Logger(), + compactorConfig.CheckpointCapacity, + compactorConfig.CheckpointDistance, + compactorConfig.CheckpointsToKeep, + triggerCheckpoint, + compactorConfig.Metrics, + ) + if err != nil { + return nil, fmt.Errorf("failed to create compactor: %w", err) + } + + return &LedgerWithCompactor{ + Ledger: l, + compactor: compactor, + logger: logger, + }, nil +} + +// Note: Ledger methods (InitialState, HasState, GetSingleValue, Get, Set, Prove, +// StateCount, StateByIndex) are automatically delegated via embedding. + +// Ready manages lifecycle of both ledger and compactor. +// Signals when initialization (WAL replay) is complete and compactor is ready. +// Overrides the embedded Ledger.Ready() to coordinate with the compactor. +func (lwc *LedgerWithCompactor) Ready() <-chan struct{} { + ready := make(chan struct{}) + go func() { + defer close(ready) + + // Wait for ledger initialization (WAL replay) to complete + <-lwc.Ledger.Ready() + + // Start compactor + <-lwc.compactor.Ready() + + lwc.logger.Info().Msg("ledger with compactor ready") + }() + return ready +} + +// Done manages shutdown of both ledger and compactor. +// Overrides the embedded Ledger.Done() to coordinate with the compactor. 
+func (lwc *LedgerWithCompactor) Done() <-chan struct{} { + done := make(chan struct{}) + go func() { + defer close(done) + + lwc.logger.Info().Msg("stopping ledger with compactor...") + + // Close the trie update channel first so the compactor can drain it + // The compactor's drain loop blocks until the channel is closed. + // Use sync.Once to ensure it's only closed once (ledger.Done() also closes it). + lwc.closeTrieUpdateCh.Do(func() { + close(lwc.trieUpdateCh) + }) + + // Stop compactor first (it needs to finish WAL writes) + <-lwc.compactor.Done() + + lwc.logger.Info().Msg("stopping ledger ...") + + // Then stop ledger + <-lwc.Ledger.Done() + + lwc.logger.Info().Msg("ledger with compactor stopped") + }() + return done +} diff --git a/ledger/complete/wal/checkpoint_v5_test.go b/ledger/complete/wal/checkpoint_v5_test.go index 4422d3376c0..0cd61adf481 100644 --- a/ledger/complete/wal/checkpoint_v5_test.go +++ b/ledger/complete/wal/checkpoint_v5_test.go @@ -4,6 +4,7 @@ import ( "path/filepath" "testing" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" @@ -14,7 +15,7 @@ func TestCopyCheckpointFileV5(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createSimpleTrie(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV5(dir, fileName, logger, tries...), "fail to store checkpoint") to := filepath.Join(dir, "newfolder") newPaths, err := CopyCheckpointFile(fileName, dir, to) diff --git a/ledger/complete/wal/checkpoint_v6_test.go b/ledger/complete/wal/checkpoint_v6_test.go index 83bbcb2a4c7..1e036d3adf6 100644 --- a/ledger/complete/wal/checkpoint_v6_test.go +++ b/ledger/complete/wal/checkpoint_v6_test.go @@ -188,7 +188,7 @@ func createMultipleRandomTriesMini(t *testing.T) ([]*trie.MTrie, *trie.MTrie) { func TestEncodeSubTrie(t *testing.T) { file := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() tries := 
createMultipleRandomTries(t) estimatedSubtrieNodeCount := estimateSubtrieNodeCount(tries[0]) subtrieRoots := createSubTrieRoots(tries) @@ -287,7 +287,7 @@ func TestWriteAndReadCheckpointV6EmptyTrie(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := []*trie.MTrie{trie.NewEmptyMTrie()} fileName := "checkpoint-empty-trie" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") decoded, err := OpenAndReadCheckpointV6(dir, fileName, logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) @@ -299,7 +299,7 @@ func TestWriteAndReadCheckpointV6SimpleTrie(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createSimpleTrie(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") decoded, err := OpenAndReadCheckpointV6(dir, fileName, logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) @@ -311,7 +311,7 @@ func TestWriteAndReadCheckpointV6MultipleTries(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) fileName := "checkpoint-multi-file" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") decoded, err := OpenAndReadCheckpointV6(dir, fileName, logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) @@ -323,7 +323,7 @@ func TestWriteAndReadCheckpointV6MultipleTries(t *testing.T) { func TestCheckpointV6IsDeterminstic(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint1", logger), "fail to 
store checkpoint") require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint2", logger), "fail to store checkpoint") partFiles1 := filePaths(dir, "checkpoint1", subtrieLevel) @@ -342,7 +342,7 @@ func TestWriteAndReadCheckpointV6LeafEmptyTrie(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := []*trie.MTrie{trie.NewEmptyMTrie()} fileName := "checkpoint-empty-trie" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") bufSize := 10 @@ -361,7 +361,7 @@ func TestWriteAndReadCheckpointV6LeafSimpleTrie(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createSimpleTrie(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") bufSize := 1 leafNodesCh := make(chan *LeafNode, bufSize) @@ -385,7 +385,7 @@ func TestWriteAndReadCheckpointV6LeafMultipleTriesFail(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { fileName := "checkpoint-multi-leaf-file" tries, _ := createMultipleRandomTriesMini(t) - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") bufSize := 5 leafNodesCh := make(chan *LeafNode, bufSize) @@ -402,7 +402,7 @@ func TestWriteAndReadCheckpointV6LeafMultipleTriesOK(t *testing.T) { tries := []*trie.MTrie{last} - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") bufSize := 5 leafNodesCh := make(chan *LeafNode, bufSize) @@ -491,7 +491,7 @@ func TestWriteAndReadCheckpointV5(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) fileName := "checkpoint1" - logger := unittest.Logger() + logger := 
zerolog.Nop() require.NoErrorf(t, storeCheckpointV5(tries, dir, fileName, logger), "fail to store checkpoint") decoded, err := LoadCheckpoint(filepath.Join(dir, fileName), logger) @@ -505,7 +505,7 @@ func TestWriteAndReadCheckpointV5(t *testing.T) { func TestWriteAndReadCheckpointV6ThenBackToV5(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) - logger := unittest.Logger() + logger := zerolog.Nop() // store tries into v6 then read back, then store into v5 require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint-v6", logger), "fail to store checkpoint") @@ -534,7 +534,7 @@ func TestCleanupOnErrorIfNotExist(t *testing.T) { t.Run("clean up after finish storing files", func(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) - logger := unittest.Logger() + logger := zerolog.Nop() // store tries into v6 then read back, then store into v5 require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint-v6", logger), "fail to store checkpoint") @@ -565,7 +565,7 @@ func TestAllPartFileExist(t *testing.T) { } require.NoErrorf(t, err, "fail to find sub trie file path") - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") // delete i-th part file, then the error should mention i-th file missing @@ -593,7 +593,7 @@ func TestAllPartFileExistLeafReader(t *testing.T) { } require.NoErrorf(t, err, "fail to find sub trie file path") - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") // delete i-th part file, then the error should mention i-th file missing @@ -613,7 +613,7 @@ func TestCannotStoreTwice(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createSimpleTrie(t) fileName := "checkpoint" - logger := 
unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") // checkpoint already exist, can't store again require.Error(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger)) @@ -640,7 +640,7 @@ func TestCopyCheckpointFileV6(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createSimpleTrie(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") to := filepath.Join(dir, "newfolder") newPaths, err := CopyCheckpointFile(fileName, dir, to) @@ -656,7 +656,7 @@ func TestReadCheckpointRootHash(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createSimpleTrie(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") trieRoots, err := ReadTriesRootHash(logger, dir, fileName) @@ -673,7 +673,7 @@ func TestReadCheckpointRootHashValidateChecksum(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createSimpleTrie(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") // add a wrong checksum to top trie file @@ -700,7 +700,7 @@ func TestReadCheckpointRootHashMulti(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") trieRoots, err := ReadTriesRootHash(logger, dir, fileName) @@ -717,7 +717,7 @@ func TestCheckpointHasRootHash(t *testing.T) { unittest.RunWithTempDir(t, func(dir 
string) { tries := createMultipleRandomTries(t) fileName := "checkpoint" - logger := unittest.Logger() + logger := zerolog.Nop() require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") trieRoots, err := ReadTriesRootHash(logger, dir, fileName) diff --git a/ledger/complete/wal/checkpointer.go b/ledger/complete/wal/checkpointer.go index b67f2385440..2c1aeead713 100644 --- a/ledger/complete/wal/checkpointer.go +++ b/ledger/complete/wal/checkpointer.go @@ -15,7 +15,6 @@ import ( "github.com/docker/go-units" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "golang.org/x/sync/errgroup" "github.com/onflow/flow-go/ledger" @@ -439,7 +438,7 @@ func StoreCheckpointV5(dir string, fileName string, logger zerolog.Logger, tries // Index 0 is a special case with nil node. traversedSubtrieNodes[nil] = 0 - logging := logProgress(fmt.Sprintf("storing %v-th sub trie roots", i), estimatedSubtrieNodeCount, log.Logger) + logging := logProgress(fmt.Sprintf("storing %v-th sub trie roots", i), estimatedSubtrieNodeCount, logger) for _, root := range subTrieRoot { // Empty trie is always added to forest as starting point and // empty trie's root is nil. 
It remains in the forest until evicted @@ -1110,7 +1109,37 @@ func SoftlinkCheckpointFile(filename string, from string, to string) ([]string, newPath := filepath.Join(to, partfile) newPaths[i] = newPath - err := os.Symlink(match, newPath) + // Check if symlink already exists and points to the correct target + if fi, err := os.Lstat(newPath); err == nil { + if fi.Mode()&os.ModeSymlink != 0 { + // Symlink exists, check if it points to the correct target + target, err := os.Readlink(newPath) + if err != nil { + return nil, fmt.Errorf("cannot read existing symlink %v: %w", newPath, err) + } + // Calculate expected relative target for comparison + symlinkDir := filepath.Dir(newPath) + expectedRelTarget, _ := filepath.Rel(symlinkDir, match) + if target == expectedRelTarget || target == match { + // Symlink already exists and points to correct target, skip creation + continue + } + // Symlink exists but points to different target, this is an error + return nil, fmt.Errorf("symlink %v already exists but points to %v instead of %v", newPath, target, match) + } + // Path exists but is not a symlink, this is an error + return nil, fmt.Errorf("path %v already exists but is not a symlink", newPath) + } + + // Create symlink with relative path from newPath to match + // This ensures the symlink works in both host and container contexts + symlinkDir := filepath.Dir(newPath) + relTarget, err := filepath.Rel(symlinkDir, match) + if err != nil { + // If relative path calculation fails, use match as-is + relTarget = match + } + err = os.Symlink(relTarget, newPath) if err != nil { return nil, fmt.Errorf("cannot link file from %v to %v: %w", match, newPath, err) } diff --git a/ledger/complete/wal/checkpointer_test.go b/ledger/complete/wal/checkpointer_test.go index dd46ffdb85e..f69faeb3269 100644 --- a/ledger/complete/wal/checkpointer_test.go +++ b/ledger/complete/wal/checkpointer_test.go @@ -485,16 +485,25 @@ func randomlyModifyFile(t *testing.T, filename string) { file, err := 
os.OpenFile(filename, os.O_RDWR, 0644) require.NoError(t, err) + defer file.Close() fileInfo, err := file.Stat() require.NoError(t, err) fileSize := fileInfo.Size() + if fileSize == 0 { + // Empty file, nothing to modify - this shouldn't happen in normal test scenarios + // but handle gracefully by returning early + return + } + buf := make([]byte, 1) // get some random offset - offset := int64(rand.Int()) % (fileSize + int64(len(buf))) + // Use fileSize (not fileSize + len(buf)) to ensure offset is always < fileSize + // Valid file positions are 0 to fileSize-1 + offset := int64(rand.Int()) % fileSize _, err = file.ReadAt(buf, offset) require.NoError(t, err) diff --git a/ledger/complete/wal/wal.go b/ledger/complete/wal/wal.go index 4f8d04082c2..cbfe9ba6780 100644 --- a/ledger/complete/wal/wal.go +++ b/ledger/complete/wal/wal.go @@ -4,6 +4,7 @@ import ( "fmt" "sort" + "github.com/hashicorp/go-multierror" prometheusWAL "github.com/onflow/wal/wal" "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" @@ -12,6 +13,7 @@ import ( "github.com/onflow/flow-go/ledger/complete/mtrie" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/module" + utilsio "github.com/onflow/flow-go/utils/io" ) const SegmentSize = 32 * 1024 * 1024 // 32 MB @@ -23,21 +25,43 @@ type DiskWAL struct { pathByteSize int log zerolog.Logger dir string + fileLock *utilsio.FileLock } // TODO use real logger and metrics, but that would require passing them to Trie storage func NewDiskWAL(logger zerolog.Logger, reg prometheus.Registerer, metrics module.WALMetrics, dir string, forestCapacity int, pathByteSize int, segmentSize int) (*DiskWAL, error) { + // Acquire exclusive file lock to ensure only one process can write to this WAL directory + fileLock, err := utilsio.NewFileLock(dir) + if err != nil { + panic(fmt.Sprintf("failed to create file lock for WAL directory %s: %v", dir, err)) + } + if err := fileLock.Lock(); err != nil { + // The Lock() method 
returns a complete error message that distinguishes between + // permission denied and lock conflicts. This is a fatal error - the process should crash. + panic(err.Error()) + } + w, err := prometheusWAL.NewSize(logger, reg, dir, segmentSize, false) if err != nil { - return nil, fmt.Errorf("could not create disk wal from dir %v, segmentSize %v: %w", dir, segmentSize, err) + // Release the lock if WAL creation fails + err = fmt.Errorf("could not create disk wal from dir %v, segmentSize %v: %w", dir, segmentSize, err) + if unlockErr := fileLock.Unlock(); unlockErr != nil { + err = multierror.Append(err, fmt.Errorf("failed to release file lock: %w", unlockErr)) + } + return nil, err } + + log := logger.With().Str("ledger_mod", "diskwal").Logger() + log.Info().Str("lock_path", fileLock.Path()).Msg("acquired exclusive lock on WAL directory") + return &DiskWAL{ wal: w, paused: false, forestCapacity: forestCapacity, pathByteSize: pathByteSize, - log: logger.With().Str("ledger_mod", "diskwal").Logger(), + log: log, dir: dir, + fileLock: fileLock, }, nil } @@ -341,12 +365,22 @@ func (w *DiskWAL) Ready() <-chan struct{} { } // Done implements interface module.ReadyDoneAware -// it closes all the open write-ahead log files. +// it closes all the open write-ahead log files and releases the file lock. 
func (w *DiskWAL) Done() <-chan struct{} { err := w.wal.Close() if err != nil { w.log.Err(err).Msg("error while closing WAL") } + + // Release the file lock + if w.fileLock != nil { + if err := w.fileLock.Unlock(); err != nil { + w.log.Err(err).Msg("error while releasing file lock") + } else { + w.log.Info().Str("lock_path", w.fileLock.Path()).Msg("released exclusive lock on WAL directory") + } + } + done := make(chan struct{}) close(done) return done diff --git a/ledger/config.go b/ledger/config.go new file mode 100644 index 00000000000..4b6f9dcd6ae --- /dev/null +++ b/ledger/config.go @@ -0,0 +1,30 @@ +package ledger + +import ( + "github.com/onflow/flow-go/module" +) + +// Default values for ledger configuration. +const ( + DefaultMTrieCacheSize = 500 + DefaultCheckpointDistance = 20 + DefaultCheckpointsToKeep = 5 +) + +// CompactorConfig holds configuration for ledger compaction. +type CompactorConfig struct { + CheckpointCapacity uint + CheckpointDistance uint + CheckpointsToKeep uint + Metrics module.WALMetrics +} + +// DefaultCompactorConfig returns default compactor configuration. +func DefaultCompactorConfig(metrics module.WALMetrics) *CompactorConfig { + return &CompactorConfig{ + CheckpointCapacity: DefaultMTrieCacheSize, + CheckpointDistance: DefaultCheckpointDistance, + CheckpointsToKeep: DefaultCheckpointsToKeep, + Metrics: metrics, + } +} diff --git a/ledger/factory.go b/ledger/factory.go new file mode 100644 index 00000000000..29d656a6d4e --- /dev/null +++ b/ledger/factory.go @@ -0,0 +1,9 @@ +package ledger + +// Factory creates ledger instances with internal compaction management. +// The compactor lifecycle is managed internally by the ledger. +type Factory interface { + // NewLedger creates a new ledger instance with internal compactor. + // The ledger's Ready() method will signal when initialization (WAL replay) is complete. 
+ NewLedger() (Ledger, error) +} diff --git a/ledger/factory/factory.go b/ledger/factory/factory.go new file mode 100644 index 00000000000..a5120f0fb47 --- /dev/null +++ b/ledger/factory/factory.go @@ -0,0 +1,117 @@ +package factory + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/pathfinder" + "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/ledger/remote" + "github.com/onflow/flow-go/module" +) + +// Config holds configuration for creating a ledger instance. +type Config struct { + // Remote ledger service configuration + LedgerServiceAddr string // gRPC address for remote ledger service (empty means use local ledger) + LedgerMaxRequestSize uint // Maximum request message size in bytes for remote ledger client (0 = default 1 GiB) + LedgerMaxResponseSize uint // Maximum response message size in bytes for remote ledger client (0 = default 1 GiB) + + // Local ledger configuration + Triedir string + MTrieCacheSize uint32 + CheckpointDistance uint + CheckpointsToKeep uint + MetricsRegisterer prometheus.Registerer + WALMetrics module.WALMetrics + LedgerMetrics module.LedgerMetrics + Logger zerolog.Logger +} + +// NewLedger creates a ledger instance based on the configuration. +// If LedgerServiceAddr is set, it creates a remote ledger client. +// Otherwise, it creates a local ledger with WAL and compactor. +// triggerCheckpoint is a runtime control signal to trigger checkpoint on next segment finish (can be nil for remote ledger). +func NewLedger(config Config, triggerCheckpoint *atomic.Bool) (ledger.Ledger, error) { + if config.LedgerServiceAddr != "" { + return newRemoteLedger(config) + } + return newLocalLedger(config, triggerCheckpoint) +} + +// newRemoteLedger creates a remote ledger client that connects to a ledger service. 
+func newRemoteLedger(config Config) (ledger.Ledger, error) { + config.Logger.Info(). + Str("ledger_service_addr", config.LedgerServiceAddr). + Msg("using remote ledger service") + + factory := remote.NewRemoteLedgerFactory( + config.LedgerServiceAddr, + config.Logger.With().Str("subcomponent", "ledger").Logger(), + config.LedgerMaxRequestSize, + config.LedgerMaxResponseSize, + ) + + ledgerStorage, err := factory.NewLedger() + if err != nil { + return nil, fmt.Errorf("failed to create remote ledger: %w", err) + } + + return ledgerStorage, nil +} + +// newLocalLedger creates a local ledger with WAL and compactor. +func newLocalLedger(config Config, triggerCheckpoint *atomic.Bool) (ledger.Ledger, error) { + // the local ledger service is used when: + // 1. execution node is running ledger in local + // 2. the standalone ledger service is running it in local + + config.Logger.Info(). + Str("triedir", config.Triedir). + Msg("using local ledger") + + // Create WAL + diskWal, err := wal.NewDiskWAL( + config.Logger.With().Str("subcomponent", "wal").Logger(), + config.MetricsRegisterer, + config.WALMetrics, + config.Triedir, + int(config.MTrieCacheSize), + pathfinder.PathByteSize, + wal.SegmentSize, + ) + if err != nil { + return nil, fmt.Errorf("failed to initialize wal: %w", err) + } + + // Create compactor config + compactorConfig := &ledger.CompactorConfig{ + CheckpointCapacity: uint(config.MTrieCacheSize), + CheckpointDistance: config.CheckpointDistance, + CheckpointsToKeep: config.CheckpointsToKeep, + Metrics: config.WALMetrics, + } + + // Use factory to create ledger with internal compactor + factory := complete.NewLocalLedgerFactory( + diskWal, + int(config.MTrieCacheSize), + compactorConfig, + triggerCheckpoint, + config.LedgerMetrics, + config.Logger.With().Str("subcomponent", "ledger").Logger(), + complete.DefaultPathFinderVersion, + ) + + ledgerStorage, err := factory.NewLedger() + if err != nil { + return nil, fmt.Errorf("failed to create local ledger: %w", 
err) + } + + return ledgerStorage, nil +} diff --git a/ledger/factory/factory_test.go b/ledger/factory/factory_test.go new file mode 100644 index 00000000000..d42b50dd09d --- /dev/null +++ b/ledger/factory/factory_test.go @@ -0,0 +1,500 @@ +package factory + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "google.golang.org/grpc" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/pathfinder" + "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete/wal" + ledgerpb "github.com/onflow/flow-go/ledger/protobuf" + "github.com/onflow/flow-go/ledger/remote" + "github.com/onflow/flow-go/module/metrics" +) + +// TestRemoteLedgerClient creates a local ledger and a remote ledger client, +// and tests that they behave identically for various operations. 
+func TestRemoteLedgerClient(t *testing.T) { + withLedgerPair(t, func(localLedger, remoteLedger ledger.Ledger) { + + t.Run("InitialState", func(t *testing.T) { + localState := localLedger.InitialState() + remoteState := remoteLedger.InitialState() + + // Both should return the same initial state + assert.Equal(t, localState, remoteState, "InitialState should be the same for local and remote ledger") + assert.NotEqual(t, ledger.DummyState, localState) + assert.NotEqual(t, ledger.DummyState, remoteState) + }) + + t.Run("HasState", func(t *testing.T) { + localInitialState := localLedger.InitialState() + remoteInitialState := remoteLedger.InitialState() + + // Both should have the same initial state + assert.Equal(t, localInitialState, remoteInitialState) + + localHasState := localLedger.HasState(localInitialState) + remoteHasState := remoteLedger.HasState(remoteInitialState) + assert.Equal(t, localHasState, remoteHasState, "HasState should return the same result for local and remote ledger") + assert.True(t, localHasState) + + // Test with non-existent state + dummyState := ledger.DummyState + localHasState = localLedger.HasState(dummyState) + remoteHasState = remoteLedger.HasState(dummyState) + assert.Equal(t, localHasState, remoteHasState, "HasState for non-existent state should return the same result") + assert.False(t, localHasState) + }) + + t.Run("GetSingleValue", func(t *testing.T) { + localInitialState := localLedger.InitialState() + remoteInitialState := remoteLedger.InitialState() + assert.Equal(t, localInitialState, remoteInitialState) + + // Create a test key + key := ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key")), + }) + + localQuery, err := ledger.NewQuerySingleValue(localInitialState, key) + require.NoError(t, err) + remoteQuery, err := ledger.NewQuerySingleValue(remoteInitialState, key) + require.NoError(t, err) + + localValue, err := 
localLedger.GetSingleValue(localQuery) + require.NoError(t, err) + remoteValue, err := remoteLedger.GetSingleValue(remoteQuery) + require.NoError(t, err) + + // Both should return the same value + assert.Equal(t, localValue, remoteValue, "GetSingleValue should return the same value for local and remote ledger") + assert.Equal(t, ledger.Value([]byte{}), localValue) + assert.Equal(t, 0, len(localValue)) + }) + + t.Run("Get", func(t *testing.T) { + localInitialState := localLedger.InitialState() + remoteInitialState := remoteLedger.InitialState() + assert.Equal(t, localInitialState, remoteInitialState) + + // Create test keys + keys := []ledger.Key{ + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner1")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key1")), + }), + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner2")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key2")), + }), + } + + localQuery, err := ledger.NewQuery(localInitialState, keys) + require.NoError(t, err) + remoteQuery, err := ledger.NewQuery(remoteInitialState, keys) + require.NoError(t, err) + + localValues, err := localLedger.Get(localQuery) + require.NoError(t, err) + remoteValues, err := remoteLedger.Get(remoteQuery) + require.NoError(t, err) + + // Both should return the same values + require.Len(t, localValues, 2) + require.Len(t, remoteValues, 2) + assert.Equal(t, localValues, remoteValues, "Get should return the same values for local and remote ledger") + assert.Equal(t, ledger.Value([]byte{}), localValues[0]) + assert.Equal(t, 0, len(localValues[0])) + }) + + t.Run("Set", func(t *testing.T) { + localInitialState := localLedger.InitialState() + remoteInitialState := remoteLedger.InitialState() + assert.Equal(t, localInitialState, remoteInitialState) + + // Create test keys and values + keys := []ledger.Key{ + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, 
[]byte("test-owner")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key-non-empty")), + }), + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner-1")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key-empty-slice")), + }), + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner-2")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key-nil")), + }), + } + values := []ledger.Value{ + ledger.Value("test-value"), + ledger.Value([]byte{}), + ledger.Value(nil), + } + + localUpdate, err := ledger.NewUpdate(localInitialState, keys, values) + require.NoError(t, err) + remoteUpdate, err := ledger.NewUpdate(remoteInitialState, keys, values) + require.NoError(t, err) + + localNewState, localTrieUpdate, err := localLedger.Set(localUpdate) + require.NoError(t, err) + remoteNewState, remoteTrieUpdate, err := remoteLedger.Set(remoteUpdate) + require.NoError(t, err) + + // Both should return the same new state + assert.Equal(t, localNewState, remoteNewState, "Set should return the same new state for local and remote ledger") + assert.NotEqual(t, ledger.DummyState, localNewState) + assert.NotEqual(t, localInitialState, localNewState) + + // Both should return non-nil trie updates + assert.NotNil(t, localTrieUpdate) + assert.NotNil(t, remoteTrieUpdate) + + // Verify that both trie updates produce identical CBOR encodings + // This ensures that nil vs empty slice distinction is preserved correctly + // through the remote ledger's protobuf encoding/decoding cycle + localTrieUpdateCBOR := ledger.EncodeTrieUpdateCBOR(localTrieUpdate) + remoteTrieUpdateCBOR := ledger.EncodeTrieUpdateCBOR(remoteTrieUpdate) + assert.Equal(t, localTrieUpdateCBOR, remoteTrieUpdateCBOR, + "Trie updates must produce identical CBOR encodings. 
"+ + "Local CBOR length: %d, Remote CBOR length: %d", + len(localTrieUpdateCBOR), len(remoteTrieUpdateCBOR)) + + // Verify we can read back the value from both + localQuery, err := ledger.NewQuerySingleValue(localNewState, keys[0]) + require.NoError(t, err) + remoteQuery, err := ledger.NewQuerySingleValue(remoteNewState, keys[0]) + require.NoError(t, err) + + localValue, err := localLedger.GetSingleValue(localQuery) + require.NoError(t, err) + remoteValue, err := remoteLedger.GetSingleValue(remoteQuery) + require.NoError(t, err) + + // Both should return the same value + assert.Equal(t, localValue, remoteValue, "GetSingleValue after Set should return the same value") + assert.Equal(t, ledger.Value("test-value"), localValue) + }) + + t.Run("Prove", func(t *testing.T) { + localInitialState := localLedger.InitialState() + remoteInitialState := remoteLedger.InitialState() + assert.Equal(t, localInitialState, remoteInitialState) + + // Create test key + key := ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key")), + }) + + localQuery, err := ledger.NewQuery(localInitialState, []ledger.Key{key}) + require.NoError(t, err) + remoteQuery, err := ledger.NewQuery(remoteInitialState, []ledger.Key{key}) + require.NoError(t, err) + + localProof, err := localLedger.Prove(localQuery) + require.NoError(t, err) + remoteProof, err := remoteLedger.Prove(remoteQuery) + require.NoError(t, err) + + // Both should return proofs of the same length + assert.NotNil(t, localProof) + assert.NotNil(t, remoteProof) + assert.Equal(t, len(localProof), len(remoteProof), "Prove should return proofs of the same length") + assert.Greater(t, len(localProof), 0) + }) + }) +} + +// TestTrieUpdatePayloadValueEquivalence verifies that trie updates with three different +// payload value representations (non-empty, empty slice, nil) produce the same result ID +// for both remote ledger and local ledger. 
+// +// This test ensures that: +// 1. State commitments match between local and remote ledgers for all three value types +// 2. Execution data IDs are identical across all three scenarios +// 3. The nil vs empty slice distinction is properly preserved through protobuf encoding +func TestTrieUpdatePayloadValueEquivalence(t *testing.T) { + withLedgerPair(t, func(localLedger, remoteLedger ledger.Ledger) { + + // Get initial state (should be the same for both) + localInitialState := localLedger.InitialState() + remoteInitialState := remoteLedger.InitialState() + require.Equal(t, localInitialState, remoteInitialState, "Initial states must match") + + // Create three test keys for the three different payload value types + keys := []ledger.Key{ + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key-non-empty")), + }), + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key-empty-slice")), + }), + ledger.NewKey([]ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("test-owner")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("test-key-nil")), + }), + } + + // Create a single update with three different payload value representations: + // 1. Non-empty payload value + // 2. Empty slice payload value + // 3. 
Nil payload value + values := []ledger.Value{ + ledger.Value([]byte{1, 2, 3}), // non-empty + ledger.Value([]byte{}), // empty slice + ledger.Value(nil), // nil + } + + t.Logf("Creating single trie update with three payloads: non-empty, empty slice, and nil") + + // Create updates for both ledgers with all three values in a single update + localUpdate, err := ledger.NewUpdate(localInitialState, keys, values) + require.NoError(t, err, "Failed to create local update") + + remoteUpdate, err := ledger.NewUpdate(remoteInitialState, keys, values) + require.NoError(t, err, "Failed to create remote update") + + // Apply the single update to both ledgers + localNewState, localTrieUpdate, err := localLedger.Set(localUpdate) + require.NoError(t, err, "Failed to apply local update") + + remoteNewState, remoteTrieUpdate, err := remoteLedger.Set(remoteUpdate) + require.NoError(t, err, "Failed to apply remote update") + + // Verify state commitments match + assert.Equal(t, localNewState, remoteNewState, + "State commitments must match between local and remote ledger") + assert.NotEqual(t, ledger.DummyState, localNewState, + "State should not be dummy state") + + // Verify trie updates are not nil + require.NotNil(t, localTrieUpdate, "Local trie update should not be nil") + require.NotNil(t, remoteTrieUpdate, "Remote trie update should not be nil") + + // Verify both trie updates have the same number of payloads + require.Equal(t, len(localTrieUpdate.Payloads), len(remoteTrieUpdate.Payloads), + "Local and remote trie updates should have the same number of payloads") + require.Equal(t, 3, len(localTrieUpdate.Payloads), + "Trie update should contain exactly 3 payloads") + + t.Logf("Trie update contains %d payloads", len(localTrieUpdate.Payloads)) + t.Logf("Payload 0 (non-empty) value length: %d", len(localTrieUpdate.Payloads[0].Value())) + t.Logf("Payload 1 (empty slice) value length: %d, is nil: %v", len(localTrieUpdate.Payloads[1].Value()), localTrieUpdate.Payloads[1].Value() == 
nil) + t.Logf("Payload 2 (nil) value length: %d, is nil: %v", len(localTrieUpdate.Payloads[2].Value()), localTrieUpdate.Payloads[2].Value() == nil) + + // Create ChunkExecutionData from the local trie update + collection := unittest.CollectionFixture(1) + localChunkExecutionData := &execution_data.ChunkExecutionData{ + Collection: &collection, + Events: flow.EventsList{}, + TrieUpdate: localTrieUpdate, + TransactionResults: []flow.LightTransactionResult{}, + } + + // Create ChunkExecutionData from the remote trie update + remoteChunkExecutionData := &execution_data.ChunkExecutionData{ + Collection: &collection, + Events: flow.EventsList{}, + TrieUpdate: remoteTrieUpdate, + TransactionResults: []flow.LightTransactionResult{}, + } + + // Create BlockExecutionData for both + blockID := unittest.IdentifierFixture() + localBlockExecutionData := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{localChunkExecutionData}, + } + + remoteBlockExecutionData := &execution_data.BlockExecutionData{ + BlockID: blockID, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{remoteChunkExecutionData}, + } + + // Calculate execution data IDs using the default serializer + serializer := execution_data.DefaultSerializer + ctx := context.Background() + + localExecutionDataID, err := execution_data.CalculateID(ctx, localBlockExecutionData, serializer) + require.NoError(t, err, "Failed to calculate local execution data ID") + + remoteExecutionDataID, err := execution_data.CalculateID(ctx, remoteBlockExecutionData, serializer) + require.NoError(t, err, "Failed to calculate remote execution data ID") + + // The key assertion: local and remote execution data IDs must match + // This verifies that the remote ledger properly preserves the nil vs empty slice + // distinction through protobuf encoding, ensuring deterministic CBOR serialization + assert.Equal(t, localExecutionDataID, remoteExecutionDataID, + "Execution data 
IDs must match between local and remote ledger. "+ + "Local ID: %s, Remote ID: %s", + localExecutionDataID, remoteExecutionDataID) + + t.Logf("Test completed successfully.") + t.Logf("State commitment: %s", localNewState) + t.Logf("Execution data ID (local and remote match): %s", localExecutionDataID) + }) +} + +// startLedgerServer starts a ledger server on a random port and returns the address and cleanup function. +func startLedgerServer(t *testing.T, walDir string) (string, func()) { + // Find an available port + listener, err := net.Listen("tcp", ":0") + require.NoError(t, err) + addr := listener.Addr().String() + listener.Close() + + logger := unittest.Logger() + + // Create WAL + metricsCollector := &metrics.NoopCollector{} + diskWal, err := wal.NewDiskWAL( + logger, + nil, + metricsCollector, + walDir, + 100, + pathfinder.PathByteSize, + wal.SegmentSize, + ) + require.NoError(t, err) + + // Create compactor config + compactorConfig := ledger.DefaultCompactorConfig(metricsCollector) + + // Create ledger factory + factory := complete.NewLocalLedgerFactory( + diskWal, + 100, + compactorConfig, + atomic.NewBool(false), // trigger checkpoint signal + metricsCollector, + logger, + complete.DefaultPathFinderVersion, + ) + + // Create ledger instance + ledgerStorage, err := factory.NewLedger() + require.NoError(t, err) + + // Wait for ledger to be ready (WAL replay) + <-ledgerStorage.Ready() + + // Create gRPC server with max message size configuration + // Use large limits to match production defaults (1 GiB for both) + grpcServer := grpc.NewServer( + grpc.MaxRecvMsgSize(1<<30), // 1 GiB for requests + grpc.MaxSendMsgSize(1<<30), // 1 GiB for responses + ) + + // Create and register ledger service + ledgerService := remote.NewService(ledgerStorage, logger) + ledgerpb.RegisterLedgerServiceServer(grpcServer, ledgerService) + + // Start gRPC server + lis, err := net.Listen("tcp", addr) + require.NoError(t, err) + + // Start server in goroutine + serverErr := 
make(chan error, 1) + go func() { + if err := grpcServer.Serve(lis); err != nil { + serverErr <- fmt.Errorf("gRPC server error: %w", err) + } + }() + + // Wait a bit for server to start + time.Sleep(100 * time.Millisecond) + + // Cleanup function + cleanup := func() { + grpcServer.GracefulStop() + <-ledgerStorage.Done() + } + + return addr, cleanup +} + +// withLedgerPair creates both a local and remote ledger instance, handles Ready/Done, +// and automatically cleans up resources after the test function completes. +// The temp directory is automatically cleaned up by t.TempDir(). +func withLedgerPair(t *testing.T, fn func(localLedger, remoteLedger ledger.Ledger)) { + // Create temporary directories for WALs + tempDir := t.TempDir() + remoteWalDir := filepath.Join(tempDir, "remote_wal") + localWalDir := filepath.Join(tempDir, "local_wal") + + err := os.MkdirAll(remoteWalDir, 0755) + require.NoError(t, err) + err = os.MkdirAll(localWalDir, 0755) + require.NoError(t, err) + + // Start ledger server + serverAddr, serverCleanup := startLedgerServer(t, remoteWalDir) + + logger := zerolog.Nop() + metricsCollector := &metrics.NoopCollector{} + + // Create local ledger using factory + localLedger, err := NewLedger(Config{ + Triedir: localWalDir, + MTrieCacheSize: 100, + CheckpointDistance: 1000, + CheckpointsToKeep: 10, + MetricsRegisterer: nil, + WALMetrics: metricsCollector, + LedgerMetrics: metricsCollector, + Logger: logger, + }, atomic.NewBool(false)) + require.NoError(t, err) + require.NotNil(t, localLedger) + + // Create remote client using factory + remoteLedger, err := NewLedger(Config{ + LedgerServiceAddr: serverAddr, + Logger: logger, + }, nil) + require.NoError(t, err) + require.NotNil(t, remoteLedger) + + // Wait for both to be ready + <-localLedger.Ready() + <-remoteLedger.Ready() + + // Ensure cleanup happens even if the test function panics + defer func() { + // Stop remote ledger + <-remoteLedger.Done() + + // Stop local ledger (WAL cleanup is handled 
internally by the ledger) + <-localLedger.Done() + + // Stop server + serverCleanup() + }() + + // Execute the test function with the ledgers + fn(localLedger, remoteLedger) +} diff --git a/ledger/ledger.go b/ledger/ledger.go index 3e5b8c2a906..c7255648b31 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -39,6 +39,13 @@ type Ledger interface { // Prove returns proofs for the given keys at specific state Prove(query *Query) (proof Proof, err error) + + // StateCount returns the count + StateCount() int + + // StateByIndex returns the state at the given index + // -1 is the last index + StateByIndex(index int) (State, error) } // Query holds all data needed for a ledger read or ledger proof diff --git a/ledger/mock/factory.go b/ledger/mock/factory.go new file mode 100644 index 00000000000..4c26a640169 --- /dev/null +++ b/ledger/mock/factory.go @@ -0,0 +1,92 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mock + +import ( + "github.com/onflow/flow-go/ledger" + mock "github.com/stretchr/testify/mock" +) + +// NewFactory creates a new instance of Factory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewFactory(t interface { + mock.TestingT + Cleanup(func()) +}) *Factory { + mock := &Factory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// Factory is an autogenerated mock type for the Factory type +type Factory struct { + mock.Mock +} + +type Factory_Expecter struct { + mock *mock.Mock +} + +func (_m *Factory) EXPECT() *Factory_Expecter { + return &Factory_Expecter{mock: &_m.Mock} +} + +// NewLedger provides a mock function for the type Factory +func (_mock *Factory) NewLedger() (ledger.Ledger, error) { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for NewLedger") + } + + var r0 ledger.Ledger + var r1 error + if returnFunc, ok := ret.Get(0).(func() (ledger.Ledger, error)); ok { + return returnFunc() + } + if returnFunc, ok := ret.Get(0).(func() ledger.Ledger); ok { + r0 = returnFunc() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ledger.Ledger) + } + } + if returnFunc, ok := ret.Get(1).(func() error); ok { + r1 = returnFunc() + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// Factory_NewLedger_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewLedger' +type Factory_NewLedger_Call struct { + *mock.Call +} + +// NewLedger is a helper method to define mock.On call +func (_e *Factory_Expecter) NewLedger() *Factory_NewLedger_Call { + return &Factory_NewLedger_Call{Call: _e.mock.On("NewLedger")} +} + +func (_c *Factory_NewLedger_Call) Run(run func()) *Factory_NewLedger_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Factory_NewLedger_Call) Return(ledger1 ledger.Ledger, err error) *Factory_NewLedger_Call { + _c.Call.Return(ledger1, err) + return _c +} + +func (_c *Factory_NewLedger_Call) RunAndReturn(run func() (ledger.Ledger, error)) *Factory_NewLedger_Call { + _c.Call.Return(run) + return _c +} diff --git a/ledger/mock/ledger.go b/ledger/mock/ledger.go index 
db00ab95206..8bad7e84dcd 100644 --- a/ledger/mock/ledger.go +++ b/ledger/mock/ledger.go @@ -480,3 +480,109 @@ func (_c *Ledger_Set_Call) RunAndReturn(run func(update *ledger.Update) (ledger. _c.Call.Return(run) return _c } + +// StateByIndex provides a mock function for the type Ledger +func (_mock *Ledger) StateByIndex(index int) (ledger.State, error) { + ret := _mock.Called(index) + + if len(ret) == 0 { + panic("no return value specified for StateByIndex") + } + + var r0 ledger.State + var r1 error + if returnFunc, ok := ret.Get(0).(func(int) (ledger.State, error)); ok { + return returnFunc(index) + } + if returnFunc, ok := ret.Get(0).(func(int) ledger.State); ok { + r0 = returnFunc(index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ledger.State) + } + } + if returnFunc, ok := ret.Get(1).(func(int) error); ok { + r1 = returnFunc(index) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// Ledger_StateByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StateByIndex' +type Ledger_StateByIndex_Call struct { + *mock.Call +} + +// StateByIndex is a helper method to define mock.On call +// - index int +func (_e *Ledger_Expecter) StateByIndex(index interface{}) *Ledger_StateByIndex_Call { + return &Ledger_StateByIndex_Call{Call: _e.mock.On("StateByIndex", index)} +} + +func (_c *Ledger_StateByIndex_Call) Run(run func(index int)) *Ledger_StateByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 int + if args[0] != nil { + arg0 = args[0].(int) + } + run( + arg0, + ) + }) + return _c +} + +func (_c *Ledger_StateByIndex_Call) Return(state ledger.State, err error) *Ledger_StateByIndex_Call { + _c.Call.Return(state, err) + return _c +} + +func (_c *Ledger_StateByIndex_Call) RunAndReturn(run func(index int) (ledger.State, error)) *Ledger_StateByIndex_Call { + _c.Call.Return(run) + return _c +} + +// StateCount provides a mock function for the type Ledger +func (_mock *Ledger) StateCount() int { + 
ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for StateCount") + } + + var r0 int + if returnFunc, ok := ret.Get(0).(func() int); ok { + r0 = returnFunc() + } else { + r0 = ret.Get(0).(int) + } + return r0 +} + +// Ledger_StateCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StateCount' +type Ledger_StateCount_Call struct { + *mock.Call +} + +// StateCount is a helper method to define mock.On call +func (_e *Ledger_Expecter) StateCount() *Ledger_StateCount_Call { + return &Ledger_StateCount_Call{Call: _e.mock.On("StateCount")} +} + +func (_c *Ledger_StateCount_Call) Run(run func()) *Ledger_StateCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Ledger_StateCount_Call) Return(n int) *Ledger_StateCount_Call { + _c.Call.Return(n) + return _c +} + +func (_c *Ledger_StateCount_Call) RunAndReturn(run func() int) *Ledger_StateCount_Call { + _c.Call.Return(run) + return _c +} diff --git a/ledger/partial/ledger.go b/ledger/partial/ledger.go index 33b3d141935..a807556af5d 100644 --- a/ledger/partial/ledger.go +++ b/ledger/partial/ledger.go @@ -164,3 +164,18 @@ func (l *Ledger) Set(update *ledger.Update) (newState ledger.State, trieUpdate * func (l *Ledger) Prove(query *ledger.Query) (proof ledger.Proof, err error) { return nil, err } + +// StateCount returns the number of states in the partial ledger +// Partial ledger only has one state +func (l *Ledger) StateCount() int { + return 1 +} + +// StateByIndex returns the state at the given index +// Partial ledger only has one state +func (l *Ledger) StateByIndex(index int) (ledger.State, error) { + if index == 0 || index == -1 { + return l.state, nil + } + return ledger.DummyState, fmt.Errorf("index %d is out of range (partial ledger has 1 state)", index) +} diff --git a/ledger/partial/ledger_test.go b/ledger/partial/ledger_test.go index 209bf707ed0..dd51343918e 100644 --- 
a/ledger/partial/ledger_test.go +++ b/ledger/partial/ledger_test.go @@ -169,3 +169,89 @@ func TestEmptyLedger(t *testing.T) { require.True(t, trieUpdate.IsEmpty()) require.Equal(t, u.State(), newState) } + +func TestPartialLedger_StateCount(t *testing.T) { + w := &fixtures.NoopWAL{} + + l, err := complete.NewLedger(w, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) + require.NoError(t, err) + + compactor := fixtures.NewNoopCompactor(l) + <-compactor.Ready() + + defer func() { + <-l.Done() + <-compactor.Done() + }() + + // create update and get proof + state := l.InitialState() + keys := testutils.RandomUniqueKeys(2, 2, 2, 4) + values := testutils.RandomValues(2, 1, 32) + update, err := ledger.NewUpdate(state, keys, values) + require.NoError(t, err) + + newState, _, err := l.Set(update) + require.NoError(t, err) + + query, err := ledger.NewQuery(newState, keys) + require.NoError(t, err) + proof, err := l.Prove(query) + require.NoError(t, err) + + pled, err := partial.NewLedger(proof, newState, partial.DefaultPathFinderVersion) + require.NoError(t, err) + + // Partial ledger should always have exactly one state + stateCount := pled.StateCount() + assert.Equal(t, 1, stateCount, "partial ledger should have exactly one state") +} + +func TestPartialLedger_StateByIndex(t *testing.T) { + w := &fixtures.NoopWAL{} + + l, err := complete.NewLedger(w, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) + require.NoError(t, err) + + compactor := fixtures.NewNoopCompactor(l) + <-compactor.Ready() + + defer func() { + <-l.Done() + <-compactor.Done() + }() + + // create update and get proof + state := l.InitialState() + keys := testutils.RandomUniqueKeys(2, 2, 2, 4) + values := testutils.RandomValues(2, 1, 32) + update, err := ledger.NewUpdate(state, keys, values) + require.NoError(t, err) + + newState, _, err := l.Set(update) + require.NoError(t, err) + + query, err := ledger.NewQuery(newState, keys) + 
require.NoError(t, err) + proof, err := l.Prove(query) + require.NoError(t, err) + + pled, err := partial.NewLedger(proof, newState, partial.DefaultPathFinderVersion) + require.NoError(t, err) + + // Test getting state at index 0 (only valid index for partial ledger) + state0, err := pled.StateByIndex(0) + require.NoError(t, err) + assert.Equal(t, newState, state0, "state at index 0 should match the ledger state") + + // Test that other indices fail + _, err = pled.StateByIndex(1) + require.Error(t, err, "should error for index out of range") + + state_1, err := pled.StateByIndex(-1) + require.NoError(t, err) + assert.Equal(t, newState, state_1, "state at index -1 should match the ledger state") + + _, err = pled.StateByIndex(-2) + require.Error(t, err, "should error for negative index out of range") +} diff --git a/ledger/protobuf/buf.gen.yaml b/ledger/protobuf/buf.gen.yaml new file mode 100644 index 00000000000..05f96e382fd --- /dev/null +++ b/ledger/protobuf/buf.gen.yaml @@ -0,0 +1,11 @@ +version: v1beta1 +plugins: + - name: go + out: . + opt: + - paths=source_relative + - name: go-grpc + out: . + opt: + - paths=source_relative + diff --git a/ledger/protobuf/buf.yaml b/ledger/protobuf/buf.yaml new file mode 100644 index 00000000000..25204840201 --- /dev/null +++ b/ledger/protobuf/buf.yaml @@ -0,0 +1,8 @@ +version: v1 +breaking: + use: + - FILE +lint: + use: + - DEFAULT + diff --git a/ledger/protobuf/ledger.pb.go b/ledger/protobuf/ledger.pb.go new file mode 100644 index 00000000000..602d79a9ba9 --- /dev/null +++ b/ledger/protobuf/ledger.pb.go @@ -0,0 +1,753 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: ledger.proto + +package protobuf + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/protobuf/types/known/emptypb" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// State represents a ledger state (32-byte hash) +type State struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *State) Reset() { *m = State{} } +func (m *State) String() string { return proto.CompactTextString(m) } +func (*State) ProtoMessage() {} +func (*State) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{0} +} + +func (m *State) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_State.Unmarshal(m, b) +} +func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_State.Marshal(b, m, deterministic) +} +func (m *State) XXX_Merge(src proto.Message) { + xxx_messageInfo_State.Merge(m, src) +} +func (m *State) XXX_Size() int { + return xxx_messageInfo_State.Size(m) +} +func (m *State) XXX_DiscardUnknown() { + xxx_messageInfo_State.DiscardUnknown(m) +} + +var xxx_messageInfo_State proto.InternalMessageInfo + +func (m *State) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// KeyPart represents a part of a hierarchical key +type KeyPart struct { + // type is actually uint16 but uint16 is not available in proto3 + Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyPart) Reset() { *m = 
KeyPart{} } +func (m *KeyPart) String() string { return proto.CompactTextString(m) } +func (*KeyPart) ProtoMessage() {} +func (*KeyPart) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{1} +} + +func (m *KeyPart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyPart.Unmarshal(m, b) +} +func (m *KeyPart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyPart.Marshal(b, m, deterministic) +} +func (m *KeyPart) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyPart.Merge(m, src) +} +func (m *KeyPart) XXX_Size() int { + return xxx_messageInfo_KeyPart.Size(m) +} +func (m *KeyPart) XXX_DiscardUnknown() { + xxx_messageInfo_KeyPart.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyPart proto.InternalMessageInfo + +func (m *KeyPart) GetType() uint32 { + if m != nil { + return m.Type + } + return 0 +} + +func (m *KeyPart) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Key represents a hierarchical ledger key +type Key struct { + Parts []*KeyPart `protobuf:"bytes,1,rep,name=parts,proto3" json:"parts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{2} +} + +func (m *Key) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Key.Unmarshal(m, b) +} +func (m *Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Key.Marshal(b, m, deterministic) +} +func (m *Key) XXX_Merge(src proto.Message) { + xxx_messageInfo_Key.Merge(m, src) +} +func (m *Key) XXX_Size() int { + return xxx_messageInfo_Key.Size(m) +} +func (m *Key) XXX_DiscardUnknown() { + xxx_messageInfo_Key.DiscardUnknown(m) +} + +var xxx_messageInfo_Key 
proto.InternalMessageInfo + +func (m *Key) GetParts() []*KeyPart { + if m != nil { + return m.Parts + } + return nil +} + +// Value represents a ledger value +type Value struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // is_nil distinguishes between nil and []byte{} (empty slice). + // When data is nil or empty: + // - is_nil=true means the original value was nil + // - is_nil=false means the original value was []byte{} (empty slice) + // + // When data is non-empty, is_nil is ignored (should be false). + IsNil bool `protobuf:"varint,2,opt,name=is_nil,json=isNil,proto3" json:"is_nil,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{3} +} + +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +func (m *Value) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Value) GetIsNil() bool { + if m != nil { + return m.IsNil + } + return false +} + +// StateRequest contains a state to query +type StateRequest struct { + State *State `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (m *StateRequest) String() string { return proto.CompactTextString(m) } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{4} +} + +func (m *StateRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StateRequest.Unmarshal(m, b) +} +func (m *StateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StateRequest.Marshal(b, m, deterministic) +} +func (m *StateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateRequest.Merge(m, src) +} +func (m *StateRequest) XXX_Size() int { + return xxx_messageInfo_StateRequest.Size(m) +} +func (m *StateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StateRequest proto.InternalMessageInfo + +func (m *StateRequest) GetState() *State { + if m != nil { + return m.State + } + return nil +} + +// StateResponse contains a state +type StateResponse struct { + State *State `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StateResponse) Reset() { *m = StateResponse{} } +func (m *StateResponse) String() string { return proto.CompactTextString(m) } +func (*StateResponse) ProtoMessage() {} +func (*StateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{5} +} + +func (m *StateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StateResponse.Unmarshal(m, b) +} +func (m *StateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StateResponse.Marshal(b, m, deterministic) +} +func (m *StateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateResponse.Merge(m, src) +} +func (m *StateResponse) XXX_Size() int { + return 
xxx_messageInfo_StateResponse.Size(m) +} +func (m *StateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StateResponse proto.InternalMessageInfo + +func (m *StateResponse) GetState() *State { + if m != nil { + return m.State + } + return nil +} + +// HasStateResponse indicates if a state exists +type HasStateResponse struct { + HasState bool `protobuf:"varint,1,opt,name=has_state,json=hasState,proto3" json:"has_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HasStateResponse) Reset() { *m = HasStateResponse{} } +func (m *HasStateResponse) String() string { return proto.CompactTextString(m) } +func (*HasStateResponse) ProtoMessage() {} +func (*HasStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{6} +} + +func (m *HasStateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HasStateResponse.Unmarshal(m, b) +} +func (m *HasStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HasStateResponse.Marshal(b, m, deterministic) +} +func (m *HasStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HasStateResponse.Merge(m, src) +} +func (m *HasStateResponse) XXX_Size() int { + return xxx_messageInfo_HasStateResponse.Size(m) +} +func (m *HasStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HasStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HasStateResponse proto.InternalMessageInfo + +func (m *HasStateResponse) GetHasState() bool { + if m != nil { + return m.HasState + } + return false +} + +// GetSingleValueRequest contains a query for a single value +type GetSingleValueRequest struct { + State *State `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Key *Key `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSingleValueRequest) Reset() { *m = GetSingleValueRequest{} } +func (m *GetSingleValueRequest) String() string { return proto.CompactTextString(m) } +func (*GetSingleValueRequest) ProtoMessage() {} +func (*GetSingleValueRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{7} +} + +func (m *GetSingleValueRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSingleValueRequest.Unmarshal(m, b) +} +func (m *GetSingleValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSingleValueRequest.Marshal(b, m, deterministic) +} +func (m *GetSingleValueRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSingleValueRequest.Merge(m, src) +} +func (m *GetSingleValueRequest) XXX_Size() int { + return xxx_messageInfo_GetSingleValueRequest.Size(m) +} +func (m *GetSingleValueRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSingleValueRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSingleValueRequest proto.InternalMessageInfo + +func (m *GetSingleValueRequest) GetState() *State { + if m != nil { + return m.State + } + return nil +} + +func (m *GetSingleValueRequest) GetKey() *Key { + if m != nil { + return m.Key + } + return nil +} + +// ValueResponse contains a single value +type ValueResponse struct { + Value *Value `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValueResponse) Reset() { *m = ValueResponse{} } +func (m *ValueResponse) String() string { return proto.CompactTextString(m) } +func (*ValueResponse) ProtoMessage() {} +func (*ValueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{8} +} + +func (m *ValueResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValueResponse.Unmarshal(m, 
b) +} +func (m *ValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValueResponse.Marshal(b, m, deterministic) +} +func (m *ValueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValueResponse.Merge(m, src) +} +func (m *ValueResponse) XXX_Size() int { + return xxx_messageInfo_ValueResponse.Size(m) +} +func (m *ValueResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValueResponse proto.InternalMessageInfo + +func (m *ValueResponse) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// GetRequest contains a query for multiple values +type GetRequest struct { + State *State `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Keys []*Key `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{9} +} + +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) +} +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) +} +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +func (m *GetRequest) GetState() *State { + if m != nil { + return m.State + } + return nil +} + +func (m *GetRequest) GetKeys() []*Key { + if m 
!= nil { + return m.Keys + } + return nil +} + +// GetResponse contains multiple values +type GetResponse struct { + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{10} +} + +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// SetRequest contains an update operation +type SetRequest struct { + State *State `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Keys []*Key `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + Values []*Value `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetRequest) Reset() { *m = SetRequest{} } +func (m *SetRequest) String() string { return proto.CompactTextString(m) } +func (*SetRequest) ProtoMessage() {} +func (*SetRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_63585974d4c6a2c4, []int{11} +} + +func (m *SetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetRequest.Unmarshal(m, b) +} +func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) +} +func (m *SetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetRequest.Merge(m, src) +} +func (m *SetRequest) XXX_Size() int { + return xxx_messageInfo_SetRequest.Size(m) +} +func (m *SetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetRequest proto.InternalMessageInfo + +func (m *SetRequest) GetState() *State { + if m != nil { + return m.State + } + return nil +} + +func (m *SetRequest) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +func (m *SetRequest) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// SetResponse contains the new state after an update +type SetResponse struct { + NewState *State `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + TrieUpdate []byte `protobuf:"bytes,2,opt,name=trie_update,json=trieUpdate,proto3" json:"trie_update,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetResponse) Reset() { *m = SetResponse{} } +func (m *SetResponse) String() string { return proto.CompactTextString(m) } +func (*SetResponse) ProtoMessage() {} +func (*SetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{12} +} + +func (m *SetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetResponse.Unmarshal(m, b) +} +func (m *SetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetResponse.Marshal(b, m, deterministic) +} +func (m *SetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetResponse.Merge(m, 
src) +} +func (m *SetResponse) XXX_Size() int { + return xxx_messageInfo_SetResponse.Size(m) +} +func (m *SetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetResponse proto.InternalMessageInfo + +func (m *SetResponse) GetNewState() *State { + if m != nil { + return m.NewState + } + return nil +} + +func (m *SetResponse) GetTrieUpdate() []byte { + if m != nil { + return m.TrieUpdate + } + return nil +} + +// ProveRequest contains a proof query +type ProveRequest struct { + State *State `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Keys []*Key `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProveRequest) Reset() { *m = ProveRequest{} } +func (m *ProveRequest) String() string { return proto.CompactTextString(m) } +func (*ProveRequest) ProtoMessage() {} +func (*ProveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{13} +} + +func (m *ProveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProveRequest.Unmarshal(m, b) +} +func (m *ProveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProveRequest.Marshal(b, m, deterministic) +} +func (m *ProveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProveRequest.Merge(m, src) +} +func (m *ProveRequest) XXX_Size() int { + return xxx_messageInfo_ProveRequest.Size(m) +} +func (m *ProveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProveRequest proto.InternalMessageInfo + +func (m *ProveRequest) GetState() *State { + if m != nil { + return m.State + } + return nil +} + +func (m *ProveRequest) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +// ProofResponse contains a proof +type ProofResponse struct { + Proof []byte 
`protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProofResponse) Reset() { *m = ProofResponse{} } +func (m *ProofResponse) String() string { return proto.CompactTextString(m) } +func (*ProofResponse) ProtoMessage() {} +func (*ProofResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63585974d4c6a2c4, []int{14} +} + +func (m *ProofResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProofResponse.Unmarshal(m, b) +} +func (m *ProofResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProofResponse.Marshal(b, m, deterministic) +} +func (m *ProofResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofResponse.Merge(m, src) +} +func (m *ProofResponse) XXX_Size() int { + return xxx_messageInfo_ProofResponse.Size(m) +} +func (m *ProofResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProofResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofResponse proto.InternalMessageInfo + +func (m *ProofResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func init() { + proto.RegisterType((*State)(nil), "ledger.State") + proto.RegisterType((*KeyPart)(nil), "ledger.KeyPart") + proto.RegisterType((*Key)(nil), "ledger.Key") + proto.RegisterType((*Value)(nil), "ledger.Value") + proto.RegisterType((*StateRequest)(nil), "ledger.StateRequest") + proto.RegisterType((*StateResponse)(nil), "ledger.StateResponse") + proto.RegisterType((*HasStateResponse)(nil), "ledger.HasStateResponse") + proto.RegisterType((*GetSingleValueRequest)(nil), "ledger.GetSingleValueRequest") + proto.RegisterType((*ValueResponse)(nil), "ledger.ValueResponse") + proto.RegisterType((*GetRequest)(nil), "ledger.GetRequest") + proto.RegisterType((*GetResponse)(nil), "ledger.GetResponse") + proto.RegisterType((*SetRequest)(nil), "ledger.SetRequest") + 
proto.RegisterType((*SetResponse)(nil), "ledger.SetResponse") + proto.RegisterType((*ProveRequest)(nil), "ledger.ProveRequest") + proto.RegisterType((*ProofResponse)(nil), "ledger.ProofResponse") +} + +func init() { proto.RegisterFile("ledger.proto", fileDescriptor_63585974d4c6a2c4) } + +var fileDescriptor_63585974d4c6a2c4 = []byte{ + // 563 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x6a, 0xdb, 0x4c, + 0x10, 0xc5, 0x76, 0xe4, 0xcf, 0x19, 0xd9, 0x5f, 0xcb, 0xd6, 0x2e, 0xc6, 0x26, 0x34, 0xa8, 0x18, + 0xd2, 0x3f, 0x09, 0x6c, 0x5f, 0x15, 0x7a, 0x53, 0x68, 0xdd, 0x92, 0x52, 0x8c, 0xd4, 0xf6, 0x22, + 0xbd, 0x30, 0x72, 0x3c, 0x96, 0x45, 0x14, 0xad, 0xaa, 0x5d, 0xdb, 0xe8, 0x8d, 0xfb, 0x18, 0x65, + 0x7f, 0x14, 0x49, 0x6e, 0x08, 0x0d, 0xe4, 0x46, 0xec, 0xce, 0x9c, 0xb3, 0xe7, 0x8c, 0x66, 0x77, + 0xa0, 0x1d, 0xe1, 0x2a, 0xc0, 0xd4, 0x4e, 0x52, 0xca, 0x29, 0x69, 0xaa, 0xdd, 0x60, 0x18, 0x50, + 0x1a, 0x44, 0xe8, 0xc8, 0xe8, 0x72, 0xbb, 0x76, 0xf0, 0x3a, 0xe1, 0x99, 0x02, 0x59, 0x43, 0x30, + 0x3c, 0xee, 0x73, 0x24, 0x04, 0x8e, 0x36, 0x3e, 0xdb, 0xf4, 0x6b, 0xa7, 0xb5, 0xb3, 0xb6, 0x2b, + 0xd7, 0xd6, 0x04, 0xfe, 0x3b, 0xc7, 0x6c, 0xee, 0xa7, 0x5c, 0xa4, 0x79, 0x96, 0xa0, 0x4c, 0x77, + 0x5c, 0xb9, 0x26, 0x5d, 0x30, 0x76, 0x7e, 0xb4, 0xc5, 0x7e, 0x5d, 0x72, 0xd4, 0xc6, 0x7a, 0x0d, + 0x8d, 0x73, 0xcc, 0xc8, 0x08, 0x8c, 0xc4, 0x4f, 0x39, 0xeb, 0xd7, 0x4e, 0x1b, 0x67, 0xe6, 0xf8, + 0x91, 0xad, 0xbd, 0xe9, 0x03, 0x5d, 0x95, 0xb5, 0xc6, 0x60, 0xfc, 0x10, 0x34, 0x21, 0xb0, 0xf2, + 0xb9, 0x9f, 0xeb, 0x8b, 0x35, 0xe9, 0x41, 0x33, 0x64, 0x8b, 0x38, 0x8c, 0xa4, 0x42, 0xcb, 0x35, + 0x42, 0xf6, 0x35, 0x8c, 0xac, 0x09, 0xb4, 0xa5, 0x67, 0x17, 0x7f, 0x6d, 0x91, 0x71, 0xf2, 0x1c, + 0x0c, 0x26, 0xf6, 0x92, 0x6b, 0x8e, 0x3b, 0xb9, 0x94, 0x02, 0xa9, 0x9c, 0x35, 0x85, 0x8e, 0x26, + 0xb1, 0x84, 0xc6, 0x0c, 0xff, 0x8d, 0xe5, 0xc0, 0xe3, 0x4f, 0x3e, 0xab, 0x12, 0x87, 0x70, 0xbc, + 0xf1, 0xd9, 0xa2, 0x20, 0xb7, 0xdc, 0xd6, 
0x46, 0x83, 0xac, 0x9f, 0xd0, 0x9b, 0x21, 0xf7, 0xc2, + 0x38, 0x88, 0x50, 0x16, 0x76, 0x1f, 0x93, 0xe4, 0x04, 0x1a, 0x57, 0x98, 0xc9, 0x6a, 0xcd, 0xb1, + 0x59, 0xfa, 0x65, 0xae, 0x88, 0x8b, 0x1a, 0xf4, 0x99, 0x45, 0x0d, 0xaa, 0x03, 0x07, 0x87, 0x2a, + 0x94, 0x6e, 0x88, 0x0b, 0x30, 0x43, 0x7e, 0x2f, 0x1f, 0xcf, 0xe0, 0xe8, 0x0a, 0x33, 0xd6, 0xaf, + 0xcb, 0xde, 0x55, 0x8c, 0xc8, 0x84, 0x35, 0x05, 0x53, 0x9e, 0xa9, 0x7d, 0x8c, 0xa0, 0x29, 0xb5, + 0xf2, 0x6e, 0x1f, 0x18, 0xd1, 0x49, 0x2b, 0x03, 0xf0, 0x1e, 0xd8, 0x49, 0x49, 0xba, 0x71, 0x97, + 0xf4, 0x05, 0x98, 0x5e, 0xc9, 0xf0, 0x4b, 0x38, 0x8e, 0x71, 0xbf, 0xb8, 0x43, 0xbf, 0x15, 0xe3, + 0xde, 0xd3, 0x16, 0x4c, 0x9e, 0x86, 0xb8, 0xd8, 0x26, 0x2b, 0x81, 0x56, 0x97, 0x1d, 0x44, 0xe8, + 0xbb, 0x8c, 0x58, 0xdf, 0xa0, 0x3d, 0x4f, 0xe9, 0x0e, 0x1f, 0xf6, 0x17, 0x8f, 0xa0, 0x33, 0x4f, + 0x29, 0x5d, 0xdf, 0x78, 0xee, 0x82, 0x91, 0x88, 0x80, 0x7e, 0x22, 0x6a, 0x33, 0xfe, 0x5d, 0x87, + 0xce, 0x17, 0xc9, 0xf5, 0x30, 0xdd, 0x85, 0x97, 0x48, 0xde, 0x41, 0xfb, 0x73, 0x1c, 0xf2, 0xd0, + 0x8f, 0x94, 0xff, 0xa7, 0xb6, 0x1a, 0x00, 0x76, 0x3e, 0x00, 0xec, 0x0f, 0x62, 0x00, 0x0c, 0x7a, + 0x55, 0x5f, 0xb9, 0xcc, 0x5b, 0x68, 0xe5, 0x57, 0x9e, 0x74, 0x0f, 0x20, 0xb2, 0xbe, 0x41, 0x3f, + 0x8f, 0xfe, 0xf5, 0x34, 0x3e, 0xc2, 0xff, 0xd5, 0xdb, 0x4f, 0x4e, 0x72, 0xec, 0xad, 0xaf, 0xa2, + 0xf0, 0x50, 0xbd, 0xd7, 0x36, 0x34, 0x66, 0xc8, 0x09, 0x29, 0x91, 0x73, 0xc6, 0x93, 0x4a, 0xac, + 0xc0, 0x7b, 0x65, 0xbc, 0x77, 0x0b, 0xbe, 0xdc, 0xfe, 0x29, 0x18, 0xb2, 0x63, 0x45, 0x81, 0xe5, + 0x06, 0x16, 0xae, 0x2a, 0x0d, 0x78, 0xff, 0xea, 0xe2, 0x45, 0x10, 0xf2, 0xcd, 0x76, 0x69, 0x5f, + 0xd2, 0x6b, 0x87, 0xc6, 0xeb, 0x88, 0xee, 0x1d, 0xf1, 0x79, 0x13, 0x50, 0x47, 0x31, 0x6e, 0x86, + 0xec, 0xb2, 0x29, 0x57, 0x93, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xcc, 0x9c, 0x5b, 0x94, + 0x05, 0x00, 0x00, +} diff --git a/ledger/protobuf/ledger.proto b/ledger/protobuf/ledger.proto new file mode 100644 index 00000000000..de24e694b65 --- /dev/null +++ 
b/ledger/protobuf/ledger.proto @@ -0,0 +1,118 @@ +syntax = "proto3"; + +package ledger; + +option go_package = "github.com/onflow/flow-go/ledger/protobuf"; + +import "google/protobuf/empty.proto"; + +// LedgerService provides remote access to ledger operations +service LedgerService { + // InitialState returns the initial state of the ledger + rpc InitialState(google.protobuf.Empty) returns (StateResponse); + + // HasState checks if the given state exists in the ledger + rpc HasState(StateRequest) returns (HasStateResponse); + + // GetSingleValue returns a single value for a given key at a specific state + rpc GetSingleValue(GetSingleValueRequest) returns (ValueResponse); + + // Get returns values for multiple keys at a specific state + rpc Get(GetRequest) returns (GetResponse); + + // Set updates keys with new values at a specific state and returns the new state + rpc Set(SetRequest) returns (SetResponse); + + // Prove returns proofs for the given keys at a specific state + rpc Prove(ProveRequest) returns (ProofResponse); +} + +// State represents a ledger state (32-byte hash) +message State { + bytes hash = 1; // 32 bytes +} + +// KeyPart represents a part of a hierarchical key +message KeyPart { + // type is actually uint16 but uint16 is not available in proto3 + uint32 type = 1; + bytes value = 2; +} + +// Key represents a hierarchical ledger key +message Key { + repeated KeyPart parts = 1; +} + +// Value represents a ledger value +message Value { + bytes data = 1; + // is_nil distinguishes between nil and []byte{} (empty slice). + // When data is nil or empty: + // - is_nil=true means the original value was nil + // - is_nil=false means the original value was []byte{} (empty slice) + // When data is non-empty, is_nil is ignored (should be false). 
+ bool is_nil = 2; +} + +// StateRequest contains a state to query +message StateRequest { + State state = 1; +} + +// StateResponse contains a state +message StateResponse { + State state = 1; +} + +// HasStateResponse indicates if a state exists +message HasStateResponse { + bool has_state = 1; +} + +// GetSingleValueRequest contains a query for a single value +message GetSingleValueRequest { + State state = 1; + Key key = 2; +} + +// ValueResponse contains a single value +message ValueResponse { + Value value = 1; +} + +// GetRequest contains a query for multiple values +message GetRequest { + State state = 1; + repeated Key keys = 2; +} + +// GetResponse contains multiple values +message GetResponse { + repeated Value values = 1; +} + +// SetRequest contains an update operation +message SetRequest { + State state = 1; + repeated Key keys = 2; + repeated Value values = 3; +} + +// SetResponse contains the new state after an update +message SetResponse { + State new_state = 1; + bytes trie_update = 2; // Encoded TrieUpdate (opaque to gRPC) +} + +// ProveRequest contains a proof query +message ProveRequest { + State state = 1; + repeated Key keys = 2; +} + +// ProofResponse contains a proof +message ProofResponse { + bytes proof = 1; // Encoded Proof (opaque to gRPC) +} + diff --git a/ledger/protobuf/ledger_grpc.pb.go b/ledger/protobuf/ledger_grpc.pb.go new file mode 100644 index 00000000000..9563796331c --- /dev/null +++ b/ledger/protobuf/ledger_grpc.pb.go @@ -0,0 +1,307 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: ledger.proto + +package protobuf + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + LedgerService_InitialState_FullMethodName = "/ledger.LedgerService/InitialState" + LedgerService_HasState_FullMethodName = "/ledger.LedgerService/HasState" + LedgerService_GetSingleValue_FullMethodName = "/ledger.LedgerService/GetSingleValue" + LedgerService_Get_FullMethodName = "/ledger.LedgerService/Get" + LedgerService_Set_FullMethodName = "/ledger.LedgerService/Set" + LedgerService_Prove_FullMethodName = "/ledger.LedgerService/Prove" +) + +// LedgerServiceClient is the client API for LedgerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type LedgerServiceClient interface { + // InitialState returns the initial state of the ledger + InitialState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StateResponse, error) + // HasState checks if the given state exists in the ledger + HasState(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*HasStateResponse, error) + // GetSingleValue returns a single value for a given key at a specific state + GetSingleValue(ctx context.Context, in *GetSingleValueRequest, opts ...grpc.CallOption) (*ValueResponse, error) + // Get returns values for multiple keys at a specific state + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + // Set updates keys with new values at a specific state and returns the new state + Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) + // Prove returns proofs for the given keys at a specific state + Prove(ctx context.Context, in *ProveRequest, opts ...grpc.CallOption) (*ProofResponse, error) +} + +type ledgerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewLedgerServiceClient(cc grpc.ClientConnInterface) LedgerServiceClient { + return &ledgerServiceClient{cc} +} + +func (c *ledgerServiceClient) InitialState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StateResponse, error) { + out := new(StateResponse) + err := c.cc.Invoke(ctx, LedgerService_InitialState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ledgerServiceClient) HasState(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*HasStateResponse, error) { + out := new(HasStateResponse) + err := c.cc.Invoke(ctx, LedgerService_HasState_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *ledgerServiceClient) GetSingleValue(ctx context.Context, in *GetSingleValueRequest, opts ...grpc.CallOption) (*ValueResponse, error) { + out := new(ValueResponse) + err := c.cc.Invoke(ctx, LedgerService_GetSingleValue_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ledgerServiceClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, LedgerService_Get_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ledgerServiceClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) { + out := new(SetResponse) + err := c.cc.Invoke(ctx, LedgerService_Set_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ledgerServiceClient) Prove(ctx context.Context, in *ProveRequest, opts ...grpc.CallOption) (*ProofResponse, error) { + out := new(ProofResponse) + err := c.cc.Invoke(ctx, LedgerService_Prove_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LedgerServiceServer is the server API for LedgerService service. 
+// All implementations must embed UnimplementedLedgerServiceServer +// for forward compatibility +type LedgerServiceServer interface { + // InitialState returns the initial state of the ledger + InitialState(context.Context, *emptypb.Empty) (*StateResponse, error) + // HasState checks if the given state exists in the ledger + HasState(context.Context, *StateRequest) (*HasStateResponse, error) + // GetSingleValue returns a single value for a given key at a specific state + GetSingleValue(context.Context, *GetSingleValueRequest) (*ValueResponse, error) + // Get returns values for multiple keys at a specific state + Get(context.Context, *GetRequest) (*GetResponse, error) + // Set updates keys with new values at a specific state and returns the new state + Set(context.Context, *SetRequest) (*SetResponse, error) + // Prove returns proofs for the given keys at a specific state + Prove(context.Context, *ProveRequest) (*ProofResponse, error) + mustEmbedUnimplementedLedgerServiceServer() +} + +// UnimplementedLedgerServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedLedgerServiceServer struct { +} + +func (UnimplementedLedgerServiceServer) InitialState(context.Context, *emptypb.Empty) (*StateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitialState not implemented") +} +func (UnimplementedLedgerServiceServer) HasState(context.Context, *StateRequest) (*HasStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method HasState not implemented") +} +func (UnimplementedLedgerServiceServer) GetSingleValue(context.Context, *GetSingleValueRequest) (*ValueResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSingleValue not implemented") +} +func (UnimplementedLedgerServiceServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedLedgerServiceServer) Set(context.Context, *SetRequest) (*SetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} +func (UnimplementedLedgerServiceServer) Prove(context.Context, *ProveRequest) (*ProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Prove not implemented") +} +func (UnimplementedLedgerServiceServer) mustEmbedUnimplementedLedgerServiceServer() {} + +// UnsafeLedgerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LedgerServiceServer will +// result in compilation errors. 
+type UnsafeLedgerServiceServer interface { + mustEmbedUnimplementedLedgerServiceServer() +} + +func RegisterLedgerServiceServer(s grpc.ServiceRegistrar, srv LedgerServiceServer) { + s.RegisterService(&LedgerService_ServiceDesc, srv) +} + +func _LedgerService_InitialState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LedgerServiceServer).InitialState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: LedgerService_InitialState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LedgerServiceServer).InitialState(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _LedgerService_HasState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LedgerServiceServer).HasState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: LedgerService_HasState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LedgerServiceServer).HasState(ctx, req.(*StateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LedgerService_GetSingleValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSingleValueRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LedgerServiceServer).GetSingleValue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
LedgerService_GetSingleValue_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LedgerServiceServer).GetSingleValue(ctx, req.(*GetSingleValueRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LedgerService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LedgerServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: LedgerService_Get_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LedgerServiceServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LedgerService_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LedgerServiceServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: LedgerService_Set_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LedgerServiceServer).Set(ctx, req.(*SetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LedgerService_Prove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LedgerServiceServer).Prove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: LedgerService_Prove_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(LedgerServiceServer).Prove(ctx, req.(*ProveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// LedgerService_ServiceDesc is the grpc.ServiceDesc for LedgerService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var LedgerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "ledger.LedgerService", + HandlerType: (*LedgerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "InitialState", + Handler: _LedgerService_InitialState_Handler, + }, + { + MethodName: "HasState", + Handler: _LedgerService_HasState_Handler, + }, + { + MethodName: "GetSingleValue", + Handler: _LedgerService_GetSingleValue_Handler, + }, + { + MethodName: "Get", + Handler: _LedgerService_Get_Handler, + }, + { + MethodName: "Set", + Handler: _LedgerService_Set_Handler, + }, + { + MethodName: "Prove", + Handler: _LedgerService_Prove_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ledger.proto", +} diff --git a/ledger/remote/client.go b/ledger/remote/client.go new file mode 100644 index 00000000000..9f2119bf677 --- /dev/null +++ b/ledger/remote/client.go @@ -0,0 +1,467 @@ +package remote + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/onflow/flow-go/ledger" + ledgerpb "github.com/onflow/flow-go/ledger/protobuf" +) + +// Client implements ledger.Ledger interface using gRPC calls to a remote ledger service. +type Client struct { + conn *grpc.ClientConn + client ledgerpb.LedgerServiceClient + logger zerolog.Logger + done chan struct{} + once sync.Once + ctx context.Context + cancel context.CancelFunc + callTimeout time.Duration +} + +// clientConfig holds configuration options for the Client. 
+type clientConfig struct { + maxRequestSize uint + maxResponseSize uint + callTimeout time.Duration +} + +// defaultClientConfig returns the default configuration. +func defaultClientConfig() *clientConfig { + return &clientConfig{ + maxRequestSize: 1 << 30, // 1 GiB + maxResponseSize: 1 << 30, // 1 GiB + callTimeout: time.Minute, + } +} + +// ClientOption is a function that configures a Client. +type ClientOption func(*clientConfig) + +// WithMaxRequestSize sets the maximum request message size in bytes. +func WithMaxRequestSize(size uint) ClientOption { + return func(cfg *clientConfig) { + cfg.maxRequestSize = size + } +} + +// WithMaxResponseSize sets the maximum response message size in bytes. +func WithMaxResponseSize(size uint) ClientOption { + return func(cfg *clientConfig) { + cfg.maxResponseSize = size + } +} + +// WithCallTimeout sets the timeout for individual gRPC calls. +func WithCallTimeout(timeout time.Duration) ClientOption { + return func(cfg *clientConfig) { + cfg.callTimeout = timeout + } +} + +// NewClient creates a new remote ledger client. +// grpcAddr can be either a TCP address (e.g., "localhost:9000") or a Unix domain socket. +// For Unix sockets, you can use either the full gRPC format (e.g., "unix:///tmp/ledger.sock") +// or just the absolute path (e.g., "/tmp/ledger.sock") - the unix:// prefix will be added automatically. +// Options can be provided to customize the client configuration. +// By default, max request and response sizes are 1 GiB. 
+func NewClient(grpcAddr string, logger zerolog.Logger, opts ...ClientOption) (*Client, error) { + logger = logger.With().Str("component", "remote_ledger_client").Logger() + + cfg := defaultClientConfig() + for _, opt := range opts { + opt(cfg) + } + + // Handle Unix domain socket addresses + // gRPC client accepts "unix:///absolute/path" or "unix://relative/path" format + // For convenience, if an absolute path is provided (starts with /), automatically add the unix:// prefix + if strings.HasPrefix(grpcAddr, "/") { + grpcAddr = "unix://" + grpcAddr + logger.Debug().Str("address", grpcAddr).Msg("using Unix domain socket (auto-prefixed)") + } else if strings.HasPrefix(grpcAddr, "unix://") { + logger.Debug().Str("address", grpcAddr).Msg("using Unix domain socket") + } + + // Create gRPC connection with max message size configuration. + // Default to 1 GiB (instead of standard 4 MiB) to handle large proofs that can exceed 4MB. + // This was increased to fix "grpc: received message larger than max" errors when generating + // proofs for blocks with many state changes. + // NOTE(review): grpc.NewClient creates a lazy channel and returns an error only for an invalid + // target or options — it does not dial the server, so this retry loop guards against + // configuration errors rather than waiting for the service to become available + // (server readiness is probed separately in Ready()). TODO confirm intended behavior. + // After approximately 40 minutes of retrying (90 attempts), the client will give up and crash. + var conn *grpc.ClientConn + retryDelay := 100 * time.Millisecond + maxRetryDelay := 30 * time.Second + maxRetries := 90 // ~40 minutes total wait time with exponential backoff capped at 30s + + for attempt := 0; ; attempt++ { + var err error + conn, err = grpc.NewClient( + grpcAddr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(int(cfg.maxResponseSize)), + grpc.MaxCallSendMsgSize(int(cfg.maxRequestSize)), + ), + ) + if err == nil { + logger.Info().Str("address", grpcAddr).Msg("successfully connected to ledger service") + break + } + + if attempt >= maxRetries { + logger.Fatal(). + Err(err). + Int("attempts", attempt). + Str("address", grpcAddr).
+ Msg("failed to connect to ledger service after maximum retries, crashing node") + } + + logger.Warn(). + Err(err). + Int("attempt", attempt+1). + Int("max_attempts", maxRetries). + Dur("retry_delay", retryDelay). + Time("retry_at", time.Now().Add(retryDelay)). + Str("address", grpcAddr). + Msg("failed to connect to ledger service, retrying...") + + time.Sleep(retryDelay) + // Exponential backoff with max cap + retryDelay = min(maxRetryDelay, time.Duration(float64(retryDelay)*1.5)) + } + + client := ledgerpb.NewLedgerServiceClient(conn) + + ctx, cancel := context.WithCancel(context.Background()) + + return &Client{ + conn: conn, + client: client, + logger: logger, + done: make(chan struct{}), + ctx: ctx, + cancel: cancel, + callTimeout: cfg.callTimeout, + }, nil +} + +// Close closes the gRPC connection. +func (c *Client) Close() error { + if c.conn != nil { + err := c.conn.Close() + c.conn = nil + return err + } + return nil +} + +// callCtx returns a context for gRPC calls with the configured timeout. +// The context is also cancelled when the client is shut down via Done(). +func (c *Client) callCtx() (context.Context, context.CancelFunc) { + return context.WithTimeout(c.ctx, c.callTimeout) +} + +// InitialState returns the initial state of the ledger. +func (c *Client) InitialState() ledger.State { + ctx, cancel := c.callCtx() + defer cancel() + resp, err := c.client.InitialState(ctx, &emptypb.Empty{}) + if err != nil { + c.logger.Fatal().Err(err).Msg("failed to get initial state") + return ledger.DummyState + } + + var state ledger.State + if len(resp.State.Hash) != len(state) { + c.logger.Fatal(). + Int("expected", len(state)). + Int("got", len(resp.State.Hash)). + Msg("invalid state hash length") + return ledger.DummyState + } + copy(state[:], resp.State.Hash) + return state +} + +// HasState returns true if the given state exists in the ledger. 
+func (c *Client) HasState(state ledger.State) bool { + ctx, cancel := c.callCtx() + defer cancel() + req := &ledgerpb.StateRequest{ + State: &ledgerpb.State{ + Hash: state[:], + }, + } + + resp, err := c.client.HasState(ctx, req) + if err != nil { + c.logger.Error().Err(err).Msg("failed to check state") + return false + } + + return resp.HasState +} + +// GetSingleValue returns a single value for a given key at a specific state. +func (c *Client) GetSingleValue(query *ledger.QuerySingleValue) (ledger.Value, error) { + ctx, cancel := c.callCtx() + defer cancel() + state := query.State() + req := &ledgerpb.GetSingleValueRequest{ + State: &ledgerpb.State{ + Hash: state[:], + }, + Key: ledgerKeyToProtoKey(query.Key()), + } + + resp, err := c.client.GetSingleValue(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to get single value: %w", err) + } + + // Reconstruct the original value type using is_nil flag + // This preserves the distinction between nil and []byte{} that protobuf loses + if len(resp.Value.Data) == 0 { + if resp.Value.IsNil { + return nil, nil + } + return ledger.Value([]byte{}), nil + } + // Copy the data to avoid holding reference to the gRPC response buffer + return ledger.Value(append([]byte{}, resp.Value.Data...)), nil +} + +// Get returns values for multiple keys at a specific state. 
+func (c *Client) Get(query *ledger.Query) ([]ledger.Value, error) { + ctx, cancel := c.callCtx() + defer cancel() + state := query.State() + req := &ledgerpb.GetRequest{ + State: &ledgerpb.State{ + Hash: state[:], + }, + Keys: make([]*ledgerpb.Key, len(query.Keys())), + } + + for i, key := range query.Keys() { + req.Keys[i] = ledgerKeyToProtoKey(key) + } + + resp, err := c.client.Get(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to get values: %w", err) + } + + values := make([]ledger.Value, len(resp.Values)) + for i, protoValue := range resp.Values { + // Reconstruct the original value type using is_nil flag + // This preserves the distinction between nil and []byte{} that protobuf loses + if len(protoValue.Data) == 0 { + if protoValue.IsNil { + values[i] = nil + } else { + values[i] = ledger.Value([]byte{}) + } + } else { + // Copy the data to avoid holding reference to the gRPC response buffer + values[i] = ledger.Value(append([]byte{}, protoValue.Data...)) + } + } + + return values, nil +} + +// Set updates keys with new values at a specific state and returns the new state. +func (c *Client) Set(update *ledger.Update) (ledger.State, *ledger.TrieUpdate, error) { + // Handle empty updates locally without RPC call. + // This matches the behavior of the local ledger implementations which return + // the same state when there are no keys to update. 
+ if update.Size() == 0 { + return update.State(), + &ledger.TrieUpdate{ + RootHash: ledger.RootHash(update.State()), + Paths: []ledger.Path{}, + Payloads: []*ledger.Payload{}, + }, + nil + } + + ctx, cancel := c.callCtx() + defer cancel() + state := update.State() + req := &ledgerpb.SetRequest{ + State: &ledgerpb.State{ + Hash: state[:], + }, + Keys: make([]*ledgerpb.Key, len(update.Keys())), + Values: make([]*ledgerpb.Value, len(update.Values())), + } + + for i, key := range update.Keys() { + req.Keys[i] = ledgerKeyToProtoKey(key) + } + + for i, value := range update.Values() { + // Distinguish between nil and []byte{} for protobuf encoding + // Protobuf cannot distinguish them, so we use is_nil flag + isNil := value == nil + req.Values[i] = &ledgerpb.Value{ + Data: value, + IsNil: isNil, + } + } + + resp, err := c.client.Set(ctx, req) + if err != nil { + return ledger.DummyState, nil, fmt.Errorf("failed to set values: %w", err) + } + + if resp == nil || resp.NewState == nil { + return ledger.DummyState, nil, fmt.Errorf("invalid response: missing new state") + } + + var newState ledger.State + if len(resp.NewState.Hash) != len(newState) { + return ledger.DummyState, nil, fmt.Errorf("invalid new state hash length") + } + copy(newState[:], resp.NewState.Hash) + + // Decode trie update using centralized decoding function to ensure + // client and server use the same encoding method + trieUpdate, err := decodeTrieUpdateFromTransport(resp.TrieUpdate) + if err != nil { + return ledger.DummyState, nil, fmt.Errorf("failed to decode trie update: %w", err) + } + + return newState, trieUpdate, nil +} + +// Prove returns proofs for the given keys at a specific state. 
+func (c *Client) Prove(query *ledger.Query) (ledger.Proof, error) { + ctx, cancel := c.callCtx() + defer cancel() + state := query.State() + req := &ledgerpb.ProveRequest{ + State: &ledgerpb.State{ + Hash: state[:], + }, + Keys: make([]*ledgerpb.Key, len(query.Keys())), + } + + for i, key := range query.Keys() { + req.Keys[i] = ledgerKeyToProtoKey(key) + } + + resp, err := c.client.Prove(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to generate proof: %w", err) + } + + // Copy the proof to avoid holding reference to the gRPC response buffer + return ledger.Proof(append([]byte{}, resp.Proof...)), nil +} + +// Ready returns a channel that is closed when the client is ready. +// For a remote client, this waits for the ledger service to be ready by +// calling InitialState() with retries to ensure the service has finished initialization. +func (c *Client) Ready() <-chan struct{} { + ready := make(chan struct{}) + go func() { + defer close(ready) + // Wait for the ledger service to be ready by calling InitialState() + // This ensures the service has finished WAL replay and is ready to serve requests + // Retry with exponential backoff (delay capped at 30s) + maxRetries := 30 + retryDelay := 100 * time.Millisecond + maxRetryDelay := 30 * time.Second + + for i := 0; i < maxRetries; i++ { + ctx, cancel := c.callCtx() + _, err := c.client.InitialState(ctx, &emptypb.Empty{}) + cancel() + if err == nil { + c.logger.Info().Msg("ledger service ready") + return + } + + // Check if the client context was cancelled (shutdown in progress) + if c.ctx.Err() != nil { + c.logger.Info().Msg("client shutdown during ready check") + return + } + + if i < maxRetries-1 { + c.logger.Warn(). + Err(err). + Int("attempt", i+1). + Dur("retry_delay", retryDelay). + Time("retry_at", time.Now().Add(retryDelay)). 
+ Msg("ledger service not ready, retrying...") + time.Sleep(retryDelay) + retryDelay = min(time.Duration(float64(retryDelay)*1.5), maxRetryDelay) + } else { + c.logger.Warn().Err(err).Msg("ledger service not ready after retries, proceeding anyway") + // Still close the channel to avoid blocking forever + // The execution node will fail later with a more specific error if the service is truly not ready + } + } + }() + return ready +} + +// Done returns a channel that is closed when the client is done. +// This cancels any in-flight gRPC calls and closes the connection. +// The method is idempotent - multiple calls return the same channel. +func (c *Client) Done() <-chan struct{} { + c.once.Do(func() { + go func() { + defer close(c.done) + // Cancel context first to abort any in-flight calls + c.cancel() + if err := c.Close(); err != nil { + c.logger.Error().Err(err).Msg("error closing gRPC connection") + } + }() + }) + return c.done +} + +// StateCount returns the number of states in the ledger. +// This is not supported for remote clients as it requires gRPC methods that are not yet implemented. +func (c *Client) StateCount() int { + // Remote client doesn't have access to state count without additional gRPC methods + // Return 0 to indicate no states are available (or unknown) + // This will cause the health check to fail, which is appropriate + return 0 +} + +// StateByIndex returns the state at the given index. +// -1 is the last index. +// This is not supported for remote clients as it requires gRPC methods that are not yet implemented. +func (c *Client) StateByIndex(index int) (ledger.State, error) { + return ledger.DummyState, fmt.Errorf("StateByIndex is not supported for remote ledger clients") +} + +// ledgerKeyToProtoKey converts a ledger.Key to a protobuf Key. 
+func ledgerKeyToProtoKey(key ledger.Key) *ledgerpb.Key { + parts := make([]*ledgerpb.KeyPart, len(key.KeyParts)) + for i, part := range key.KeyParts { + parts[i] = &ledgerpb.KeyPart{ + Type: uint32(part.Type), + Value: part.Value, + } + } + return &ledgerpb.Key{ + Parts: parts, + } +} diff --git a/ledger/remote/client_test.go b/ledger/remote/client_test.go new file mode 100644 index 00000000000..1401de77151 --- /dev/null +++ b/ledger/remote/client_test.go @@ -0,0 +1,74 @@ +package remote + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestClientSetEmptyUpdate verifies that the Client.Set method handles empty updates +// correctly without making an RPC call. This matches the behavior of the local ledger +// implementations (ledger/complete/ledger.go and ledger/partial/ledger.go). +// +// When a transaction/collection doesn't modify any registers (read-only), the update +// will have zero keys. The client should return the same state without making an RPC +// call, avoiding the "keys cannot be empty" error from the server. 
+func TestClientSetEmptyUpdate(t *testing.T) { + // Create an empty update (no keys, no values) + state := ledger.State(unittest.StateCommitmentFixture()) + update, err := ledger.NewUpdate(state, []ledger.Key{}, []ledger.Value{}) + require.NoError(t, err) + require.Equal(t, 0, update.Size(), "update should have zero keys") + + // Create a client with no actual connection (we shouldn't need it for empty updates) + // The client will panic if it tries to make an RPC call, which verifies we don't call the server + client := &Client{ + // All fields are nil/zero - any RPC call would panic + } + + // Call Set with empty update + newState, trieUpdate, err := client.Set(update) + + // Verify no error + require.NoError(t, err, "Set with empty update should not return error") + + // Verify the state is unchanged + assert.Equal(t, state, newState, "new state should equal original state for empty update") + + // Verify the trie update is valid but empty + require.NotNil(t, trieUpdate, "trie update should not be nil") + assert.Equal(t, ledger.RootHash(state), trieUpdate.RootHash, "trie update root hash should match state") + assert.Empty(t, trieUpdate.Paths, "trie update should have no paths for empty update") + assert.Empty(t, trieUpdate.Payloads, "trie update should have no payloads for empty update") +} + +// TestClientSetEmptyUpdateMatchesLocalLedger verifies that the remote client's +// empty update handling produces the same result as the local ledger implementations. 
+func TestClientSetEmptyUpdateMatchesLocalLedger(t *testing.T) { + state := ledger.State(unittest.StateCommitmentFixture()) + update, err := ledger.NewUpdate(state, []ledger.Key{}, []ledger.Value{}) + require.NoError(t, err) + + // Get result from remote client (without actual connection) + client := &Client{} + remoteState, remoteTrieUpdate, err := client.Set(update) + require.NoError(t, err) + + // The expected result matches what local ledger implementations return: + // - State unchanged + // - TrieUpdate with RootHash equal to state, empty Paths and Payloads + expectedTrieUpdate := &ledger.TrieUpdate{ + RootHash: ledger.RootHash(state), + Paths: []ledger.Path{}, + Payloads: []*ledger.Payload{}, + } + + assert.Equal(t, state, remoteState, "state should be unchanged") + assert.Equal(t, expectedTrieUpdate.RootHash, remoteTrieUpdate.RootHash) + assert.Equal(t, len(expectedTrieUpdate.Paths), len(remoteTrieUpdate.Paths)) + assert.Equal(t, len(expectedTrieUpdate.Payloads), len(remoteTrieUpdate.Payloads)) +} diff --git a/ledger/remote/encoding.go b/ledger/remote/encoding.go new file mode 100644 index 00000000000..8a09a30e523 --- /dev/null +++ b/ledger/remote/encoding.go @@ -0,0 +1,33 @@ +package remote + +import ( + "github.com/onflow/flow-go/ledger" +) + +// encodeTrieUpdateForTransport encodes a trie update for transmission over gRPC. +// This function MUST be used by the server when encoding trie updates. +// The client MUST use decodeTrieUpdateFromTransport to decode. +// +// This centralized function ensures that both client and server use the same +// encoding method, preventing encoding/decoding mismatches. +// +// Currently uses CBOR encoding to preserve the distinction between nil and []byte{} +// values in payloads. If the encoding method needs to change, update this function +// and ensure decodeTrieUpdateFromTransport uses the matching decoder. 
+func encodeTrieUpdateForTransport(trieUpdate *ledger.TrieUpdate) []byte { + return ledger.EncodeTrieUpdateCBOR(trieUpdate) +} + +// decodeTrieUpdateFromTransport decodes a trie update received over gRPC. +// This function MUST be used by the client when decoding trie updates. +// The server MUST use encodeTrieUpdateForTransport to encode. +// +// This centralized function ensures that both client and server use the same +// encoding method, preventing encoding/decoding mismatches. +// +// Currently uses CBOR decoding to preserve the distinction between nil and []byte{} +// values in payloads. If the encoding method needs to change, update this function +// and ensure encodeTrieUpdateForTransport uses the matching encoder. +func decodeTrieUpdateFromTransport(encodedTrieUpdate []byte) (*ledger.TrieUpdate, error) { + return ledger.DecodeTrieUpdateCBOR(encodedTrieUpdate) +} diff --git a/ledger/remote/factory.go b/ledger/remote/factory.go new file mode 100644 index 00000000000..d7e5ad89b98 --- /dev/null +++ b/ledger/remote/factory.go @@ -0,0 +1,46 @@ +package remote + +import ( + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/ledger" +) + +// RemoteLedgerFactory creates remote ledger instances via gRPC. +type RemoteLedgerFactory struct { + grpcAddr string + logger zerolog.Logger + maxRequestSize uint + maxResponseSize uint +} + +// NewRemoteLedgerFactory creates a new factory for remote ledger instances. +// maxRequestSize and maxResponseSize specify the maximum message sizes in bytes. +// If both are 0, defaults to 1 GiB for both requests and responses. 
+func NewRemoteLedgerFactory( + grpcAddr string, + logger zerolog.Logger, + maxRequestSize, maxResponseSize uint, +) ledger.Factory { + return &RemoteLedgerFactory{ + grpcAddr: grpcAddr, + logger: logger, + maxRequestSize: maxRequestSize, + maxResponseSize: maxResponseSize, + } +} + +func (f *RemoteLedgerFactory) NewLedger() (ledger.Ledger, error) { + var opts []ClientOption + if f.maxRequestSize > 0 { + opts = append(opts, WithMaxRequestSize(f.maxRequestSize)) + } + if f.maxResponseSize > 0 { + opts = append(opts, WithMaxResponseSize(f.maxResponseSize)) + } + client, err := NewClient(f.grpcAddr, f.logger, opts...) + if err != nil { + return nil, err + } + return client, nil +} diff --git a/ledger/remote/protobuf_encoding_test.go b/ledger/remote/protobuf_encoding_test.go new file mode 100644 index 00000000000..2b519777b92 --- /dev/null +++ b/ledger/remote/protobuf_encoding_test.go @@ -0,0 +1,257 @@ +package remote + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + ledgerpb "github.com/onflow/flow-go/ledger/protobuf" +) + +// TestProtobufNilVsEmptySlice demonstrates that protobuf cannot distinguish +// between nil and []byte{} after encoding/decoding. +// +// This test shows the root cause of the execution_data_id mismatch: +// - When a client sends []byte{} (empty slice), protobuf encodes it +// - When the server decodes it, it becomes nil +// - The server cannot distinguish between originally nil vs originally []byte{} +// - This causes different CBOR encodings (f6 for nil vs 40 for []byte{}) +// - Leading to different execution_data_id values +// +// Note: In practice, gRPC handles protobuf encoding/decoding automatically. +// This test simulates what happens by directly checking protobuf message behavior. 
+func TestProtobufNilVsEmptySlice(t *testing.T) { + // Create three different values that should be distinguishable + values := []struct { + name string + input []byte + }{ + {"nil_value", nil}, + {"empty_slice", []byte{}}, + {"non_empty_slice", []byte{1, 2, 3}}, + } + + // Create protobuf messages with these values (what the client does) + protoValues := make([]*ledgerpb.Value, len(values)) + for i, v := range values { + protoValues[i] = &ledgerpb.Value{ + Data: v.input, + } + } + + // Verify the original distinction on the client side + t.Log("Client side - Original values before protobuf encoding:") + for i, v := range values { + if v.input == nil { + t.Logf(" [%d] %s: nil (len=%d, isNil=%v)", i, v.name, 0, true) + } else { + t.Logf(" [%d] %s: []byte{} (len=%d, isNil=%v)", i, v.name, len(v.input), false) + } + } + + // Simulate what happens: when protobuf encodes []byte{}, it becomes + // indistinguishable from nil on the wire. When decoded, both become nil. + // This is what the server sees after gRPC decodes the protobuf message. + t.Log("\nServer side - What server receives after protobuf encoding/decoding:") + t.Log(" (In real gRPC, this happens automatically during transmission)") + + // The key insight: protobuf treats empty bytes fields as optional + // When []byte{} is encoded and decoded, it becomes nil + // We can simulate this by checking what happens when we set Data to []byte{} + serverSeesTypes := make(map[string]int) + for i, protoValue := range protoValues { + // In protobuf, when Data is []byte{}, after encoding/decoding it becomes nil + // This is the behavior we're testing + var typeStr string + var serverSees []byte + + // Simulate protobuf behavior: empty slice becomes nil after round-trip + if protoValue.Data == nil { + serverSees = nil + typeStr = "NIL" + } else if len(protoValue.Data) == 0 { + // This is the problem: []byte{} becomes nil after protobuf round-trip + serverSees = nil + typeStr = "NIL" // Lost distinction! 
+ } else { + serverSees = protoValue.Data + typeStr = "NON_EMPTY" + } + + serverSeesTypes[typeStr]++ + t.Logf(" [%d] %s: server sees %s (len=%d, isNil=%v)", + i, values[i].name, typeStr, len(serverSees), serverSees == nil) + } + + // The critical assertion: we expected 3 distinct types, but protobuf only gives us 2 + // - nil and []byte{} both become nil (indistinguishable) + // - Only non-empty slices remain distinct + t.Logf("\nDistinct types server can distinguish: %d (expected 2, not 3)", len(serverSeesTypes)) + for k, v := range serverSeesTypes { + t.Logf(" %s: %d occurrences", k, v) + } + + // Verify that nil and empty slice both become nil after protobuf round-trip + // This is the core issue: the server cannot distinguish them + assert.Equal(t, 2, len(serverSeesTypes), + "Expected 2 distinct types after protobuf round-trip (NIL and NON_EMPTY), "+ + "but got %d. This proves protobuf loses the nil vs []byte{} distinction.", + len(serverSeesTypes)) + + assert.Equal(t, 2, serverSeesTypes["NIL"], + "Both nil and []byte{} become NIL on the server (lost distinction)") + assert.Equal(t, 1, serverSeesTypes["NON_EMPTY"], + "Only non-empty slice remains distinguishable") +} + +// TestProtobufEncodingDemonstratesIssue demonstrates the issue in the context +// of how it affects the ledger service. 
+func TestProtobufEncodingDemonstratesIssue(t *testing.T) { + // Simulate what happens when CommitDelta sends values to remote ledger + originalValues := []struct { + name string + value []byte + }{ + {"nil_from_local_ledger", nil}, + {"empty_slice_from_local_ledger", []byte{}}, + {"non_empty_value", []byte{1, 2, 3}}, + } + + // Step 1: Client creates protobuf messages (what LedgerClient does in client.go:172-174) + protoValues := make([]*ledgerpb.Value, len(originalValues)) + for i, v := range originalValues { + protoValues[i] = &ledgerpb.Value{ + Data: v.value, + } + } + + t.Log("Step 1 - Client creates protobuf messages:") + for i, v := range originalValues { + t.Logf(" [%d] %s: Data=%v (len=%d, isNil=%v)", + i, v.name, protoValues[i].Data, len(protoValues[i].Data), protoValues[i].Data == nil) + } + + // Step 2: gRPC automatically encodes protobuf (happens over the wire) + // In protobuf, empty bytes fields are optional and can be represented as nil + // When []byte{} is encoded, it becomes an empty bytes field + // When decoded, empty bytes fields become nil + + // Step 3: Server receives and decodes (what LedgerService does) + // After gRPC decodes, both nil and []byte{} become nil + t.Log("\nStep 2-3 - After gRPC encoding/decoding (what server sees):") + serverSeesTypes := make(map[string]int) + for i, protoValue := range protoValues { + // Simulate what gRPC/protobuf does: []byte{} becomes nil after round-trip + var serverSees []byte + var typeStr string + + if protoValue.Data == nil { + serverSees = nil + typeStr = "NIL" + } else if len(protoValue.Data) == 0 { + // This is the problem: []byte{} becomes nil after protobuf round-trip + serverSees = nil + typeStr = "NIL" // Lost distinction! 
+ } else { + serverSees = protoValue.Data + typeStr = "NON_EMPTY" + } + + serverSeesTypes[typeStr]++ + t.Logf(" [%d] %s: server sees %s (len=%d, isNil=%v)", + i, originalValues[i].name, typeStr, len(serverSees), serverSees == nil) + } + + // The problem: server can only distinguish 2 types, not 3 + assert.Equal(t, 2, len(serverSeesTypes), + "Server can only distinguish 2 types (NIL and NON_EMPTY), "+ + "not 3 (nil, []byte{}, non-empty). The nil vs []byte{} distinction is lost.") + + // This is why normalization won't work - the server can't tell which was which + t.Log("\nConclusion:") + t.Log(" - Client sends: nil, []byte{}, [1,2,3]") + t.Log(" - Server receives: nil, nil, [1,2,3]") + t.Log(" - Server cannot distinguish between originally nil vs originally []byte{}") + t.Log(" - This causes different TrieUpdate structures and different execution_data_id") + t.Log(" - The fix must happen at CBOR encoding level, not at protobuf level") +} + +// TestProtobufIsNilFieldPreservesDistinction verifies that the IsNil field +// allows the server to distinguish between nil and []byte{} after protobuf round-trip. 
+func TestProtobufIsNilFieldPreservesDistinction(t *testing.T) { + // Create three different values that should be distinguishable + originalValues := []struct { + name string + value []byte + }{ + {"nil_value", nil}, + {"empty_slice", []byte{}}, + {"non_empty_slice", []byte{1, 2, 3}}, + } + + // Step 1: Client creates protobuf messages with IsNil field (what LedgerClient does) + protoValues := make([]*ledgerpb.Value, len(originalValues)) + for i, v := range originalValues { + isNil := v.value == nil + protoValues[i] = &ledgerpb.Value{ + Data: v.value, + IsNil: isNil, + } + } + + t.Log("Step 1 - Client creates protobuf messages with IsNil field:") + for i, v := range originalValues { + t.Logf(" [%d] %s: Data=%v (len=%d, isNil=%v, IsNil=%v)", + i, v.name, protoValues[i].Data, len(protoValues[i].Data), + protoValues[i].Data == nil, protoValues[i].IsNil) + } + + // Step 2-3: Simulate protobuf encoding/decoding (gRPC does this automatically) + // After protobuf round-trip, Data becomes nil for both nil and []byte{} + // But IsNil field is preserved! + t.Log("\nStep 2-3 - After protobuf encoding/decoding:") + serverReconstructsTypes := make(map[string]int) + for i, protoValue := range protoValues { + // Simulate what happens: Data becomes nil, but IsNil is preserved + var serverSees []byte + var typeStr string + + // Simulate protobuf behavior: empty Data becomes nil after round-trip + if len(protoValue.Data) == 0 { + // Use IsNil to reconstruct original value type + if protoValue.IsNil { + serverSees = nil + typeStr = "NIL" + } else { + serverSees = []byte{} // Reconstruct empty slice + typeStr = "EMPTY_SLICE" + } + } else { + serverSees = protoValue.Data + typeStr = "NON_EMPTY" + } + + serverReconstructsTypes[typeStr]++ + t.Logf(" [%d] %s: server reconstructs %s (len=%d, isNil=%v, IsNil=%v)", + i, originalValues[i].name, typeStr, len(serverSees), serverSees == nil, protoValue.IsNil) + } + + // The fix: server can now distinguish all 3 types! 
+ assert.Equal(t, 3, len(serverReconstructsTypes), + "With IsNil field, server can distinguish 3 types (NIL, EMPTY_SLICE, NON_EMPTY), "+ + "not just 2. The nil vs []byte{} distinction is preserved!") + + assert.Equal(t, 1, serverReconstructsTypes["NIL"], + "nil value is correctly identified as NIL") + assert.Equal(t, 1, serverReconstructsTypes["EMPTY_SLICE"], + "empty slice []byte{} is correctly identified as EMPTY_SLICE (distinction preserved!)") + assert.Equal(t, 1, serverReconstructsTypes["NON_EMPTY"], + "non-empty slice remains distinguishable") + + t.Log("\nConclusion:") + t.Log(" - Client sends: nil (IsNil=true), []byte{} (IsNil=false), [1,2,3] (IsNil=false)") + t.Log(" - After protobuf: Data becomes nil for both nil and []byte{}") + t.Log(" - Server uses IsNil to reconstruct: nil, []byte{}, [1,2,3]") + t.Log(" - All 3 types are now distinguishable!") + t.Log(" - This preserves the distinction needed for deterministic CBOR encoding") +} diff --git a/ledger/remote/service.go b/ledger/remote/service.go new file mode 100644 index 00000000000..b98bdb245a4 --- /dev/null +++ b/ledger/remote/service.go @@ -0,0 +1,252 @@ +package remote + +import ( + "context" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/onflow/flow-go/ledger" + ledgerpb "github.com/onflow/flow-go/ledger/protobuf" +) + +// Service implements the gRPC LedgerService interface +type Service struct { + ledgerpb.UnimplementedLedgerServiceServer + ledger ledger.Ledger + logger zerolog.Logger +} + +// NewService creates a new ledger service +func NewService(l ledger.Ledger, logger zerolog.Logger) *Service { + return &Service{ + ledger: l, + logger: logger, + } +} + +// InitialState returns the initial state of the ledger +func (s *Service) InitialState(ctx context.Context, req *emptypb.Empty) (*ledgerpb.StateResponse, error) { + state := s.ledger.InitialState() + return 
&ledgerpb.StateResponse{ + State: &ledgerpb.State{ + Hash: state[:], + }, + }, nil +} + +// HasState checks if the given state exists in the ledger +func (s *Service) HasState(ctx context.Context, req *ledgerpb.StateRequest) (*ledgerpb.HasStateResponse, error) { + if req.State == nil || len(req.State.Hash) != len(ledger.State{}) { + return nil, status.Error(codes.InvalidArgument, "invalid state") + } + + var state ledger.State + copy(state[:], req.State.Hash) + + hasState := s.ledger.HasState(state) + return &ledgerpb.HasStateResponse{ + HasState: hasState, + }, nil +} + +// GetSingleValue returns a single value for a given key at a specific state +func (s *Service) GetSingleValue(ctx context.Context, req *ledgerpb.GetSingleValueRequest) (*ledgerpb.ValueResponse, error) { + if req.State == nil || len(req.State.Hash) != len(ledger.State{}) { + return nil, status.Error(codes.InvalidArgument, "invalid state") + } + + var state ledger.State + copy(state[:], req.State.Hash) + + key, err := protoKeyToLedgerKey(req.Key) + if err != nil { + return nil, err // protoKeyToLedgerKey already returns status.Error + } + + query, err := ledger.NewQuerySingleValue(state, key) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + value, err := s.ledger.GetSingleValue(query) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &ledgerpb.ValueResponse{ + Value: &ledgerpb.Value{ + Data: value, + IsNil: value == nil, + }, + }, nil +} + +// Get returns values for multiple keys at a specific state +func (s *Service) Get(ctx context.Context, req *ledgerpb.GetRequest) (*ledgerpb.GetResponse, error) { + if req.State == nil || len(req.State.Hash) != len(ledger.State{}) { + return nil, status.Error(codes.InvalidArgument, "invalid state") + } + + if len(req.Keys) == 0 { + return nil, status.Error(codes.InvalidArgument, "keys cannot be empty") + } + + var state ledger.State + copy(state[:], req.State.Hash) + + keys := 
make([]ledger.Key, len(req.Keys)) + for i, protoKey := range req.Keys { + key, err := protoKeyToLedgerKey(protoKey) + if err != nil { + return nil, err // protoKeyToLedgerKey already returns status.Error + } + keys[i] = key + } + + query, err := ledger.NewQuery(state, keys) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + values, err := s.ledger.Get(query) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + protoValues := make([]*ledgerpb.Value, len(values)) + for i, v := range values { + protoValues[i] = &ledgerpb.Value{ + Data: v, + IsNil: v == nil, + } + } + + return &ledgerpb.GetResponse{ + Values: protoValues, + }, nil +} + +// Set updates keys with new values at a specific state and returns the new state +func (s *Service) Set(ctx context.Context, req *ledgerpb.SetRequest) (*ledgerpb.SetResponse, error) { + if req.State == nil || len(req.State.Hash) != len(ledger.State{}) { + return nil, status.Error(codes.InvalidArgument, "invalid state") + } + + if len(req.Keys) == 0 { + return nil, status.Error(codes.InvalidArgument, "keys cannot be empty") + } + + if len(req.Keys) != len(req.Values) { + return nil, status.Error(codes.InvalidArgument, "keys and values length mismatch") + } + + var state ledger.State + copy(state[:], req.State.Hash) + + keys := make([]ledger.Key, len(req.Keys)) + for i, protoKey := range req.Keys { + key, err := protoKeyToLedgerKey(protoKey) + if err != nil { + return nil, err // protoKeyToLedgerKey already returns status.Error + } + keys[i] = key + } + + values := make([]ledger.Value, len(req.Values)) + for i, protoValue := range req.Values { + var value ledger.Value + // Reconstruct the original value type using is_nil flag + // This preserves the distinction between nil and []byte{} that protobuf loses + if len(protoValue.Data) == 0 { + if protoValue.IsNil { + // Original value was nil + value = nil + } else { + // Original value was []byte{} (empty slice) + value = 
ledger.Value([]byte{}) + } + } else { + // Non-empty value, use data as-is + value = ledger.Value(protoValue.Data) + } + values[i] = value + } + + update, err := ledger.NewUpdate(state, keys, values) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + newState, trieUpdate, err := s.ledger.Set(update) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + // Encode trie update using centralized encoding function to ensure + // client and server use the same encoding method + trieUpdateBytes := encodeTrieUpdateForTransport(trieUpdate) + + return &ledgerpb.SetResponse{ + NewState: &ledgerpb.State{ + Hash: newState[:], + }, + TrieUpdate: trieUpdateBytes, + }, nil +} + +// Prove returns proofs for the given keys at a specific state +func (s *Service) Prove(ctx context.Context, req *ledgerpb.ProveRequest) (*ledgerpb.ProofResponse, error) { + if req.State == nil || len(req.State.Hash) != len(ledger.State{}) { + return nil, status.Error(codes.InvalidArgument, "invalid state") + } + + if len(req.Keys) == 0 { + return nil, status.Error(codes.InvalidArgument, "keys cannot be empty") + } + + var state ledger.State + copy(state[:], req.State.Hash) + + keys := make([]ledger.Key, len(req.Keys)) + for i, protoKey := range req.Keys { + key, err := protoKeyToLedgerKey(protoKey) + if err != nil { + return nil, err // protoKeyToLedgerKey already returns status.Error + } + keys[i] = key + } + + query, err := ledger.NewQuery(state, keys) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + proof, err := s.ledger.Prove(query) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &ledgerpb.ProofResponse{ + Proof: proof, + }, nil +} + +// protoKeyToLedgerKey converts a protobuf Key to a ledger.Key +func protoKeyToLedgerKey(protoKey *ledgerpb.Key) (ledger.Key, error) { + if protoKey == nil { + return ledger.Key{}, status.Error(codes.InvalidArgument, "key is 
nil") + } + + keyParts := make([]ledger.KeyPart, len(protoKey.Parts)) + for i, part := range protoKey.Parts { + if part.Type > 65535 { + return ledger.Key{}, status.Error(codes.InvalidArgument, "key part type exceeds uint16") + } + keyParts[i] = ledger.NewKeyPart(uint16(part.Type), part.Value) + } + + return ledger.NewKey(keyParts), nil +} diff --git a/ledger/trie_encoder.go b/ledger/trie_encoder.go index 442bb46e28a..d7bc6f98438 100644 --- a/ledger/trie_encoder.go +++ b/ledger/trie_encoder.go @@ -3,6 +3,8 @@ package ledger import ( "fmt" + "github.com/fxamacker/cbor/v2" + "github.com/onflow/flow-go/ledger/common/bitutils" "github.com/onflow/flow-go/ledger/common/hash" "github.com/onflow/flow-go/ledger/common/utils" @@ -540,6 +542,12 @@ func decodePayload(inp []byte, zeroCopy bool, version uint16) (*Payload, error) return nil, fmt.Errorf("error decoding payload: %w", err) } + // Normalize nil to empty slice for deterministic CBOR serialization + // ReadSlice returns nil when size is 0, but we need []byte{} for consistency + if encValue == nil { + encValue = []byte{} + } + if zeroCopy { return &Payload{encKey, encValue}, nil } @@ -688,6 +696,39 @@ func decodeTrieUpdate(inp []byte, version uint16) (*TrieUpdate, error) { return &TrieUpdate{RootHash: rh, Paths: paths, Payloads: payloads}, nil } +// EncodeTrieUpdateCBOR encodes a trie update struct using CBOR encoding. +// CBOR encoding preserves the distinction between nil and []byte{} values in payloads +// because Payload has MarshalCBOR/UnmarshalCBOR methods that handle this correctly. +func EncodeTrieUpdateCBOR(t *TrieUpdate) []byte { + if t == nil { + return []byte{} + } + + encoded, err := cbor.Marshal(t) + if err != nil { + // This should not happen in normal operation + panic(fmt.Errorf("failed to encode trie update with CBOR: %w", err)) + } + + return encoded +} + +// DecodeTrieUpdateCBOR constructs a trie update from a CBOR-encoded byte slice. 
+// CBOR encoding preserves the distinction between nil and []byte{} values in payloads +// because Payload has MarshalCBOR/UnmarshalCBOR methods that handle this correctly. +func DecodeTrieUpdateCBOR(encodedTrieUpdate []byte) (*TrieUpdate, error) { + if len(encodedTrieUpdate) == 0 { + return nil, nil + } + + var tu TrieUpdate + if err := cbor.Unmarshal(encodedTrieUpdate, &tu); err != nil { + return nil, fmt.Errorf("error decoding trie update with CBOR: %w", err) + } + + return &tu, nil +} + // EncodeTrieProof encodes the content of a proof into a byte slice func EncodeTrieProof(p *TrieProof) []byte { if p == nil { diff --git a/ledger/trie_encoder_test.go b/ledger/trie_encoder_test.go index c8b667e8e96..b69296a3f4a 100644 --- a/ledger/trie_encoder_test.go +++ b/ledger/trie_encoder_test.go @@ -5,11 +5,13 @@ import ( "encoding/hex" "testing" + "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/hash" "github.com/onflow/flow-go/ledger/common/testutils" + ledgerpb "github.com/onflow/flow-go/ledger/protobuf" ) // TestKeyPartSerialization tests encoding and decoding functionality of a ledger key part @@ -618,8 +620,7 @@ func TestTrieUpdateSerialization(t *testing.T) { } // TestTrieUpdateNilVsEmptySlice verifies that EncodeTrieUpdate/DecodeTrieUpdate -// for payloads created with nil vs empty []byte values results in both being treated -// as empty []byte{} after decoding, due to normalization in NewPayload. +// does not distinguish between nil and []byte{} values in payloads. 
func TestTrieUpdateNilVsEmptySlice(t *testing.T) { p1 := testutils.PathByUint16(1) kp1 := ledger.NewKeyPart(uint16(1), []byte("key 1")) @@ -640,9 +641,8 @@ func TestTrieUpdateNilVsEmptySlice(t *testing.T) { } // Step 1: Verify original distinction - require.Nil(t, tu.Payloads[0].Value(), "Payload 0 should have nil value (we don't normalize at creation time, only encoding time)") + require.Nil(t, tu.Payloads[0].Value(), "Payload 0 should have nil value") require.NotNil(t, tu.Payloads[1].Value(), "Payload 1 should have non-nil value") - require.Equal(t, 0, len(tu.Payloads[0].Value()), "Payload 0 should have 0 length") require.Equal(t, 0, len(tu.Payloads[1].Value()), "Payload 1 should have 0 length") // Step 2: Encode and Decode @@ -650,7 +650,10 @@ func TestTrieUpdateNilVsEmptySlice(t *testing.T) { decoded, err := ledger.DecodeTrieUpdate(encoded) require.NoError(t, err) - // Both will be []byte{} after decode due to normalization in NewPayload. + // Step 3: Verify distinction is lost + // Both will be []byte{} after decode due to normalization in decodePayload. + // Even if we removed the normalization, both would be nil because ReadSlice(0) returns nil. + // This proves EncodeTrieUpdate/DecodeTrieUpdate does not distinguish between nil and []byte{}. 
 t.Logf("Decoded Payload 0 value: %v (isNil=%v)", decoded.Payloads[0].Value(), decoded.Payloads[0].Value() == nil) t.Logf("Decoded Payload 1 value: %v (isNil=%v)", decoded.Payloads[1].Value(), decoded.Payloads[1].Value() == nil) @@ -659,5 +662,222 @@ // The key assertion: they are now identical despite starting differently require.Equal(t, decoded.Payloads[0].Value(), decoded.Payloads[1].Value(), - "Decoded nil and []byte{} are identical") + "Decoded nil and []byte{} are now identical (loss of distinction)") +} + +// TestTrieUpdateCBORNilVsEmptySlice verifies that EncodeTrieUpdateCBOR/DecodeTrieUpdateCBOR +// does not distinguish between nil and []byte{} values in payloads after normalization +func TestTrieUpdateCBORNilVsEmptySlice(t *testing.T) { + t.Parallel() + p1 := testutils.PathByUint16(1) + kp1 := ledger.NewKeyPart(uint16(1), []byte("key 1")) + k1 := ledger.NewKey([]ledger.KeyPart{kp1}) + // Original value is nil + pl1 := ledger.NewPayload(k1, nil) + + p2 := testutils.PathByUint16(2) + kp2 := ledger.NewKeyPart(uint16(1), []byte("key 2")) + k2 := ledger.NewKey([]ledger.KeyPart{kp2}) + // Original value is []byte{} + pl2 := ledger.NewPayload(k2, []byte{}) + + tu := &ledger.TrieUpdate{ + RootHash: testutils.RootHashFixture(), + Paths: []ledger.Path{p1, p2}, + Payloads: []*ledger.Payload{pl1, pl2}, + } + + // Step 1: Verify original distinction + require.Nil(t, tu.Payloads[0].Value(), "Payload 0 should have nil value") + require.NotNil(t, tu.Payloads[1].Value(), "Payload 1 should have non-nil value") + require.Equal(t, 0, len(tu.Payloads[1].Value()), "Payload 1 should have 0 length") + + // Step 2: Encode and Decode using CBOR + encoded := ledger.EncodeTrieUpdateCBOR(tu) + decoded, err := ledger.DecodeTrieUpdateCBOR(encoded) + require.NoError(t, err) + + // Step 3: Verify values after the CBOR round-trip (both are normalized to []byte{}, so the distinction is lost) + t.Logf("Decoded Payload 0 value: %v (isNil=%v)", decoded.Payloads[0].Value(), 
decoded.Payloads[0].Value() == nil) + t.Logf("Decoded Payload 1 value: %v (isNil=%v)", decoded.Payloads[1].Value(), decoded.Payloads[1].Value() == nil) + + // Verify that both nil and []byte{} decode to a non-nil empty slice + require.NotNil(t, decoded.Payloads[0].Value(), "Decoded Payload 0 should have non-nil value after normalization") + require.Equal(t, 0, len(decoded.Payloads[0].Value()), "Decoded Payload 0 should have 0 length") + require.NotNil(t, decoded.Payloads[1].Value(), "Decoded Payload 1 should have non-nil value") + require.Equal(t, 0, len(decoded.Payloads[1].Value()), "Decoded Payload 1 should have 0 length") + + // The key assertion: they should be the same after encoding/decoding due to normalization in encoding + require.Equal(t, decoded.Payloads[0].Value(), decoded.Payloads[1].Value(), + "Decoded nil and []byte{} should be identical after the CBOR round-trip due to normalization") +} + +// TestTrieUpdateCBORRoundTrip tests that EncodeTrieUpdateCBOR and DecodeTrieUpdateCBOR +// correctly round-trip various TrieUpdate configurations. +func TestTrieUpdateCBORRoundTrip(t *testing.T) { + t.Parallel() + t.Run("empty trie update", func(t *testing.T) { + t.Parallel() + tu := &ledger.TrieUpdate{ + RootHash: testutils.RootHashFixture(), + Paths: []ledger.Path{}, + Payloads: []*ledger.Payload{}, + } + + encoded := ledger.EncodeTrieUpdateCBOR(tu) + decoded, err := ledger.DecodeTrieUpdateCBOR(encoded) + require.NoError(t, err) + require.True(t, tu.Equals(decoded)) + }) + + t.Run("single payload with nil value", func(t *testing.T) { + t.Parallel() + p1 := testutils.PathByUint16(1) + kp1 := ledger.NewKeyPart(uint16(1), []byte("key 1")) + k1 := ledger.NewKey([]ledger.KeyPart{kp1}) + pl1 := ledger.NewPayload(k1, nil) + + tu := &ledger.TrieUpdate{ + RootHash: testutils.RootHashFixture(), + Paths: []ledger.Path{p1}, + Payloads: []*ledger.Payload{pl1}, + } + + encoded := ledger.EncodeTrieUpdateCBOR(tu) + decoded, err := ledger.DecodeTrieUpdateCBOR(encoded) + require.NoError(t, err) + require.True(t, tu.Equals(decoded)) + 
 require.NotNil(t, decoded.Payloads[0].Value(), "Nil value should be normalized to empty slice") + }) + + t.Run("single payload with empty slice value", func(t *testing.T) { + t.Parallel() + p1 := testutils.PathByUint16(1) + kp1 := ledger.NewKeyPart(uint16(1), []byte("key 1")) + k1 := ledger.NewKey([]ledger.KeyPart{kp1}) + pl1 := ledger.NewPayload(k1, []byte{}) + + tu := &ledger.TrieUpdate{ + RootHash: testutils.RootHashFixture(), + Paths: []ledger.Path{p1}, + Payloads: []*ledger.Payload{pl1}, + } + + encoded := ledger.EncodeTrieUpdateCBOR(tu) + decoded, err := ledger.DecodeTrieUpdateCBOR(encoded) + require.NoError(t, err) + require.True(t, tu.Equals(decoded)) + require.NotNil(t, decoded.Payloads[0].Value(), "Empty slice value should be preserved as non-nil") + require.Equal(t, 0, len(decoded.Payloads[0].Value()), "Empty slice should have 0 length") + }) + + t.Run("multiple payloads with mixed nil and non-nil values", func(t *testing.T) { + t.Parallel() + p1 := testutils.PathByUint16(1) + kp1 := ledger.NewKeyPart(uint16(1), []byte("key 1")) + k1 := ledger.NewKey([]ledger.KeyPart{kp1}) + pl1 := ledger.NewPayload(k1, nil) + + p2 := testutils.PathByUint16(2) + kp2 := ledger.NewKeyPart(uint16(1), []byte("key 2")) + k2 := ledger.NewKey([]ledger.KeyPart{kp2}) + pl2 := ledger.NewPayload(k2, []byte{}) + + p3 := testutils.PathByUint16(3) + kp3 := ledger.NewKeyPart(uint16(1), []byte("key 3")) + k3 := ledger.NewKey([]ledger.KeyPart{kp3}) + pl3 := ledger.NewPayload(k3, []byte{1, 2, 3}) + + tu := &ledger.TrieUpdate{ + RootHash: testutils.RootHashFixture(), + Paths: []ledger.Path{p1, p2, p3}, + Payloads: []*ledger.Payload{pl1, pl2, pl3}, + } + + encoded := ledger.EncodeTrieUpdateCBOR(tu) + decoded, err := ledger.DecodeTrieUpdateCBOR(encoded) + require.NoError(t, err) + require.True(t, tu.Equals(decoded)) + + // Verify each value type is preserved + require.NotNil(t, decoded.Payloads[0].Value(), "First payload should have non-nil empty slice after normalization") + require.Equal(t, 
0, len(decoded.Payloads[0].Value()), "First payload should have 0 length") + require.NotNil(t, decoded.Payloads[1].Value(), "Second payload should have non-nil empty slice") + require.Equal(t, 0, len(decoded.Payloads[1].Value()), "Second payload should have 0 length") + require.NotNil(t, decoded.Payloads[2].Value(), "Third payload should have non-nil value") + require.Equal(t, ledger.Value([]byte{1, 2, 3}), decoded.Payloads[2].Value(), "Third payload should have correct value") + }) + + t.Run("nil trie update", func(t *testing.T) { + t.Parallel() + encoded := ledger.EncodeTrieUpdateCBOR(nil) + require.Equal(t, []byte{}, encoded) + + decoded, err := ledger.DecodeTrieUpdateCBOR([]byte{}) + require.NoError(t, err) + require.Nil(t, decoded) + }) +} + +// TestTrieUpdateEncodingMethodsPreservesValueTypes tests that all three encoding methods +// (EncodeTrieUpdate/DecodeTrieUpdate, EncodeTrieUpdateCBOR/DecodeTrieUpdateCBOR, and protobuf) +// correctly preserve nil, empty slice, and non-empty slice values in payloads after round-trip encoding/decoding. 
+func TestTrieUpdateEncodingMethodsPreservesValueTypes(t *testing.T) { + t.Parallel() + + // Create a TrieUpdate with three payloads: nil, empty slice, and non-empty slice + p1 := testutils.PathByUint16(1) + kp1 := ledger.NewKeyPart(uint16(1), []byte("key 1")) + k1 := ledger.NewKey([]ledger.KeyPart{kp1}) + pl1 := ledger.NewPayload(k1, nil) // nil value + + p2 := testutils.PathByUint16(2) + kp2 := ledger.NewKeyPart(uint16(1), []byte("key 2")) + k2 := ledger.NewKey([]ledger.KeyPart{kp2}) + pl2 := ledger.NewPayload(k2, []byte{}) // empty slice value + + p3 := testutils.PathByUint16(3) + kp3 := ledger.NewKeyPart(uint16(1), []byte("key 3")) + k3 := ledger.NewKey([]ledger.KeyPart{kp3}) + pl3 := ledger.NewPayload(k3, []byte{1, 2, 3}) // non-empty slice value + + originalTU := &ledger.TrieUpdate{ + RootHash: testutils.RootHashFixture(), + Paths: []ledger.Path{p1, p2, p3}, + Payloads: []*ledger.Payload{pl1, pl2, pl3}, + } + + // Verify all three methods produce equivalent results for the non-empty value + t.Run("all methods produce equivalent results", func(t *testing.T) { + t.Parallel() + + // Encode using all three methods + encoded1 := ledger.EncodeTrieUpdate(originalTU) + encoded2 := ledger.EncodeTrieUpdateCBOR(originalTU) + trieUpdateBytes := ledger.EncodeTrieUpdateCBOR(originalTU) + setResponse := &ledgerpb.SetResponse{ + TrieUpdate: trieUpdateBytes, + } + encoded3, err := proto.Marshal(setResponse) + require.NoError(t, err) + + // Decode all three + decoded1, err := ledger.DecodeTrieUpdate(encoded1) + require.NoError(t, err) + + decoded2, err := ledger.DecodeTrieUpdateCBOR(encoded2) + require.NoError(t, err) + + var protoDecoded ledgerpb.SetResponse + err = proto.Unmarshal(encoded3, &protoDecoded) + require.NoError(t, err) + decoded3, err := ledger.DecodeTrieUpdateCBOR(protoDecoded.TrieUpdate) + require.NoError(t, err) + + // All three should produce the same non-empty value + require.Equal(t, decoded1.Payloads[2].Value(), decoded2.Payloads[2].Value(), "Method 1 and 
2 should produce same non-empty value") + require.Equal(t, decoded2.Payloads[2].Value(), decoded3.Payloads[2].Value(), "Method 2 and 3 should produce same non-empty value") + require.Equal(t, decoded1, decoded2, "EncodeTrieUpdate and EncodeTrieUpdateCBOR should produce same TrieUpdate") + require.Equal(t, decoded2, decoded3, "EncodeTrieUpdateCBOR and EncodeTrieUpdateProtoBuf should produce same TrieUpdate") + }) } diff --git a/module/epochs/epoch_lookup_test.go b/module/epochs/epoch_lookup_test.go index 41e7efc079b..7feeaa32171 100644 --- a/module/epochs/epoch_lookup_test.go +++ b/module/epochs/epoch_lookup_test.go @@ -268,17 +268,24 @@ func (suite *EpochLookupSuite) TestProtocolEvents_EpochExtended_SanityChecks() { FinalView: suite.currEpoch.finalView + 100, } + throwCalled := make(chan struct{}) ctx.On("Throw", mock.AnythingOfType("*errors.errorString")).Run(func(args mock.Arguments) { err, ok := args.Get(0).(error) assert.True(suite.T(), ok) assert.Contains(suite.T(), err.Error(), fmt.Sprintf(invalidEpochViewSequence, extension.FirstView, suite.currEpoch.finalView)) + close(throwCalled) }) suite.lookup.EpochExtended(suite.currEpoch.epochCounter, nil, extension) // wait for the protocol event to be processed (async) assert.Eventually(suite.T(), func() bool { - return len(suite.lookup.epochEvents) == 0 + select { + case <-throwCalled: + return len(suite.lookup.epochEvents) == 0 + default: + return false + } }, 2*time.Second, 50*time.Millisecond) }) } diff --git a/module/metrics/execution.go b/module/metrics/execution.go index f11543a6d5d..16ef95bf835 100644 --- a/module/metrics/execution.go +++ b/module/metrics/execution.go @@ -12,6 +12,7 @@ import ( ) type ExecutionCollector struct { + *LedgerCollector tracer module.Tracer totalExecutedBlocksCounter prometheus.Counter totalExecutedCollectionsCounter prometheus.Counter @@ -24,24 +25,6 @@ type ExecutionCollector struct { targetChunkDataPackPrunedHeightGauge prometheus.Gauge stateStorageDiskTotal prometheus.Gauge 
storageStateCommitment prometheus.Gauge - checkpointSize prometheus.Gauge - forestApproxMemorySize prometheus.Gauge - forestNumberOfTrees prometheus.Gauge - latestTrieRegCount prometheus.Gauge - latestTrieRegCountDiff prometheus.Gauge - latestTrieRegSize prometheus.Gauge - latestTrieRegSizeDiff prometheus.Gauge - latestTrieMaxDepthTouched prometheus.Gauge - updated prometheus.Counter - proofSize prometheus.Gauge - updatedValuesNumber prometheus.Counter - updatedValuesSize prometheus.Gauge - updatedDuration prometheus.Histogram - updatedDurationPerValue prometheus.Histogram - readValuesNumber prometheus.Counter - readValuesSize prometheus.Gauge - readDuration prometheus.Histogram - readDurationPerValue prometheus.Histogram blockComputationUsed prometheus.Histogram blockComputationVector *prometheus.GaugeVec blockCachedPrograms prometheus.Gauge @@ -104,129 +87,8 @@ type ExecutionCollector struct { } func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { - - forestApproxMemorySize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "forest_approx_memory_size", - Help: "an approximate size of in-memory forest in bytes", - }) - - forestNumberOfTrees := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "forest_number_of_trees", - Help: "the number of trees in memory", - }) - - latestTrieRegCount := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "latest_trie_reg_count", - Help: "the number of allocated registers (latest created trie)", - }) - - latestTrieRegCountDiff := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "latest_trie_reg_count_diff", - Help: "the difference between number of unique register allocated of the latest created trie and parent trie", - }) - - latestTrieRegSize := 
promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "latest_trie_reg_size", - Help: "the size of allocated registers (latest created trie)", - }) - - latestTrieRegSizeDiff := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "latest_trie_reg_size_diff", - Help: "the difference between size of unique register allocated of the latest created trie and parent trie", - }) - - latestTrieMaxDepthTouched := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "latest_trie_max_depth_touched", - Help: "the maximum depth touched of the latest created trie", - }) - - updatedCount := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "updates_counted", - Help: "the number of updates", - }) - - proofSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "average_proof_size", - Help: "the average size of a single generated proof in bytes", - }) - - updatedValuesNumber := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "update_values_number", - Help: "the total number of values updated", - }) - - updatedValuesSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "update_values_size", - Help: "the total size of values for single update in bytes", - }) - - updatedDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "update_duration", - Help: "the duration of update operation", - Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, - }) - - updatedDurationPerValue := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: 
"update_duration_per_value", - Help: "the duration of update operation per value", - Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, - }) - - readValuesNumber := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "read_values_number", - Help: "the total number of values read", - }) - - readValuesSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "read_values_size", - Help: "the total size of values for single read in bytes", - }) - - readDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "read_duration", - Help: "the duration of read operation", - Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, - }) - - readDurationPerValue := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, - Name: "read_duration_per_value", - Help: "the duration of read operation per value", - Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, - }) + // Create LedgerCollector with execution namespace and state_storage subsystem for checkpoint + ledgerCollector := NewLedgerCollector(namespaceExecution, subsystemStateStorage) blockExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ Namespace: namespaceExecution, @@ -549,25 +411,8 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { }) ec := &ExecutionCollector{ - tracer: tracer, - - forestApproxMemorySize: forestApproxMemorySize, - forestNumberOfTrees: forestNumberOfTrees, - latestTrieRegCount: latestTrieRegCount, - latestTrieRegCountDiff: latestTrieRegCountDiff, - latestTrieRegSize: latestTrieRegSize, - latestTrieRegSizeDiff: latestTrieRegSizeDiff, - latestTrieMaxDepthTouched: latestTrieMaxDepthTouched, - updated: updatedCount, - proofSize: proofSize, - updatedValuesNumber: updatedValuesNumber, - updatedValuesSize: updatedValuesSize, - updatedDuration: 
updatedDuration, - updatedDurationPerValue: updatedDurationPerValue, - readValuesNumber: readValuesNumber, - readValuesSize: readValuesSize, - readDuration: readDuration, - readDurationPerValue: readDurationPerValue, + LedgerCollector: ledgerCollector, + tracer: tracer, blockExecutionTime: blockExecutionTime, blockComputationUsed: blockComputationUsed, blockComputationVector: blockComputationVector, @@ -687,13 +532,6 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Help: "the storage size of a state commitment in bytes", }), - checkpointSize: promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemStateStorage, - Name: "checkpoint_size_bytes", - Help: "the size of a checkpoint in bytes", - }), - stateSyncActive: promauto.NewGauge(prometheus.GaugeOpts{ Namespace: namespaceExecution, Subsystem: subsystemIngestion, @@ -929,11 +767,6 @@ func (ec *ExecutionCollector) ExecutionStorageStateCommitment(bytes int64) { ec.storageStateCommitment.Set(float64(bytes)) } -// ExecutionCheckpointSize reports the size of a checkpoint in bytes -func (ec *ExecutionCollector) ExecutionCheckpointSize(bytes uint64) { - ec.checkpointSize.Set(float64(bytes)) -} - // ExecutionLastExecutedBlockHeight reports last executed block height func (ec *ExecutionCollector) ExecutionLastExecutedBlockHeight(height uint64) { ec.lastExecutedBlockHeightGauge.Set(float64(height)) @@ -953,91 +786,6 @@ func (ec *ExecutionCollector) ExecutionTargetChunkDataPackPrunedHeight(height ui ec.targetChunkDataPackPrunedHeightGauge.Set(float64(height)) } -// ForestApproxMemorySize records approximate memory usage of forest (all in-memory trees) -func (ec *ExecutionCollector) ForestApproxMemorySize(bytes uint64) { - ec.forestApproxMemorySize.Set(float64(bytes)) -} - -// ForestNumberOfTrees current number of trees in a forest (in memory) -func (ec *ExecutionCollector) ForestNumberOfTrees(number uint64) { - ec.forestNumberOfTrees.Set(float64(number)) -} - -// 
LatestTrieRegCount records the number of unique register allocated (the lastest created trie) -func (ec *ExecutionCollector) LatestTrieRegCount(number uint64) { - ec.latestTrieRegCount.Set(float64(number)) -} - -// LatestTrieRegCountDiff records the difference between the number of unique register allocated of the latest created trie and parent trie -func (ec *ExecutionCollector) LatestTrieRegCountDiff(number int64) { - ec.latestTrieRegCountDiff.Set(float64(number)) -} - -// LatestTrieRegSize records the size of unique register allocated (the lastest created trie) -func (ec *ExecutionCollector) LatestTrieRegSize(size uint64) { - ec.latestTrieRegSize.Set(float64(size)) -} - -// LatestTrieRegSizeDiff records the difference between the size of unique register allocated of the latest created trie and parent trie -func (ec *ExecutionCollector) LatestTrieRegSizeDiff(size int64) { - ec.latestTrieRegSizeDiff.Set(float64(size)) -} - -// LatestTrieMaxDepthTouched records the maximum depth touched of the last created trie -func (ec *ExecutionCollector) LatestTrieMaxDepthTouched(maxDepth uint16) { - ec.latestTrieMaxDepthTouched.Set(float64(maxDepth)) -} - -// UpdateCount increase a counter of performed updates -func (ec *ExecutionCollector) UpdateCount() { - ec.updated.Inc() -} - -// ProofSize records a proof size -func (ec *ExecutionCollector) ProofSize(bytes uint32) { - ec.proofSize.Set(float64(bytes)) -} - -// UpdateValuesNumber accumulates number of updated values -func (ec *ExecutionCollector) UpdateValuesNumber(number uint64) { - ec.updatedValuesNumber.Add(float64(number)) -} - -// UpdateValuesSize total size (in bytes) of updates values -func (ec *ExecutionCollector) UpdateValuesSize(bytes uint64) { - ec.updatedValuesSize.Set(float64(bytes)) -} - -// UpdateDuration records absolute time for the update of a trie -func (ec *ExecutionCollector) UpdateDuration(duration time.Duration) { - ec.updatedDuration.Observe(duration.Seconds()) -} - -// UpdateDurationPerItem records 
update time for single value (total duration / number of updated values) -func (ec *ExecutionCollector) UpdateDurationPerItem(duration time.Duration) { - ec.updatedDurationPerValue.Observe(duration.Seconds()) -} - -// ReadValuesNumber accumulates number of read values -func (ec *ExecutionCollector) ReadValuesNumber(number uint64) { - ec.readValuesNumber.Add(float64(number)) -} - -// ReadValuesSize total size (in bytes) of read values -func (ec *ExecutionCollector) ReadValuesSize(bytes uint64) { - ec.readValuesSize.Set(float64(bytes)) -} - -// ReadDuration records absolute time for the read from a trie -func (ec *ExecutionCollector) ReadDuration(duration time.Duration) { - ec.readDuration.Observe(duration.Seconds()) -} - -// ReadDurationPerItem records read time for single value (total duration / number of read values) -func (ec *ExecutionCollector) ReadDurationPerItem(duration time.Duration) { - ec.readDurationPerValue.Observe(duration.Seconds()) -} - func (ec *ExecutionCollector) ExecutionCollectionRequestSent() { ec.collectionRequestSent.Inc() } diff --git a/module/metrics/ledger.go b/module/metrics/ledger.go new file mode 100644 index 00000000000..0bae9e6ffe6 --- /dev/null +++ b/module/metrics/ledger.go @@ -0,0 +1,252 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" +) + +const ( + namespaceLedger = "ledger" + subsystemWAL = "wal" +) + +// LedgerCollector implements both module.LedgerMetrics and module.WALMetrics. +// It can be reused by both the standalone ledger service and execution nodes. 
+type LedgerCollector struct { + namespace string + walSubsystem string + // LedgerMetrics + forestApproxMemorySize prometheus.Gauge + forestNumberOfTrees prometheus.Gauge + latestTrieRegCount prometheus.Gauge + latestTrieRegCountDiff prometheus.Gauge + latestTrieRegSize prometheus.Gauge + latestTrieRegSizeDiff prometheus.Gauge + latestTrieMaxDepthTouched prometheus.Gauge + updateCount prometheus.Counter + proofSize prometheus.Gauge + updateValuesNumber prometheus.Counter + updateValuesSize prometheus.Gauge + updateDuration prometheus.Histogram + updateDurationPerItem prometheus.Histogram + readValuesNumber prometheus.Counter + readValuesSize prometheus.Gauge + readDuration prometheus.Histogram + readDurationPerItem prometheus.Histogram + + // WALMetrics + checkpointSize prometheus.Gauge +} + +// NewLedgerCollector creates a new LedgerCollector that implements both +// module.LedgerMetrics and module.WALMetrics interfaces. +// If namespace is empty, it defaults to "ledger". +// If walSubsystem is empty, it defaults to "wal". 
+func NewLedgerCollector(namespace, walSubsystem string) *LedgerCollector { + if namespace == "" { + namespace = namespaceLedger + } + if walSubsystem == "" { + walSubsystem = subsystemWAL + } + return &LedgerCollector{ + namespace: namespace, + walSubsystem: walSubsystem, + forestApproxMemorySize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "forest_approx_memory_size", + Help: "an approximate size of in-memory forest in bytes", + }), + forestNumberOfTrees: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "forest_number_of_trees", + Help: "the number of trees in memory", + }), + latestTrieRegCount: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "latest_trie_reg_count", + Help: "the number of allocated registers (latest created trie)", + }), + latestTrieRegCountDiff: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "latest_trie_reg_count_diff", + Help: "the difference between number of unique register allocated of the latest created trie and parent trie", + }), + latestTrieRegSize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "latest_trie_reg_size", + Help: "the size of allocated registers (latest created trie)", + }), + latestTrieRegSizeDiff: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "latest_trie_reg_size_diff", + Help: "the difference between size of unique register allocated of the latest created trie and parent trie", + }), + latestTrieMaxDepthTouched: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "latest_trie_max_depth_touched", + Help: "the maximum depth touched of the latest created trie", + }), + updateCount: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + 
Subsystem: subsystemMTrie, + Name: "updates_counted", + Help: "the number of updates", + }), + proofSize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "average_proof_size", + Help: "the average size of a single generated proof in bytes", + }), + updateValuesNumber: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "update_values_number", + Help: "the total number of values updated", + }), + updateValuesSize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "update_values_size", + Help: "the total size of values for single update in bytes", + }), + updateDuration: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "update_duration", + Help: "the duration of update operation", + Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, + }), + updateDurationPerItem: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "update_duration_per_value", + Help: "the duration of update operation per value", + Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, + }), + readValuesNumber: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "read_values_number", + Help: "the total number of values read", + }), + readValuesSize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "read_values_size", + Help: "the total size of values for single read in bytes", + }), + readDuration: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: "read_duration", + Help: "the duration of read operation", + Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, + }), + readDurationPerItem: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystemMTrie, + Name: 
"read_duration_per_value", + Help: "the duration of read operation per value", + Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, + }), + checkpointSize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: walSubsystem, + Name: "checkpoint_size_bytes", + Help: "the size of a checkpoint in bytes", + }), + } +} + +// Ensure LedgerCollector implements both interfaces +var _ module.LedgerMetrics = (*LedgerCollector)(nil) +var _ module.WALMetrics = (*LedgerCollector)(nil) + +// LedgerMetrics implementation + +func (lc *LedgerCollector) ForestApproxMemorySize(bytes uint64) { + lc.forestApproxMemorySize.Set(float64(bytes)) +} + +func (lc *LedgerCollector) ForestNumberOfTrees(number uint64) { + lc.forestNumberOfTrees.Set(float64(number)) +} + +func (lc *LedgerCollector) LatestTrieRegCount(number uint64) { + lc.latestTrieRegCount.Set(float64(number)) +} + +func (lc *LedgerCollector) LatestTrieRegCountDiff(number int64) { + lc.latestTrieRegCountDiff.Set(float64(number)) +} + +func (lc *LedgerCollector) LatestTrieRegSize(size uint64) { + lc.latestTrieRegSize.Set(float64(size)) +} + +func (lc *LedgerCollector) LatestTrieRegSizeDiff(size int64) { + lc.latestTrieRegSizeDiff.Set(float64(size)) +} + +func (lc *LedgerCollector) LatestTrieMaxDepthTouched(maxDepth uint16) { + lc.latestTrieMaxDepthTouched.Set(float64(maxDepth)) +} + +func (lc *LedgerCollector) UpdateCount() { + lc.updateCount.Inc() +} + +func (lc *LedgerCollector) ProofSize(bytes uint32) { + lc.proofSize.Set(float64(bytes)) +} + +func (lc *LedgerCollector) UpdateValuesNumber(number uint64) { + lc.updateValuesNumber.Add(float64(number)) +} + +func (lc *LedgerCollector) UpdateValuesSize(bytes uint64) { + lc.updateValuesSize.Set(float64(bytes)) +} + +func (lc *LedgerCollector) UpdateDuration(duration time.Duration) { + lc.updateDuration.Observe(duration.Seconds()) +} + +func (lc *LedgerCollector) UpdateDurationPerItem(duration time.Duration) { + lc.updateDurationPerItem.Observe(duration.Seconds()) +} 
+ +func (lc *LedgerCollector) ReadValuesNumber(number uint64) { + lc.readValuesNumber.Add(float64(number)) +} + +func (lc *LedgerCollector) ReadValuesSize(bytes uint64) { + lc.readValuesSize.Set(float64(bytes)) +} + +func (lc *LedgerCollector) ReadDuration(duration time.Duration) { + lc.readDuration.Observe(duration.Seconds()) +} + +func (lc *LedgerCollector) ReadDurationPerItem(duration time.Duration) { + lc.readDurationPerItem.Observe(duration.Seconds()) +} + +// WALMetrics implementation + +func (lc *LedgerCollector) ExecutionCheckpointSize(bytes uint64) { + lc.checkpointSize.Set(float64(bytes)) +} diff --git a/utils/io/filelock.go b/utils/io/filelock.go new file mode 100644 index 00000000000..290b58c2915 --- /dev/null +++ b/utils/io/filelock.go @@ -0,0 +1,84 @@ +package io + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/gofrs/flock" +) + +// FileLock represents an exclusive file lock that prevents multiple processes +// from accessing the same resource. If another process tries to acquire the lock, +// it will fail and should crash. +type FileLock struct { + lockFile *flock.Flock +} + +// NewFileLock creates a new file lock at the specified path. +// The lock file will be created in the same directory as the path. +// If path is a directory, the lock file will be created inside it. +// If the directory doesn't exist yet, it assumes the path is intended to be a directory. 
+func NewFileLock(path string) (*FileLock, error) { + // Determine the lock file path + // Always create the lock file in the specified path (treating it as a directory) + // This ensures the lock is always in the WAL directory itself + lockPath := filepath.Join(path, ".lock") + + // Ensure the directory exists before trying to create the lock file + dir := filepath.Dir(lockPath) + if err := os.MkdirAll(dir, 0755); err != nil { + if os.IsPermission(err) { + return nil, fmt.Errorf("failed to create directory for lock file %s (permission denied): %w", lockPath, err) + } + return nil, fmt.Errorf("failed to create directory for lock file %s: %w", lockPath, err) + } + + return &FileLock{ + lockFile: flock.New(lockPath), + }, nil +} + +// Lock acquires an exclusive lock on the file. This will block until the lock +// can be acquired. If the lock cannot be acquired (e.g., another process holds it), +// it returns an error. The process should crash in this case. +func (fl *FileLock) Lock() error { + locked, err := fl.lockFile.TryLock() + if err != nil { + // Check if the error is due to permission denied + var pathErr *os.PathError + if errors.Is(err, os.ErrPermission) || (errors.As(err, &pathErr) && os.IsPermission(pathErr.Err)) { + return fmt.Errorf("failed to acquire file lock at %s (permission denied): %w", fl.lockFile.Path(), err) + } + return fmt.Errorf("failed to acquire file lock at %s: %w", fl.lockFile.Path(), err) + } + if !locked { + // Lock file exists and is held by another process + return fmt.Errorf("cannot acquire exclusive lock at %s: another process is already using this directory", fl.lockFile.Path()) + } + return nil +} + +// Unlock releases the file lock. +func (fl *FileLock) Unlock() error { + if err := fl.lockFile.Unlock(); err != nil { + return fmt.Errorf("failed to release file lock at %s: %w", fl.lockFile.Path(), err) + } + return nil +} + +// Close releases the file lock. Implements io.Closer. 
+func (fl *FileLock) Close() error { + return fl.lockFile.Close() +} + +// Path returns the path to the lock file. +func (fl *FileLock) Path() string { + return fl.lockFile.Path() +} + +// IsLocked returns true if this FileLock instance currently holds the lock. +func (fl *FileLock) IsLocked() bool { + return fl.lockFile.Locked() +} diff --git a/utils/io/filelock_test.go b/utils/io/filelock_test.go new file mode 100644 index 00000000000..0312be47d6d --- /dev/null +++ b/utils/io/filelock_test.go @@ -0,0 +1,499 @@ +package io + +import ( + "os" + "path/filepath" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/utils/unittest" +) + +// RemoveLockFile removes the lock file if it exists. +// This is only used in tests. +func RemoveLockFile(lockDir string) error { + lockPath := filepath.Join(lockDir, ".lock") + if !FileExists(lockPath) { + return nil + } + return os.Remove(lockPath) +} + +func TestFileLock(t *testing.T) { + t.Run("basic lock and unlock", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock, err := NewFileLock(dir) + require.NoError(t, err) + require.NotNil(t, lock) + + // Verify lock path + expectedPath := filepath.Join(dir, ".lock") + require.Equal(t, expectedPath, lock.Path()) + + // Acquire lock + err = lock.Lock() + require.NoError(t, err) + + // Release lock + err = lock.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("lock prevents concurrent access", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock1, err := NewFileLock(dir) + require.NoError(t, err) + lock2, err := NewFileLock(dir) + require.NoError(t, err) + + // First lock should succeed + err = lock1.Lock() + require.NoError(t, err) + + // Second lock should fail + err = lock2.Lock() + require.Error(t, err) + require.Contains(t, err.Error(), "another process is already using this directory") + + // Release first lock + err = lock1.Unlock() + require.NoError(t, err) + + // Now second lock should 
succeed + err = lock2.Lock() + require.NoError(t, err) + + err = lock2.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("lock can be re-acquired after unlock", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock, err := NewFileLock(dir) + require.NoError(t, err) + + // Acquire and release multiple times + for i := 0; i < 3; i++ { + err := lock.Lock() + require.NoError(t, err, "iteration %d", i) + + err = lock.Unlock() + require.NoError(t, err, "iteration %d", i) + } + }) + }) + + t.Run("concurrent goroutines competing for lock", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + const numGoroutines = 10 + var wg sync.WaitGroup + successCount := 0 + failureCount := 0 + var mu sync.Mutex + var lockHeld sync.WaitGroup + + // First, acquire the lock to hold it + mainLock, err := NewFileLock(dir) + require.NoError(t, err) + err = mainLock.Lock() + require.NoError(t, err) + + // Start multiple goroutines trying to acquire the same lock + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + lockHeld.Add(1) + go func() { + defer wg.Done() + lock, err := NewFileLock(dir) + if err != nil { + mu.Lock() + failureCount++ + mu.Unlock() + lockHeld.Done() + return + } + err = lock.Lock() + mu.Lock() + if err != nil { + failureCount++ + } else { + successCount++ + } + mu.Unlock() + lockHeld.Done() + if err == nil { + _ = lock.Unlock() + } + }() + } + + // Wait a bit to ensure all goroutines have tried to acquire the lock + lockHeld.Wait() + + // Release the main lock + err = mainLock.Unlock() + require.NoError(t, err) + + // Wait for all goroutines to finish + wg.Wait() + + // All should have failed since the main lock was held + require.Equal(t, 0, successCount, "no goroutine should acquire the lock while main lock is held") + require.Equal(t, numGoroutines, failureCount, "all goroutines should fail to acquire the lock") + }) + }) + + t.Run("lock file is created in correct location", func(t *testing.T) { + unittest.RunWithTempDir(t, 
func(dir string) { + lock, err := NewFileLock(dir) + require.NoError(t, err) + lockPath := lock.Path() + + // Lock file should not exist before locking + require.False(t, FileExists(lockPath)) + + // Acquire lock + err = lock.Lock() + require.NoError(t, err) + + // Lock file should exist after locking + require.True(t, FileExists(lockPath)) + + // Verify it's in the expected location + expectedPath := filepath.Join(dir, ".lock") + require.Equal(t, expectedPath, lockPath) + + err = lock.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("multiple locks on different directories", func(t *testing.T) { + unittest.RunWithTempDirs(t, func(dir1, dir2 string) { + lock1, err := NewFileLock(dir1) + require.NoError(t, err) + lock2, err := NewFileLock(dir2) + require.NoError(t, err) + + // Both locks should succeed since they're on different directories + err = lock1.Lock() + require.NoError(t, err) + + err = lock2.Lock() + require.NoError(t, err) + + // Both should be able to unlock + err = lock1.Unlock() + require.NoError(t, err) + + err = lock2.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("lock works with non-existent directory", func(t *testing.T) { + unittest.RunWithTempDir(t, func(baseDir string) { + nonExistentDir := filepath.Join(baseDir, "non-existent", "subdir") + lock, err := NewFileLock(nonExistentDir) + require.NoError(t, err) + + // Lock should still work (the directory was created in NewFileLock) + err = lock.Lock() + require.NoError(t, err) + + // Verify lock file path is correct + expectedPath := filepath.Join(nonExistentDir, ".lock") + require.Equal(t, expectedPath, lock.Path()) + + err = lock.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("unlock without lock is safe", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock, err := NewFileLock(dir) + require.NoError(t, err) + + // Unlocking without locking should not panic + // (though it may return an error) + err = lock.Unlock() + // The error is acceptable - we 
just want to ensure it doesn't panic + _ = err + }) + }) + + t.Run("double unlock is safe", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock, err := NewFileLock(dir) + require.NoError(t, err) + + err = lock.Lock() + require.NoError(t, err) + + err = lock.Unlock() + require.NoError(t, err) + + // Unlocking again should be safe (may return error but shouldn't panic) + err = lock.Unlock() + _ = err // Error is acceptable + }) + }) + + t.Run("lock is released when process terminates", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + // Acquire lock in first "process" (goroutine) + lock1, err := NewFileLock(dir) + require.NoError(t, err) + err = lock1.Lock() + require.NoError(t, err) + + // Simulate process termination by unlocking + err = lock1.Unlock() + require.NoError(t, err) + + // Now a new "process" should be able to acquire the lock + lock2, err := NewFileLock(dir) + require.NoError(t, err) + err = lock2.Lock() + require.NoError(t, err) + + err = lock2.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("lock file persists after unlock", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock, err := NewFileLock(dir) + require.NoError(t, err) + lockPath := lock.Path() + + err = lock.Lock() + require.NoError(t, err) + + // Lock file should exist + require.True(t, FileExists(lockPath)) + + err = lock.Unlock() + require.NoError(t, err) + + // Lock file may or may not exist after unlock (implementation detail) + // But the important thing is that we can acquire a new lock + lock2, err := NewFileLock(dir) + require.NoError(t, err) + err = lock2.Lock() + require.NoError(t, err) + + err = lock2.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("error message contains lock path", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock1, err := NewFileLock(dir) + require.NoError(t, err) + lock2, err := NewFileLock(dir) + require.NoError(t, err) + + err = lock1.Lock() + 
require.NoError(t, err) + + err = lock2.Lock() + require.Error(t, err) + require.Contains(t, err.Error(), lock2.Path()) + require.Contains(t, err.Error(), "another process is already using this directory") + + err = lock1.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("lock works with absolute paths", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + absDir, err := filepath.Abs(dir) + require.NoError(t, err) + + lock, err := NewFileLock(absDir) + require.NoError(t, err) + err = lock.Lock() + require.NoError(t, err) + + // Verify lock path is also absolute + lockPath := lock.Path() + require.True(t, filepath.IsAbs(lockPath)) + + err = lock.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("lock works with relative paths", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + // Change to the temp directory + originalDir, err := os.Getwd() + require.NoError(t, err) + defer func() { + _ = os.Chdir(originalDir) + }() + + err = os.Chdir(dir) + require.NoError(t, err) + + // Use relative path + lock, err := NewFileLock(".") + require.NoError(t, err) + err = lock.Lock() + require.NoError(t, err) + + err = lock.Unlock() + require.NoError(t, err) + }) + }) + + t.Run("IsLocked detects lock state", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock1, err := NewFileLock(dir) + require.NoError(t, err) + lock2, err := NewFileLock(dir) + require.NoError(t, err) + + // Initially, lock should not be locked + require.False(t, lock1.IsLocked(), "lock should not be locked initially") + + // Acquire lock + err = lock1.Lock() + require.NoError(t, err) + + // Now lock1 should know it's locked + require.True(t, lock1.IsLocked(), "lock1 should know it holds the lock") + + // Release lock + err = lock1.Unlock() + require.NoError(t, err) + + // Now lock should not be locked + require.False(t, lock1.IsLocked(), "lock should not be locked after unlock") + _ = lock2 // lock2 unused after IsLocked behavior changed + }) + 
}) + + t.Run("RemoveLockFile removes lock file", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lockPath := filepath.Join(dir, ".lock") + + // Lock file shouldn't exist initially + require.False(t, FileExists(lockPath)) + + // Remove non-existent lock file should succeed + err := RemoveLockFile(dir) + require.NoError(t, err) + + // Acquire lock + lock, err := NewFileLock(dir) + require.NoError(t, err) + err = lock.Lock() + require.NoError(t, err) + + // Lock file should exist + require.True(t, FileExists(lockPath)) + + // Can't remove lock file while lock is held + // (this is expected - the file is locked) + // But we can unlock first + err = lock.Unlock() + require.NoError(t, err) + + // Now we can remove the lock file + err = RemoveLockFile(dir) + require.NoError(t, err) + require.False(t, FileExists(lockPath), "lock file should be removed") + }) + }) + + t.Run("NewFileLock error for permission denied on directory creation", func(t *testing.T) { + // This test may not work on all systems, especially Windows + // Skip if we can't create a read-only parent directory + unittest.RunWithTempDir(t, func(baseDir string) { + // Create a subdirectory that we'll make read-only + restrictedDir := filepath.Join(baseDir, "restricted") + err := os.MkdirAll(restrictedDir, 0755) + require.NoError(t, err) + + // Create a subdirectory inside that we'll try to lock + // But first make the parent read-only so we can't create subdirectories + lockTargetDir := filepath.Join(restrictedDir, "wal") + + // Make the restricted directory read-only (remove write permission) + err = os.Chmod(restrictedDir, 0555) + require.NoError(t, err) + defer func() { + // Restore permissions for cleanup + _ = os.Chmod(restrictedDir, 0755) + }() + + _, err = NewFileLock(lockTargetDir) + require.Error(t, err) + + // Verify the error message contains permission denied + require.Contains(t, err.Error(), "permission denied") + require.Contains(t, err.Error(), lockTargetDir) + }) + }) + 
+ t.Run("lock error for permission denied on lock file creation", func(t *testing.T) { + // This test may not work on all systems, especially Windows + unittest.RunWithTempDir(t, func(baseDir string) { + // Create the WAL directory + walDir := filepath.Join(baseDir, "wal") + err := os.MkdirAll(walDir, 0755) + require.NoError(t, err) + + // Create the lock first while we have write permission + lock, err := NewFileLock(walDir) + require.NoError(t, err) + + // Make the directory read-only so we can't create the lock file + err = os.Chmod(walDir, 0555) + require.NoError(t, err) + defer func() { + // Restore permissions for cleanup + _ = os.Chmod(walDir, 0755) + }() + + err = lock.Lock() + require.Error(t, err) + + // Verify the error message contains permission denied + require.Contains(t, err.Error(), "permission denied") + require.Contains(t, err.Error(), walDir) + }) + }) + + t.Run("lock error message distinguishes permission denied from lock conflict", func(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + lock1, err := NewFileLock(dir) + require.NoError(t, err) + lock2, err := NewFileLock(dir) + require.NoError(t, err) + + // Acquire first lock + err = lock1.Lock() + require.NoError(t, err) + + // Try to acquire second lock - should get lock conflict, not permission denied + err = lock2.Lock() + require.Error(t, err) + require.Contains(t, err.Error(), "another process is already using this directory") + require.NotContains(t, err.Error(), "permission denied") + + err = lock1.Unlock() + require.NoError(t, err) + }) + }) +}