diff --git a/Makefile b/Makefile index 223274249..125f3bcdf 100644 --- a/Makefile +++ b/Makefile @@ -76,6 +76,24 @@ ifeq ($(BUILD_TYPE),debug) endif GO_TEST_PACKAGES ?= ./... +GO_TEST_FLAGS ?= + +VERBOSE ?= +ifeq ($(VERBOSE),true) + GO_BUILD_PARAMS += -v + GO_TEST_FLAGS += -v +endif + +TEST_PATTERN ?= +ifneq ($(TEST_PATTERN),) + GO_TEST_FLAGS += -run $(TEST_PATTERN) +endif + +TEST_PACKAGES ?= +ifneq ($(TEST_PACKAGES),) + GO_TEST_PACKAGES := $(addprefix ./, $(addsuffix /..., $(subst :, ,$(TEST_PACKAGES)))) +endif + ROLLUPS_CONTRACTS_ABI_BASEDIR:= rollups-contracts/ ROLLUPS_PRT_CONTRACTS_ABI_BASEDIR:= rollups-prt-contracts/ @@ -206,7 +224,7 @@ test: unit-test ## Execute all tests unit-test: ## Execute go unit tests @echo "Running go unit tests" @go clean -testcache - @go test -p 1 $(GO_BUILD_PARAMS) $(GO_TEST_PACKAGES) + @go test -p 1 $(GO_BUILD_PARAMS) $(GO_TEST_FLAGS) $(GO_TEST_PACKAGES) integration-test: ## Execute e2e tests @echo "Running end-to-end tests" @@ -245,6 +263,10 @@ deploy-exception-dapp: applications/exception-dapp ## Deploy exception-dapp test @echo "Deploying exception-dapp test application" @./cartesi-rollups-cli deploy application exception-dapp applications/exception-dapp/ +deploy-prt-echo-dapp: applications/echo-dapp ## Deploy echo-dapp test application + @echo "Deploying echo-dapp test application" + @./cartesi-rollups-cli deploy application prt-echo-dapp applications/echo-dapp/ --prt + # Temporary test dependencies target while we are not using distribution packages DOWNLOADS_DIR = test/downloads CARTESI_TEST_MACHINE_IMAGES = $(DOWNLOADS_DIR)/linux.bin diff --git a/cmd/cartesi-rollups-cli/root/app/register/register.go b/cmd/cartesi-rollups-cli/root/app/register/register.go index c9c88d4d0..281e6e3f1 100644 --- a/cmd/cartesi-rollups-cli/root/app/register/register.go +++ b/cmd/cartesi-rollups-cli/root/app/register/register.go @@ -52,6 +52,7 @@ var ( inputBoxAddressFromEnv bool dataAvailability string enableMachineHashCheck bool + applicationTypePRT bool disabled bool printAsJSON bool executionParametersFileParam string @@ -75,7 +76,7 @@ func init() { "Application template hash. (DO NOT USE IN PRODUCTION)\nThis value is retrieved from the application contract", ) - Cmd.Flags().Uint64VarP(&epochLength, "epoch-length", "e", 10, // nolint: mnd + Cmd.Flags().Uint64VarP(&epochLength, "epoch-length", "e", 0, // nolint: mnd "Consensus Epoch length. (DO NOT USE IN PRODUCTION)\nThis value is retrieved from the consensus contract", ) @@ -97,6 +98,8 @@ func init() { "Enable or disable machine hash check (DO NOT DISABLE IN PRODUCTION)") cobra.CheckErr(viper.BindPFlag(config.FEATURE_MACHINE_HASH_CHECK_ENABLED, Cmd.Flags().Lookup("machine-hash-check"))) + Cmd.Flags().BoolVarP(&applicationTypePRT, "prt", "", false, "Register as PRT application.") + origHelpFunc := Cmd.HelpFunc() Cmd.SetHelpFunc(func(command *cobra.Command, strings []string) { command.Flags().Lookup("verbose").Hidden = false @@ -165,7 +168,7 @@ func run(cmd *cobra.Command, args []string) { } } - if !cmd.Flags().Changed("epoch-length") { + if !cmd.Flags().Changed("epoch-length") && !applicationTypePRT { epochLength, err = getEpochLength(ctx, consensus) if err != nil { fmt.Fprintf(os.Stderr, "Failed to get epoch length from consensus: %v\n", err) @@ -201,19 +204,27 @@ func run(cmd *cobra.Command, args []string) { os.Exit(1) } + consensusType := model.Consensus_Authority + if applicationTypePRT { + consensusType = model.Consensus_PRT + } + application := model.Application{ - Name: validName, - IApplicationAddress: address, - IConsensusAddress: consensus, - IInputBoxAddress: *inputBoxAddress, - TemplateURI: templatePath, - TemplateHash: parsedTemplateHash, - EpochLength: epochLength, - DataAvailability: encodedDA, - State: applicationState, - IInputBoxBlock: inputBoxBlockNumber, - LastInputCheckBlock: 0, - LastOutputCheckBlock: 0, + Name: validName, + IApplicationAddress: address, + IConsensusAddress: consensus, + IInputBoxAddress: *inputBoxAddress, + TemplateURI: templatePath, + TemplateHash: parsedTemplateHash, + EpochLength: epochLength, + DataAvailability: encodedDA, + ConsensusType: consensusType, + State: applicationState, + IInputBoxBlock: inputBoxBlockNumber, + LastEpochCheckBlock: 0, + LastInputCheckBlock: 0, + LastOutputCheckBlock: 0, + LastTournamentCheckBlock: 0, } // load execution parameters from a file? diff --git a/cmd/cartesi-rollups-cli/root/deploy/application.go b/cmd/cartesi-rollups-cli/root/deploy/application.go index 9493ef3ed..d965b3282 100644 --- a/cmd/cartesi-rollups-cli/root/deploy/application.go +++ b/cmd/cartesi-rollups-cli/root/deploy/application.go @@ -174,6 +174,7 @@ func runDeployApplication(cmd *cobra.Command, args []string) { application.Name = applicationName application.TemplateURI = templateURI application.State = model.ApplicationState_Disabled + application.ConsensusType = model.Consensus_Authority if applicationEnableParam { application.State = model.ApplicationState_Enabled } @@ -256,6 +257,7 @@ func runDeployApplication(cmd *cobra.Command, args []string) { application.EpochLength = res.Deployment.EpochLength application.DataAvailability = res.DataAvailability application.IInputBoxBlock = res.IInputBoxBlock + application.ConsensusType = model.Consensus_PRT default: panic("unimplemented deployment type\n") } diff --git a/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go b/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go index ad64c5e70..0688c9fe9 100644 --- a/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go +++ b/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go @@ -94,7 +94,7 @@ func run(cmd *cobra.Command, args []string) { if err := status.Scan(statusFilter); err != nil { cobra.CheckErr(fmt.Errorf("invalid status filter: %w", err)) } - filter.Status = &status + filter.Status = []model.EpochStatus{status} } // Limit is validated in PreRunE diff --git a/cmd/cartesi-rollups-node/root/root.go b/cmd/cartesi-rollups-node/root/root.go index d69d502ae..f76e3b12b 100644 --- a/cmd/cartesi-rollups-node/root/root.go +++ b/cmd/cartesi-rollups-node/root/root.go @@ -32,6 +32,7 @@ var ( advancerPollInterval string validatorPollInterval string claimerPollInterval string + prtPollInterval string maxStartupTime string enableInputReader bool enableInspect bool @@ -93,6 +94,9 @@ func init() { Cmd.Flags().StringVar(&claimerPollInterval, "claimer-poll-interval", "3", "Claimer poll interval") cobra.CheckErr(viper.BindPFlag(config.CLAIMER_POLLING_INTERVAL, Cmd.Flags().Lookup("claimer-poll-interval"))) + Cmd.Flags().StringVar(&prtPollInterval, "prt-poll-interval", "3", "Claimer poll interval") + cobra.CheckErr(viper.BindPFlag(config.PRT_POLLING_INTERVAL, Cmd.Flags().Lookup("prt-poll-interval"))) + Cmd.Flags().StringVar(&maxStartupTime, "max-startup-time", "15", "Maximum startup time in seconds") cobra.CheckErr(viper.BindPFlag(config.MAX_STARTUP_TIME, Cmd.Flags().Lookup("max-startup-time"))) @@ -184,6 +188,10 @@ func run(cmd *cobra.Command, args []string) { createInfo.ClaimerClient, err = createEthClient(ctx, cfg.BlockchainHttpEndpoint.String(), logger) cobra.CheckErr(err) + logger = service.NewLogger(cfg.LogLevel, cfg.LogColor).With("service", "prt") + createInfo.PrtClient, err = createEthClient(ctx, cfg.BlockchainHttpEndpoint.String(), logger) + cobra.CheckErr(err) + createInfo.Repository, err = factory.NewRepositoryFromConnectionString(ctx, cfg.DatabaseConnection.String()) cobra.CheckErr(err) defer createInfo.Repository.Close() diff --git a/cmd/cartesi-rollups-prt/root/root.go b/cmd/cartesi-rollups-prt/root/root.go index b8ac78d4a..c8309fb26 100644 --- a/cmd/cartesi-rollups-prt/root/root.go +++ b/cmd/cartesi-rollups-prt/root/root.go @@ -5,12 +5,16 @@ package root import ( "context" + "log/slog" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/prt" "github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/internal/version" "github.com/cartesi/rollups-node/pkg/service" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/hashicorp/go-retryablehttp" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -25,7 +29,7 @@ var ( pollInterval string maxStartupTime string telemetryAddress string - cfg *config.ValidatorConfig + cfg *config.PrtConfig ) var Cmd = &cobra.Command{ @@ -50,8 +54,8 @@ func init() { "Database connection string in the URL format\n(eg.: 'postgres://user:password@hostname:port/database') ") cobra.CheckErr(viper.BindPFlag(config.DATABASE_CONNECTION, Cmd.Flags().Lookup("database-connection"))) - Cmd.Flags().StringVar(&pollInterval, "poll-interval", "7", "Poll interval") - cobra.CheckErr(viper.BindPFlag(config.VALIDATOR_POLLING_INTERVAL, Cmd.Flags().Lookup("poll-interval"))) + Cmd.Flags().StringVar(&pollInterval, "poll-interval", "3", "Poll interval") + cobra.CheckErr(viper.BindPFlag(config.PRT_POLLING_INTERVAL, Cmd.Flags().Lookup("poll-interval"))) Cmd.Flags().StringVar(&maxStartupTime, "max-startup-time", "15", "Maximum startup time in seconds") cobra.CheckErr(viper.BindPFlag(config.MAX_STARTUP_TIME, Cmd.Flags().Lookup("max-startup-time"))) @@ -59,7 +63,7 @@ func init() { // TODO: validate on preRunE Cmd.PreRunE = func(cmd *cobra.Command, args []string) error { var err error - cfg, err = config.LoadValidatorConfig() + cfg, err = config.LoadPrtConfig() if err != nil { return err } @@ -67,6 +71,31 @@ func init() { } } +func createEthClient(ctx context.Context, endpoint string, logger *slog.Logger) (*ethclient.Client, error) { + rclient := retryablehttp.NewClient() + rclient.Logger = logger + rclient.RetryMax = int(cfg.BlockchainHttpMaxRetries) + rclient.RetryWaitMin = cfg.BlockchainHttpRetryMinWait + rclient.RetryWaitMax = cfg.BlockchainHttpRetryMaxWait + + clientOptions := []rpc.ClientOption{ + rpc.WithHTTPClient(rclient.StandardClient()), + } + + authOpt, err := config.HTTPAuthorizationOption() + cobra.CheckErr(err) + if authOpt != nil { + clientOptions = append(clientOptions, authOpt) + } + + rpcClient, err := rpc.DialOptions(ctx, endpoint, clientOptions...) + if err != nil { + return nil, err + } + + return ethclient.NewClient(rpcClient), nil +} + func run(cmd *cobra.Command, args []string) { ctx, cancel := context.WithTimeout(context.Background(), cfg.MaxStartupTime) defer cancel() @@ -79,11 +108,16 @@ func run(cmd *cobra.Command, args []string) { EnableSignalHandling: true, TelemetryCreate: true, TelemetryAddress: cfg.TelemetryAddress, - PollInterval: cfg.ValidatorPollingInterval, + PollInterval: cfg.PrtPollingInterval, }, Config: *cfg, } + var err error + logger := service.NewLogger(cfg.LogLevel, cfg.LogColor).With("service", serviceName) + createInfo.EthClient, err = createEthClient(ctx, cfg.BlockchainHttpEndpoint.String(), logger) + cobra.CheckErr(err) + createInfo.Repository, err = factory.NewRepositoryFromConnectionString(ctx, cfg.DatabaseConnection.String()) cobra.CheckErr(err) defer createInfo.Repository.Close() diff --git a/internal/advancer/advancer.go b/internal/advancer/advancer.go index f55385214..22ef67c08 100644 --- a/internal/advancer/advancer.go +++ b/internal/advancer/advancer.go @@ -7,17 +7,13 @@ import ( "context" "errors" "fmt" - "net/http" "os" "path" "strings" - "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/inspect" "github.com/cartesi/rollups-node/internal/manager" . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" - "github.com/cartesi/rollups-node/pkg/service" ) var ( @@ -30,10 +26,13 @@ var ( // AdvancerRepository defines the repository interface needed by the Advancer service type AdvancerRepository interface { + ListEpochs(ctx context.Context, nameOrAddress string, f repository.EpochFilter, p repository.Pagination, descending bool) ([]*Epoch, uint64, error) ListInputs(ctx context.Context, nameOrAddress string, f repository.InputFilter, p repository.Pagination, descending bool) ([]*Input, uint64, error) GetLastInput(ctx context.Context, appAddress string, epochIndex uint64) (*Input, error) StoreAdvanceResult(ctx context.Context, appID int64, ar *AdvanceResult) error - UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) (int64, error) + UpdateEpochInputsProcessed(ctx context.Context, nameOrAddress string, epochIndex uint64) error + UpdateEpochOutputsProof(ctx context.Context, appID int64, epochIndex uint64, proof *OutputsProof) error + RepeatPreviousEpochOutputsProof(ctx context.Context, appID int64, epochIndex uint64) error UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error GetEpoch(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error @@ -41,95 +40,14 @@ type AdvancerRepository interface { GetLastProcessedInput(ctx context.Context, appAddress string) (*Input, error) } -// Service is the main advancer service that processes inputs through Cartesi machines -type Service struct { - service.Service - snapshotsDir string - repository AdvancerRepository - machineManager manager.MachineProvider - inspector *inspect.Inspector - HTTPServer *http.Server - HTTPServerFunc func() error -} - -// CreateInfo contains the configuration for creating an advancer service -type CreateInfo struct { - service.CreateInfo - Config config.AdvancerConfig - Repository repository.Repository -} - -// Create initializes a new advancer service -func Create(ctx context.Context, c *CreateInfo) (*Service, error) { - var err error - if err = ctx.Err(); err != nil { - return nil, err // This returns context.Canceled or context.DeadlineExceeded. - } - - s := &Service{} - c.Impl = s - - err = service.Create(ctx, &c.CreateInfo, &s.Service) - if err != nil { - return nil, err - } - - s.repository = c.Repository - if s.repository == nil { - return nil, fmt.Errorf("repository on advancer service Create is nil") - } - - // Create the machine manager - manager := manager.NewMachineManager( - ctx, - c.Repository, - s.Logger, - c.Config.FeatureMachineHashCheckEnabled, - ) - s.machineManager = manager - - // Initialize the inspect service if enabled - if c.Config.FeatureInspectEnabled { - s.inspector, s.HTTPServer, s.HTTPServerFunc = inspect.NewInspector( - c.Repository, - manager, - c.Config.InspectAddress, - c.LogLevel, - c.LogColor, - ) - } - - s.snapshotsDir = c.Config.SnapshotsDir - - return s, nil -} - -// Service interface implementation -func (s *Service) Alive() bool { return true } -func (s *Service) Ready() bool { return true } -func (s *Service) Reload() []error { return nil } -func (s *Service) Tick() []error { - if err := s.Step(s.Context); err != nil { - return []error{err} - } - return []error{} -} -func (s *Service) Stop(b bool) []error { - return nil -} -func (s *Service) Serve() error { - if s.inspector != nil && s.HTTPServerFunc != nil { - go s.HTTPServerFunc() - } - return s.Service.Serve() -} -func (s *Service) String() string { - return s.Name +func getUnprocessedEpochs(ctx context.Context, er AdvancerRepository, address string) ([]*Epoch, uint64, error) { + f := repository.EpochFilter{Status: []EpochStatus{EpochStatus_Open, EpochStatus_Closed}} + return er.ListEpochs(ctx, address, f, repository.Pagination{}, false) } // getUnprocessedInputs retrieves inputs that haven't been processed yet -func getUnprocessedInputs(ctx context.Context, repo AdvancerRepository, appAddress string) ([]*Input, uint64, error) { - f := repository.InputFilter{Status: Pointer(InputCompletionStatus_None)} +func getUnprocessedInputs(ctx context.Context, repo AdvancerRepository, appAddress string, epochIndex uint64) ([]*Input, uint64, error) { + f := repository.InputFilter{Status: Pointer(InputCompletionStatus_None), EpochIndex: &epochIndex} return repo.ListInputs(ctx, appAddress, f, repository.Pagination{}, false) } @@ -154,38 +72,66 @@ func (s *Service) Step(ctx context.Context) error { for _, app := range apps { appAddress := app.IApplicationAddress.String() - err := s.handleEpochSnapshotAfterInputProcessed(ctx, app) + epochs, _, err := getUnprocessedEpochs(ctx, s.repository, appAddress) if err != nil { return err } - // Get unprocessed inputs for this application - s.Logger.Debug("Querying for unprocessed inputs", "application", app.Name) - inputs, _, err := getUnprocessedInputs(ctx, s.repository, appAddress) - if err != nil { - return err - } + for _, epoch := range epochs { + // Get unprocessed inputs for this application + s.Logger.Debug("Querying for unprocessed inputs", "application", app.Name, "epoch_index", epoch.Index) + inputs, _, err := getUnprocessedInputs(ctx, s.repository, appAddress, epoch.Index) + if err != nil { + return err + } - // Process the inputs - s.Logger.Debug("Processing inputs", "application", app.Name, "count", len(inputs)) - err = s.processInputs(ctx, app, inputs) - if err != nil { - return err - } + // Process the inputs + s.Logger.Debug("Processing inputs", "application", app.Name, "epoch_index", epoch.Index, "count", len(inputs)) + err = s.processInputs(ctx, app, inputs) + if err != nil { + return err + } - // Update epochs to mark inputs as processed - rows, err := s.repository.UpdateEpochsInputsProcessed(ctx, appAddress) - if err != nil { - return err - } - if rows > 0 { - s.Logger.Info("Epochs updated to Inputs Processed", "application", app.Name, "count", rows) + if epoch.Status == EpochStatus_Closed { + if allProcessed, perr := s.isAllEpochInputsProcessed(app, epoch); perr == nil && allProcessed { + err := s.handleEpochAfterInputsProcessed(ctx, app, epoch) + if err != nil { + return err + } + + // Update epochs to mark inputs as processed + err = s.repository.UpdateEpochInputsProcessed(ctx, appAddress, epoch.Index) + if err != nil { + return err + } + s.Logger.Info("Epoch updated to Inputs Processed", "application", app.Name, "epoch_index", epoch.Index) + } else if perr != nil { + return perr + } else { + break // some inputs were not processed yet, check next time + } + } } } return nil } +func (s *Service) isAllEpochInputsProcessed(app *Application, epoch *Epoch) (bool, error) { + // epoch has no inputs + if epoch.InputIndexLowerBound == epoch.InputIndexUpperBound { + return true, nil + } + machine, exists := s.machineManager.GetMachine(app.ID) + if !exists { + return false, fmt.Errorf("%w: %d", ErrNoApp, app.ID) + } + if machine.ProcessedInputs() == epoch.InputIndexUpperBound { + return true, nil + } + return false, nil +} + // processInputs handles the processing of inputs for an application func (s *Service) processInputs(ctx context.Context, app *Application, inputs []*Input) error { // Skip if there are no inputs to process @@ -212,7 +158,7 @@ func (s *Service) processInputs(ctx context.Context, app *Application, inputs [] "index", input.Index) // Advance the machine with this input - result, err := machine.Advance(ctx, input.RawData, input.Index) + result, err := machine.Advance(ctx, input.RawData, input.EpochIndex, input.Index, app.IsDaveConsensus()) if err != nil { // If there's an error, mark the application as inoperable s.Logger.Error("Error executing advance", @@ -235,14 +181,16 @@ func (s *Service) processInputs(ctx context.Context, app *Application, inputs [] return err } - + // log advance result hashes s.Logger.Info("Processing input finished", "application", app.Name, - "epoch", input.EpochIndex, - "index", input.Index, + "epoch", result.EpochIndex, + "index", result.InputIndex, "status", result.Status, "outputs", len(result.Outputs), "reports", len(result.Reports), + "hashes", len(result.Hashes), + "remaining_cycles", result.RemainingMetaCycles, ) // Store the result in the database @@ -271,31 +219,84 @@ func (s *Service) processInputs(ctx context.Context, app *Application, inputs [] return nil } -// handleEpochSnapshotAfterInputProcessed handles the snapshot creation after when an epoch is closed after an input was processed -func (s *Service) handleEpochSnapshotAfterInputProcessed(ctx context.Context, app *Application) error { - // Check if the application has a epoch snapshot policy - if app.ExecutionParameters.SnapshotPolicy != SnapshotPolicy_EveryEpoch { - return nil +func (s *Service) isEpochLastInput(ctx context.Context, app *Application, input *Input) (bool, error) { + if app == nil || input == nil { + return false, fmt.Errorf("application and input must not be nil") + } + // Get the epoch for this input + epoch, err := s.repository.GetEpoch(ctx, app.IApplicationAddress.String(), input.EpochIndex) + if err != nil { + return false, fmt.Errorf("failed to get epoch: %w", err) } - // Get the machine instance for this application - machine, exists := s.machineManager.GetMachine(app.ID) - if !exists { - return fmt.Errorf("%w: %d", ErrNoApp, app.ID) + // Skip if the epoch is still open + if epoch.Status == EpochStatus_Open { + return false, nil } - // Check if this is the last processed input - lastProcessedInput, err := s.repository.GetLastProcessedInput(ctx, app.IApplicationAddress.String()) + // Check if this is the last input of the epoch + lastInput, err := s.repository.GetLastInput(ctx, app.IApplicationAddress.String(), input.EpochIndex) if err != nil { - return fmt.Errorf("failed to get last input: %w", err) + return false, fmt.Errorf("failed to get last input: %w", err) } - if lastProcessedInput == nil { + // If this is the last input and the epoch is closed, return true + if lastInput != nil && lastInput.Index == input.Index { + return true, nil + } + + return false, nil +} + +// handleEpochAfterInputsProcessed handles the snapshot creation after when an epoch is closed after an input was processed +func (s *Service) handleEpochAfterInputsProcessed(ctx context.Context, app *Application, epoch *Epoch) error { + // if epoch has inputs, all data is updated after advance, just check for snapshot + if epoch.InputIndexLowerBound != epoch.InputIndexUpperBound { + // Get the machine instance for this application + machine, exists := s.machineManager.GetMachine(app.ID) + if !exists { + return fmt.Errorf("%w: %d", ErrNoApp, app.ID) + } + + // Check if this is the last processed input + lastProcessedInput, err := s.repository.GetLastProcessedInput(ctx, app.IApplicationAddress.String()) + if err != nil { + return fmt.Errorf("failed to get last input: %w", err) + } + + // Check if the application has a epoch snapshot policy + if lastProcessedInput != nil && app.ExecutionParameters.SnapshotPolicy == SnapshotPolicy_EveryEpoch { + // Handle the snapshot + return s.handleSnapshot(ctx, app, machine, lastProcessedInput) + } + return nil } - // Handle the snapshot - return s.handleSnapshot(ctx, app, machine, lastProcessedInput) + // if epoch has no inputs, we need to copy previous epoch Outputs Proof + // first epoch we need to get it from the template + if epoch.Index == 0 { + // Get the machine instance for this application + machine, exists := s.machineManager.GetMachine(app.ID) + if !exists { + return fmt.Errorf("%w: %d", ErrNoApp, app.ID) + } + outputsProof, err := machine.OutputsProof(ctx, 0) + if err != nil { + return fmt.Errorf("failed to get outputs proof from machine: %w", err) + } + err = s.repository.UpdateEpochOutputsProof(ctx, app.ID, epoch.Index, outputsProof) + if err != nil { + return fmt.Errorf("failed to store outputs proof for epoch 0: %w", err) + } + } else { + err := s.repository.RepeatPreviousEpochOutputsProof(ctx, app.ID, epoch.Index) + if err != nil { + return fmt.Errorf("failed to repeat previous epoch outputs proof: %w", err) + } + } + + return nil } // handleSnapshot creates a snapshot based on the application's snapshot policy @@ -314,25 +315,12 @@ func (s *Service) handleSnapshot(ctx context.Context, app *Application, machine // For EVERY_EPOCH policy, check if this is the last input of the epoch if policy == SnapshotPolicy_EveryEpoch { - // Get the epoch for this input - epoch, err := s.repository.GetEpoch(ctx, app.IApplicationAddress.String(), input.EpochIndex) - if err != nil { - return fmt.Errorf("failed to get epoch: %w", err) - } - - // Skip if the epoch is still open - if epoch.Status == EpochStatus_Open { - return nil - } - - // Check if this is the last input of the epoch - lastInput, err := s.repository.GetLastInput(ctx, app.IApplicationAddress.String(), input.EpochIndex) + // If this is the last input and the epoch is closed, create a snapshot + isLastInput, err := s.isEpochLastInput(ctx, app, input) if err != nil { - return fmt.Errorf("failed to get last input: %w", err) + return err } - - // If this is the last input and the epoch is closed, create a snapshot - if lastInput != nil && lastInput.Index == input.Index { + if isLastInput { return s.createSnapshot(ctx, app, machine, input) } } @@ -364,22 +352,13 @@ func (s *Service) createSnapshot(ctx context.Context, app *Application, machine // Ensure the parent directory exists if _, err := os.Stat(s.snapshotsDir); os.IsNotExist(err) { - if err := os.MkdirAll(s.snapshotsDir, 0755); err != nil { // nolint: mnd + if err := os.MkdirAll(s.snapshotsDir, 0755); err != nil { //nolint: mnd return fmt.Errorf("failed to create snapshots directory: %w", err) } } - // Remove previous snapshot if it exists - previousSnapshot, err := s.repository.GetLastSnapshot(ctx, app.IApplicationAddress.String()) - if err != nil { - s.Logger.Error("Failed to get previous snapshot", - "application", app.Name, - "error", err) - // Continue even if we can't get the previous snapshot - } - // Create the snapshot - err = machine.CreateSnapshot(ctx, input.Index+1, snapshotPath) + err := machine.CreateSnapshot(ctx, input.Index+1, snapshotPath) if err != nil { return err } @@ -393,6 +372,15 @@ func (s *Service) createSnapshot(ctx context.Context, app *Application, machine return fmt.Errorf("failed to update input snapshot URI: %w", err) } + // Get previous snapshot if it exists + previousSnapshot, err := s.repository.GetLastSnapshot(ctx, app.IApplicationAddress.String()) + if err != nil { + s.Logger.Error("Failed to get previous snapshot", + "application", app.Name, + "error", err) + // Continue even if we can't get the previous snapshot + } + // Remove previous snapshot if it exists if previousSnapshot != nil && previousSnapshot.Index != input.Index && previousSnapshot.SnapshotURI != nil { // Only remove if it's a different snapshot than the one we just created diff --git a/internal/advancer/advancer_test.go b/internal/advancer/advancer_test.go index 5cd9acceb..f1699bdcb 100644 --- a/internal/advancer/advancer_test.go +++ b/internal/advancer/advancer_test.go @@ -61,17 +61,17 @@ func (s *AdvancerSuite) TestServiceInterface() { // Test Tick method machineManager.Map[1] = *newMockMachine(1) - repository.GetInputsReturn = map[common.Address][]*Input{ + repository.GetEpochsReturn = map[common.Address][]*Epoch{ machineManager.Map[1].Application.IApplicationAddress: {}, } tickErrors := advancer.Tick() require.Empty(tickErrors) // Test Tick with error - repository.UpdateEpochsError = errors.New("update epochs error") + repository.GetEpochsError = errors.New("list epochs error") tickErrors = advancer.Tick() require.NotEmpty(tickErrors) - require.Contains(tickErrors[0].Error(), "update epochs error") + require.Contains(tickErrors[0].Error(), "list epochs error") }) } @@ -89,6 +89,14 @@ func (s *AdvancerSuite) TestStep() { res3 := randomAdvanceResult(3) repository := &MockRepository{ + GetEpochsReturn: map[common.Address][]*Epoch{ + app1.Application.IApplicationAddress: { + &Epoch{Index: 0, Status: EpochStatus_Open}, + }, + app2.Application.IApplicationAddress: { + &Epoch{Index: 0, Status: EpochStatus_Open}, + }, + }, GetInputsReturn: map[common.Address][]*Input{ app1.Application.IApplicationAddress: { newInput(app1.Application.ID, 0, 0, marshal(res1)), @@ -119,6 +127,11 @@ func (s *AdvancerSuite) TestStep() { res1 := randomAdvanceResult(1) repository := &MockRepository{ + GetEpochsReturn: map[common.Address][]*Epoch{ + app1.Application.IApplicationAddress: { + &Epoch{Index: 0, Status: EpochStatus_Closed}, + }, + }, GetInputsReturn: map[common.Address][]*Input{ app1.Application.IApplicationAddress: { newInput(app1.Application.ID, 0, 0, marshal(res1)), @@ -162,6 +175,11 @@ func (s *AdvancerSuite) TestStep() { machineManager.Map[1] = *app1 repository := &MockRepository{ + GetEpochsReturn: map[common.Address][]*Epoch{ + app1.Application.IApplicationAddress: { + &Epoch{Index: 0, Status: EpochStatus_Closed}, + }, + }, GetInputsError: errors.New("get inputs error"), } @@ -213,7 +231,7 @@ func (s *AdvancerSuite) TestGetUnprocessedInputs() { }, } - result, count, err := getUnprocessedInputs(context.Background(), repository, app1.Application.IApplicationAddress.String()) + result, count, err := getUnprocessedInputs(context.Background(), repository, app1.Application.IApplicationAddress.String(), 0) require.Nil(err) require.Equal(uint64(2), count) require.Equal(inputs, result) @@ -227,7 +245,7 @@ func (s *AdvancerSuite) TestGetUnprocessedInputs() { GetInputsError: errors.New("list inputs error"), } - _, _, err := getUnprocessedInputs(context.Background(), repository, app1.Application.IApplicationAddress.String()) + _, _, err := getUnprocessedInputs(context.Background(), repository, app1.Application.IApplicationAddress.String(), 0) require.Error(err) require.Contains(err.Error(), "list inputs error") }) @@ -367,7 +385,7 @@ func (s *AdvancerSuite) TestContextCancellation() { // Create a repository that will block until we cancel the context repository := &MockRepository{ - GetInputsBlock: true, + GetEpochsBlock: true, } advancer, err := newMockAdvancerService(machineManager, repository) @@ -513,6 +531,8 @@ func (mock *MockMachineImpl) Advance( ctx context.Context, input []byte, _ uint64, + _ uint64, + _ bool, ) (*AdvanceResult, error) { // If AdvanceBlock is true, block until context is canceled if mock.AdvanceBlock { @@ -605,8 +625,8 @@ type MockMachineInstance struct { } // Advance implements the MachineInstance interface for testing -func (m *MockMachineInstance) Advance(ctx context.Context, input []byte, index uint64) (*AdvanceResult, error) { - return m.machineImpl.Advance(ctx, input, index) +func (m *MockMachineInstance) Advance(ctx context.Context, input []byte, epochIndex uint64, index uint64, leafs bool) (*AdvanceResult, error) { + return m.machineImpl.Advance(ctx, input, epochIndex, index, leafs) } // Inspect implements the MachineInstance interface for testing @@ -620,6 +640,14 @@ func (m *MockMachineInstance) Application() *Application { return m.application } +func (m *MockMachineInstance) ProcessedInputs() uint64 { + return 0 +} + +func (m *MockMachineInstance) OutputsProof(ctx context.Context, processedInputs uint64) (*OutputsProof, error) { + return nil, nil +} + // Synchronize implements the MachineInstance interface for testing func (m *MockMachineInstance) Synchronize(ctx context.Context, repo manager.MachineRepository) error { // Not used in advancer tests, but needed to satisfy the interface @@ -632,6 +660,12 @@ func (m *MockMachineInstance) CreateSnapshot(ctx context.Context, processInputs return nil } +// Retrieves the hash of the current machine state +func (m *MockMachineInstance) Hash(ctx context.Context) ([32]byte, error) { + // Not used in advancer tests, but needed to satisfy the interface + return [32]byte{}, nil +} + // Close implements the MachineInstance interface for testing func (m *MockMachineInstance) Close() error { // Not used in advancer tests, but needed to satisfy the interface @@ -641,6 +675,9 @@ func (m *MockMachineInstance) Close() error { // ------------------------------------------------------------------------------------------------ type MockRepository struct { + GetEpochsReturn map[common.Address][]*Epoch + GetEpochsError error + GetEpochsBlock bool GetInputsReturn map[common.Address][]*Input GetInputsError error GetInputsBlock bool @@ -648,9 +685,10 @@ type MockRepository struct { StoreAdvanceFailCount int UpdateApplicationStateError error UpdateEpochsError error - UpdateEpochsCount int64 + UpdateOutputsProofError error GetLastSnapshotReturn *Input GetLastSnapshotError error + RepeatOutputsProofError error StoredResults []*AdvanceResult ApplicationStateUpdates int @@ -660,6 +698,28 @@ type MockRepository struct { mu sync.Mutex } +func (mock *MockRepository) ListEpochs( + ctx context.Context, + nameOrAddress string, + f repository.EpochFilter, + p repository.Pagination, + descending bool, +) ([]*Epoch, uint64, error) { + // Check for context cancellation + if ctx.Err() != nil { + return nil, 0, ctx.Err() + } + + // If GetEpochsBlock is true, block until context is canceled + if mock.GetEpochsBlock { + <-ctx.Done() + return nil, 0, ctx.Err() + } + + address := common.HexToAddress(nameOrAddress) + return mock.GetEpochsReturn[address], uint64(len(mock.GetEpochsReturn[address])), mock.GetEpochsError +} + func (mock *MockRepository) ListInputs( ctx context.Context, nameOrAddress string, @@ -706,13 +766,22 @@ func (mock *MockRepository) StoreAdvanceResult( return mock.StoreAdvanceError } -func (mock *MockRepository) UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) (int64, error) { +func (mock *MockRepository) UpdateEpochOutputsProof(ctx context.Context, appID int64, epochIndex uint64, proof *OutputsProof) error { // Check for context cancellation if ctx.Err() != nil { - return 0, ctx.Err() + return ctx.Err() } - return mock.UpdateEpochsCount, mock.UpdateEpochsError + return mock.UpdateOutputsProofError +} + +func (mock *MockRepository) UpdateEpochInputsProcessed(ctx context.Context, nameOrAddress string, epochIndex uint64) error { + // Check for context cancellation + if ctx.Err() != nil { + return ctx.Err() + } + + return mock.UpdateEpochsError } func (mock *MockRepository) UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error { @@ -792,6 +861,14 @@ func (mock *MockRepository) GetLastSnapshot(ctx context.Context, nameOrAddress s return mock.GetLastSnapshotReturn, mock.GetLastSnapshotError } +func (mock *MockRepository) RepeatPreviousEpochOutputsProof(ctx context.Context, appID int64, epochIndex uint64) error { + // Check for context cancellation + if ctx.Err() != nil { + return ctx.Err() + } + return mock.RepeatOutputsProofError +} + // ------------------------------------------------------------------------------------------------ func randomAddress() common.Address { @@ -849,14 +926,15 @@ func randomInputs(appId int64, epochIndex uint64, size int) []*Input { } func randomAdvanceResult(inputIndex uint64) *AdvanceResult { - hash := randomHash() res := &AdvanceResult{ - InputIndex: inputIndex, - Status: InputCompletionStatus_Accepted, - Outputs: randomSliceOfBytes(), - Reports: randomSliceOfBytes(), - OutputsHash: randomHash(), - MachineHash: &hash, + InputIndex: inputIndex, + Status: InputCompletionStatus_Accepted, + Outputs: randomSliceOfBytes(), + Reports: randomSliceOfBytes(), + OutputsProof: OutputsProof{ + OutputsHash: randomHash(), + MachineHash: randomHash(), + }, } return res } diff --git a/internal/advancer/service.go b/internal/advancer/service.go new file mode 100644 index 000000000..1f8399331 --- /dev/null +++ b/internal/advancer/service.go @@ -0,0 +1,102 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package advancer + +import ( + "context" + "fmt" + "net/http" + + "github.com/cartesi/rollups-node/internal/config" + "github.com/cartesi/rollups-node/internal/inspect" + "github.com/cartesi/rollups-node/internal/manager" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/pkg/service" +) + +// Service is the main advancer service that processes inputs through Cartesi machines +type Service struct { + service.Service + snapshotsDir string + repository AdvancerRepository + machineManager manager.MachineProvider + inspector *inspect.Inspector + HTTPServer *http.Server + HTTPServerFunc func() error +} + +// CreateInfo contains the configuration for creating an advancer service +type CreateInfo struct { + service.CreateInfo + Config config.AdvancerConfig + Repository repository.Repository +} + +// Create initializes a new advancer service +func Create(ctx context.Context, c *CreateInfo) (*Service, error) { + var err error + if err = ctx.Err(); err != nil { + return nil, err // This returns context.Canceled or context.DeadlineExceeded. + } + + s := &Service{} + c.Impl = s + + err = service.Create(ctx, &c.CreateInfo, &s.Service) + if err != nil { + return nil, err + } + + s.repository = c.Repository + if s.repository == nil { + return nil, fmt.Errorf("repository on advancer service Create is nil") + } + + // Create the machine manager + manager := manager.NewMachineManager( + ctx, + c.Repository, + s.Logger, + c.Config.FeatureMachineHashCheckEnabled, + ) + s.machineManager = manager + + // Initialize the inspect service if enabled + if c.Config.FeatureInspectEnabled { + s.inspector, s.HTTPServer, s.HTTPServerFunc = inspect.NewInspector( + c.Repository, + manager, + c.Config.InspectAddress, + c.LogLevel, + c.LogColor, + ) + } + + s.snapshotsDir = c.Config.SnapshotsDir + + return s, nil +} + +// Service interface implementation +func (s *Service) Alive() bool { return true } +func (s *Service) Ready() bool { return true } +func (s *Service) Reload() []error { return nil } +func (s *Service) Tick() []error { + if err := s.Step(s.Context); err != nil { + return []error{err} + } + return []error{} +} +func (s *Service) Stop(b bool) []error { + return nil +} +func (s *Service) Serve() error { + if s.inspector != nil && s.HTTPServerFunc != nil { + go s.HTTPServerFunc() + } + return s.Service.Serve() +} +func (s *Service) String() string { + return s.Name +} diff --git a/internal/claimer/blockchain.go b/internal/claimer/blockchain.go index 727189964..e148460dd 100644 --- a/internal/claimer/blockchain.go +++ b/internal/claimer/blockchain.go @@ -85,18 +85,18 @@ func (self *claimerBlockchain) submitClaimToBlockchain( txHash := common.Hash{} lastBlockNumber := new(big.Int).SetUint64(epoch.LastBlock) tx, err := ic.SubmitClaim(self.txOpts, application.IApplicationAddress, - lastBlockNumber, *epoch.ClaimHash) + lastBlockNumber, *epoch.OutputsMerkleRoot) if err != nil { self.logger.Error("submitClaimToBlockchain:failed", "appContractAddress", application.IApplicationAddress, - "claimHash", *epoch.ClaimHash, + "claimHash", *epoch.OutputsMerkleRoot, "last_block", epoch.LastBlock, "error", err) } else { txHash = tx.Hash() self.logger.Debug("submitClaimToBlockchain:success", "appContractAddress", application.IApplicationAddress, - "claimHash", *epoch.ClaimHash, + "claimHash", *epoch.OutputsMerkleRoot, "last_block", epoch.LastBlock, "TxHash", txHash) } diff --git a/internal/claimer/claimer.go b/internal/claimer/claimer.go index e5fdcabe7..5c5f4b982 100644 --- a/internal/claimer/claimer.go +++ b/internal/claimer/claimer.go @@ -45,7 +45,6 @@ import ( "time" "github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/ethereum/go-ethereum/common" @@ -58,8 +57,6 @@ var ( ) type iclaimerRepository interface { - ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination, descending bool) ([]*model.Application, uint64, error) - // key is model.Application.ID SelectSubmittedClaimPairsPerApp(ctx context.Context) ( map[int64]*model.Epoch, @@ -138,7 +135,7 @@ func (s *Service) submitClaimsAndUpdateDatabase( s.Logger.Info("Claim submitted", "app", apps[key].IApplicationAddress, "receipt_block_number", receipt.BlockNumber, - "claim_hash", fmt.Sprintf("%x", computedEpoch.ClaimHash), + "claim_hash", fmt.Sprintf("%x", computedEpoch.OutputsMerkleRoot), "last_block", computedEpoch.LastBlock, "tx", txHash) delete(computedEpochs, key) @@ -261,7 +258,7 @@ func (s *Service) submitClaimsAndUpdateDatabase( } s.Logger.Debug("Updating claim status to submitted", "app", app.IApplicationAddress, - "claim_hash", fmt.Sprintf("%x", currEpoch.ClaimHash), + "claim_hash", fmt.Sprintf("%x", currEpoch.OutputsMerkleRoot), "last_block", currEpoch.LastBlock, ) txHash := currClaimSubmissionEvent.Raw.TxHash @@ -280,21 +277,21 @@ func (s *Service) submitClaimsAndUpdateDatabase( s.Logger.Info("Claim previously submitted", "app", app.IApplicationAddress, "event_block_number", currClaimSubmissionEvent.Raw.BlockNumber, - "claim_hash", fmt.Sprintf("%x", currEpoch.ClaimHash), + "claim_hash", fmt.Sprintf("%x", currEpoch.OutputsMerkleRoot), "last_block", currEpoch.LastBlock, ) } else if s.submissionEnabled { if prevEpoch != nil && prevEpoch.Status != model.EpochStatus_ClaimAccepted { s.Logger.Debug("Waiting previous claim to be accepted before submitting new one. Previous:", "app", app.IApplicationAddress, - "claim_hash", fmt.Sprintf("%x", prevEpoch.ClaimHash), + "claim_hash", fmt.Sprintf("%x", prevEpoch.OutputsMerkleRoot), "last_block", prevEpoch.LastBlock, ) goto nextApp } s.Logger.Debug("Submitting claim to blockchain", "app", app.IApplicationAddress, - "claim_hash", fmt.Sprintf("%x", currEpoch.ClaimHash), + "claim_hash", fmt.Sprintf("%x", currEpoch.OutputsMerkleRoot), "last_block", currEpoch.LastBlock, ) txHash, err := s.blockchain.submitClaimToBlockchain(ic, app, currEpoch) @@ -307,7 +304,7 @@ func (s *Service) submitClaimsAndUpdateDatabase( } else { s.Logger.Debug("Claim submission disabled. Doing nothing", "app", app.IApplicationAddress, - "claim_hash", fmt.Sprintf("%x", currEpoch.ClaimHash), + "claim_hash", fmt.Sprintf("%x", currEpoch.OutputsMerkleRoot), "last_block", currEpoch.LastBlock, ) @@ -412,7 +409,7 @@ func (s *Service) acceptClaimsAndUpdateDatabase( } s.Logger.Debug("Updating claim status to accepted", "app", app.IApplicationAddress, - "claim_hash", fmt.Sprintf("%x", submittedEpoch.ClaimHash), + "claim_hash", fmt.Sprintf("%x", submittedEpoch.OutputsMerkleRoot), "last_block", submittedEpoch.LastBlock, ) txHash := currEvent.Raw.TxHash @@ -485,7 +482,7 @@ func checkEpochConstraint(c *model.Epoch) error { return fmt.Errorf("unexpected epoch state. first_block: %v > last_block: %v", c.FirstBlock, c.LastBlock) } if c.Status == model.EpochStatus_ClaimSubmitted { - if c.ClaimHash == nil { + if c.OutputsMerkleRoot == nil { return fmt.Errorf("unexpected epoch state. missing claim_hash.") } } @@ -523,13 +520,13 @@ func checkEpochSequenceConstraint(prevEpoch *model.Epoch, currEpoch *model.Epoch func claimSubmittedEventMatches(application *model.Application, epoch *model.Epoch, event *iconsensus.IConsensusClaimSubmitted) bool { return application.IApplicationAddress == event.AppContract && - *epoch.ClaimHash == event.OutputsMerkleRoot && + *epoch.OutputsMerkleRoot == event.OutputsMerkleRoot && epoch.LastBlock == event.LastProcessedBlockNumber.Uint64() } func claimAcceptedEventMatches(application *model.Application, epoch *model.Epoch, event *iconsensus.IConsensusClaimAccepted) bool { return application.IApplicationAddress == event.AppContract && - *epoch.ClaimHash == event.OutputsMerkleRoot && + *epoch.OutputsMerkleRoot == event.OutputsMerkleRoot && epoch.LastBlock == event.LastProcessedBlockNumber.Uint64() } diff --git a/internal/claimer/claimer_test.go b/internal/claimer/claimer_test.go index 2e4dc9ac5..620650711 100644 --- a/internal/claimer/claimer_test.go +++ b/internal/claimer/claimer_test.go @@ -13,7 +13,6 @@ import ( "time" "github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/pkg/contracts/iconsensus" "github.com/cartesi/rollups-node/pkg/service" "github.com/lmittmann/tint" @@ -29,16 +28,6 @@ type claimerRepositoryMock struct { mock.Mock } -func (m *claimerRepositoryMock) ListApplications( - ctx context.Context, - f repository.ApplicationFilter, - pagination repository.Pagination, - descending bool, -) ([]*model.Application, uint64, error) { - args := m.Called(ctx, f, pagination, descending) - return args.Get(0).([]*model.Application), args.Get(1).(uint64), args.Error(2) -} - func (m *claimerRepositoryMock) SelectSubmittedClaimPairsPerApp(ctx context.Context) ( map[int64]*model.Epoch, map[int64]*model.Epoch, @@ -226,7 +215,7 @@ func makeEpoch(id int64, status model.EpochStatus, i uint64) *model.Epoch { LastBlock: i*10 + 9, Status: status, ClaimTransactionHash: &tx, - ClaimHash: &hash, + OutputsMerkleRoot: &hash, } return epoch } @@ -261,7 +250,7 @@ func makeSubmittedEvent(app *model.Application, epoch *model.Epoch) *iconsensus. return &iconsensus.IConsensusClaimSubmitted{ LastProcessedBlockNumber: new(big.Int).SetUint64(epoch.LastBlock), AppContract: app.IApplicationAddress, - OutputsMerkleRoot: *epoch.ClaimHash, + OutputsMerkleRoot: *epoch.OutputsMerkleRoot, Raw: types.Log{ TxHash: common.HexToHash(epoch.ClaimTransactionHash.Hex()), }, @@ -272,7 +261,7 @@ func makeAcceptedEvent(app *model.Application, epoch *model.Epoch) *iconsensus.I return &iconsensus.IConsensusClaimAccepted{ LastProcessedBlockNumber: new(big.Int).SetUint64(epoch.LastBlock), AppContract: app.IApplicationAddress, - OutputsMerkleRoot: *epoch.ClaimHash, + OutputsMerkleRoot: *epoch.OutputsMerkleRoot, Raw: types.Log{ TxHash: common.HexToHash(epoch.ClaimTransactionHash.Hex()), }, @@ -571,7 +560,7 @@ func TestSubmitClaimWithAntecessorMismatch(t *testing.T) { prevEvent := &iconsensus.IConsensusClaimSubmitted{ LastProcessedBlockNumber: new(big.Int).SetUint64(prevEpoch.LastBlock + 1), AppContract: app.IApplicationAddress, - OutputsMerkleRoot: *prevEpoch.ClaimHash, + OutputsMerkleRoot: *prevEpoch.OutputsMerkleRoot, } var currEvent *iconsensus.IConsensusClaimSubmitted = nil @@ -738,7 +727,7 @@ func TestAcceptClaimWithAntecessorMismatch(t *testing.T) { prevEvent := &iconsensus.IConsensusClaimAccepted{ LastProcessedBlockNumber: new(big.Int).SetUint64(prevEpoch.LastBlock + 1), AppContract: app.IApplicationAddress, - OutputsMerkleRoot: *prevEpoch.ClaimHash, + OutputsMerkleRoot: *prevEpoch.OutputsMerkleRoot, } var currEvent *iconsensus.IConsensusClaimAccepted = nil diff --git a/internal/config/config.go b/internal/config/config.go index c63fe099d..5a9446e64 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -135,6 +135,18 @@ func ToAddressFromString(s string) (Address, error) { return common.BytesToAddress(b), nil } +func ToHashFromString(s string) (common.Hash, error) { + if len(s) < 3 || (!strings.HasPrefix(s, "0x") && !strings.HasPrefix(s, "0X")) { + return common.Hash{}, fmt.Errorf("invalid hash '%s'", s) + } + s = s[2:] + b, err := hex.DecodeString(s) + if err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b), nil +} + func ToApplicationNameFromString(s string) (string, error) { if s == "" { return "", fmt.Errorf("application name cannot be empty") diff --git a/internal/config/generate/Config.toml b/internal/config/generate/Config.toml index 7d81c8f5f..c19a3386e 100644 --- a/internal/config/generate/Config.toml +++ b/internal/config/generate/Config.toml @@ -150,13 +150,6 @@ The default block to be used by EVM Reader and Claimer when requesting new block One of 'latest', 'pending', 'safe', 'finalized'""" used-by = ["evmreader", "claimer", "node", "prt"] -[blockchain.CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT] -default = "60" -go-type = "uint64" -description = """ -Block subscription timeout in seconds.""" -used-by = ["evmreader", "node"] - [rollups.CARTESI_BLOCKCHAIN_HTTP_MAX_RETRIES] default = "4" go-type = "uint64" diff --git a/internal/config/generated.go b/internal/config/generated.go index 7d0030ba8..443217d51 100644 --- a/internal/config/generated.go +++ b/internal/config/generated.go @@ -33,7 +33,6 @@ const ( BLOCKCHAIN_HTTP_ENDPOINT = "CARTESI_BLOCKCHAIN_HTTP_ENDPOINT" BLOCKCHAIN_ID = "CARTESI_BLOCKCHAIN_ID" BLOCKCHAIN_LEGACY_ENABLED = "CARTESI_BLOCKCHAIN_LEGACY_ENABLED" - BLOCKCHAIN_SUBSCRIPTION_TIMEOUT = "CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT" BLOCKCHAIN_WS_ENDPOINT = "CARTESI_BLOCKCHAIN_WS_ENDPOINT" CONTRACTS_APPLICATION_FACTORY_ADDRESS = "CARTESI_CONTRACTS_APPLICATION_FACTORY_ADDRESS" CONTRACTS_AUTHORITY_FACTORY_ADDRESS = "CARTESI_CONTRACTS_AUTHORITY_FACTORY_ADDRESS" @@ -102,8 +101,6 @@ func SetDefaults() { viper.SetDefault(BLOCKCHAIN_LEGACY_ENABLED, "false") - viper.SetDefault(BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, "60") - // no default for CARTESI_BLOCKCHAIN_WS_ENDPOINT // no default for CARTESI_CONTRACTS_APPLICATION_FACTORY_ADDRESS @@ -499,9 +496,6 @@ type EvmreaderConfig struct { // An unique identifier representing a blockchain network. BlockchainId uint64 `mapstructure:"CARTESI_BLOCKCHAIN_ID"` - // Block subscription timeout in seconds. - BlockchainSubscriptionTimeout uint64 `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` - // WebSocket endpoint for the blockchain RPC provider. BlockchainWsEndpoint URL `mapstructure:"CARTESI_BLOCKCHAIN_WS_ENDPOINT"` @@ -580,13 +574,6 @@ func LoadEvmreaderConfig() (*EvmreaderConfig, error) { return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_ID is required for the evmreader service: %w", err) } - cfg.BlockchainSubscriptionTimeout, err = GetBlockchainSubscriptionTimeout() - if err != nil && err != ErrNotDefined { - return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT: %w", err) - } else if err == ErrNotDefined { - return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT is required for the evmreader service: %w", err) - } - cfg.BlockchainWsEndpoint, err = GetBlockchainWsEndpoint() if err != nil && err != ErrNotDefined { return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_WS_ENDPOINT: %w", err) @@ -774,9 +761,6 @@ type NodeConfig struct { // (instead of EIP-1559). BlockchainLegacyEnabled bool `mapstructure:"CARTESI_BLOCKCHAIN_LEGACY_ENABLED"` - // Block subscription timeout in seconds. - BlockchainSubscriptionTimeout uint64 `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` - // WebSocket endpoint for the blockchain RPC provider. BlockchainWsEndpoint URL `mapstructure:"CARTESI_BLOCKCHAIN_WS_ENDPOINT"` @@ -900,13 +884,6 @@ func LoadNodeConfig() (*NodeConfig, error) { return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_LEGACY_ENABLED is required for the node service: %w", err) } - cfg.BlockchainSubscriptionTimeout, err = GetBlockchainSubscriptionTimeout() - if err != nil && err != ErrNotDefined { - return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT: %w", err) - } else if err == ErrNotDefined { - return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT is required for the node service: %w", err) - } - cfg.BlockchainWsEndpoint, err = GetBlockchainWsEndpoint() if err != nil && err != ErrNotDefined { return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_WS_ENDPOINT: %w", err) @@ -1394,21 +1371,20 @@ func (c *NodeConfig) ToClaimerConfig() *ClaimerConfig { // ToEvmreaderConfig converts a NodeConfig to a EvmreaderConfig. func (c *NodeConfig) ToEvmreaderConfig() *EvmreaderConfig { return &EvmreaderConfig{ - BlockchainDefaultBlock: c.BlockchainDefaultBlock, - BlockchainHttpEndpoint: c.BlockchainHttpEndpoint, - BlockchainId: c.BlockchainId, - BlockchainSubscriptionTimeout: c.BlockchainSubscriptionTimeout, - BlockchainWsEndpoint: c.BlockchainWsEndpoint, - DatabaseConnection: c.DatabaseConnection, - FeatureInputReaderEnabled: c.FeatureInputReaderEnabled, - TelemetryAddress: c.TelemetryAddress, - LogColor: c.LogColor, - LogLevel: c.LogLevel, - BlockchainHttpMaxRetries: c.BlockchainHttpMaxRetries, - BlockchainHttpRetryMaxWait: c.BlockchainHttpRetryMaxWait, - BlockchainHttpRetryMinWait: c.BlockchainHttpRetryMinWait, - BlockchainMaxBlockRange: c.BlockchainMaxBlockRange, - MaxStartupTime: c.MaxStartupTime, + BlockchainDefaultBlock: c.BlockchainDefaultBlock, + BlockchainHttpEndpoint: c.BlockchainHttpEndpoint, + BlockchainId: c.BlockchainId, + BlockchainWsEndpoint: c.BlockchainWsEndpoint, + DatabaseConnection: c.DatabaseConnection, + FeatureInputReaderEnabled: c.FeatureInputReaderEnabled, + TelemetryAddress: c.TelemetryAddress, + LogColor: c.LogColor, + LogLevel: c.LogLevel, + BlockchainHttpMaxRetries: c.BlockchainHttpMaxRetries, + BlockchainHttpRetryMaxWait: c.BlockchainHttpRetryMaxWait, + BlockchainHttpRetryMinWait: c.BlockchainHttpRetryMinWait, + BlockchainMaxBlockRange: c.BlockchainMaxBlockRange, + MaxStartupTime: c.MaxStartupTime, } } @@ -1633,19 +1609,6 @@ func GetBlockchainLegacyEnabled() (bool, error) { return notDefinedbool(), fmt.Errorf("%s: %w", BLOCKCHAIN_LEGACY_ENABLED, ErrNotDefined) } -// GetBlockchainSubscriptionTimeout returns the value for the environment variable CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT. -func GetBlockchainSubscriptionTimeout() (uint64, error) { - s := viper.GetString(BLOCKCHAIN_SUBSCRIPTION_TIMEOUT) - if s != "" { - v, err := toUint64(s) - if err != nil { - return v, fmt.Errorf("failed to parse %s: %w", BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, err) - } - return v, nil - } - return notDefineduint64(), fmt.Errorf("%s: %w", BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, ErrNotDefined) -} - // GetBlockchainWsEndpoint returns the value for the environment variable CARTESI_BLOCKCHAIN_WS_ENDPOINT. func GetBlockchainWsEndpoint() (URL, error) { s := viper.GetString(BLOCKCHAIN_WS_ENDPOINT) diff --git a/internal/evmreader/application_adapter.go b/internal/evmreader/application_adapter.go index 2c8e51d1e..b4d0649c9 100644 --- a/internal/evmreader/application_adapter.go +++ b/internal/evmreader/application_adapter.go @@ -17,6 +17,14 @@ import ( "github.com/ethereum/go-ethereum/ethclient" ) +type ApplicationContractAdapter interface { + RetrieveOutputExecutionEvents( + opts *bind.FilterOpts, + ) ([]*iapplication.IApplicationOutputExecuted, error) + GetDeploymentBlockNumber(opts *bind.CallOpts) (*big.Int, error) + GetNumberOfExecutedOutputs(opts *bind.CallOpts) (*big.Int, error) +} + // IApplication Wrapper type ApplicationContractAdapterImpl struct { application *iapplication.IApplication @@ -99,3 +107,7 @@ func (a *ApplicationContractAdapterImpl) RetrieveOutputExecutionEvents( func (a *ApplicationContractAdapterImpl) GetDeploymentBlockNumber(opts *bind.CallOpts) (*big.Int, error) { return a.application.GetDeploymentBlockNumber(opts) } + +func (a *ApplicationContractAdapterImpl) GetNumberOfExecutedOutputs(opts *bind.CallOpts) (*big.Int, error) { + return a.application.GetNumberOfExecutedOutputs(opts) +} diff --git a/internal/evmreader/daveconsensus_adapter.go b/internal/evmreader/daveconsensus_adapter.go new file mode 100644 index 000000000..c013b85b6 --- /dev/null +++ b/internal/evmreader/daveconsensus_adapter.go @@ -0,0 +1,137 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package evmreader + +import ( + "math/big" + + . "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/contracts/idaveconsensus" + "github.com/cartesi/rollups-node/pkg/ethutil" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" +) + +// Interface for DaveConsensus reading +type DaveConsensusAdapter interface { + GetInputBox(opts *bind.CallOpts) (common.Address, error) + GetCurrentSealedEpoch(opts *bind.CallOpts) (struct { + EpochNumber *big.Int + InputIndexLowerBound *big.Int + InputIndexUpperBound *big.Int + Tournament common.Address + }, error) + GetApplicationContract(opts *bind.CallOpts) (common.Address, error) + GetTournamentFactory(opts *bind.CallOpts) (common.Address, error) + GetDeploymentBlockNumber(opts *bind.CallOpts) (*big.Int, error) + RetrieveSealedEpochs(opts *bind.FilterOpts) ([]*idaveconsensus.IDaveConsensusEpochSealed, error) +} + +// DaveConsensus Wrapper +type DaveConsensusAdapterImpl struct { + daveConsensus *idaveconsensus.IDaveConsensus + client *ethclient.Client + daveConsensusAddress common.Address + filter ethutil.Filter +} + +func NewDaveConsensusAdapter( + daveConsensusAddress common.Address, + client *ethclient.Client, + filter ethutil.Filter, +) (DaveConsensusAdapter, error) { + daveConsensusContract, err := idaveconsensus.NewIDaveConsensus(daveConsensusAddress, client) + if err != nil { + return nil, err + } + return &DaveConsensusAdapterImpl{ + daveConsensus: daveConsensusContract, + daveConsensusAddress: daveConsensusAddress, + client: client, + filter: filter, + }, nil +} + +func buildEpochSealedFilterQuery( + opts *bind.FilterOpts, + daveConsensusAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := idaveconsensus.IDaveConsensusMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events[MonitoredEvent_EpochSealed.String()].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{daveConsensusAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (d *DaveConsensusAdapterImpl) GetInputBox(opts *bind.CallOpts) (common.Address, error) { + return d.daveConsensus.GetInputBox(opts) +} + +func (d *DaveConsensusAdapterImpl) GetCurrentSealedEpoch(opts *bind.CallOpts) (struct { + EpochNumber *big.Int + InputIndexLowerBound *big.Int + InputIndexUpperBound *big.Int + Tournament common.Address +}, error) { + return d.daveConsensus.GetCurrentSealedEpoch(opts) +} + +func (d *DaveConsensusAdapterImpl) GetApplicationContract(opts *bind.CallOpts) (common.Address, error) { + return d.daveConsensus.GetApplicationContract(opts) +} + +func (d *DaveConsensusAdapterImpl) GetTournamentFactory(opts *bind.CallOpts) (common.Address, error) { + return d.daveConsensus.GetTournamentFactory(opts) +} + +func (d *DaveConsensusAdapterImpl) GetDeploymentBlockNumber(opts *bind.CallOpts) (*big.Int, error) { + return d.daveConsensus.GetDeploymentBlockNumber(opts) +} + +func (d *DaveConsensusAdapterImpl) RetrieveSealedEpochs( + opts *bind.FilterOpts, +) ([]*idaveconsensus.IDaveConsensusEpochSealed, error) { + q, err := buildEpochSealedFilterQuery(opts, d.daveConsensusAddress) + if err != nil { + return nil, err + } + + itr, err := d.filter.ChunkedFilterLogs(opts.Context, d.client, q) + if err != nil { + return nil, err + } + + var events []*idaveconsensus.IDaveConsensusEpochSealed + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := d.daveConsensus.ParseEpochSealed(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} diff --git a/internal/evmreader/evmreader.go b/internal/evmreader/evmreader.go index 8281dff84..d8b1b45af 100644 --- a/internal/evmreader/evmreader.go +++ b/internal/evmreader/evmreader.go @@ -11,16 +11,12 @@ import ( "time" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/rpc" . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" - "github.com/cartesi/rollups-node/pkg/contracts/iapplication" - "github.com/cartesi/rollups-node/pkg/contracts/iinputbox" "github.com/cartesi/rollups-node/pkg/ethutil" ) @@ -29,6 +25,7 @@ type EvmReaderRepository interface { ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination, descending bool) ([]*Application, uint64, error) UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error UpdateEventLastCheckBlock(ctx context.Context, appIDs []int64, event MonitoredEvent, blockNumber uint64) error + GetEventLastCheckBlock(ctx context.Context, appID int64, event MonitoredEvent) (uint64, error) SaveNodeConfigRaw(ctx context.Context, key string, rawJSON []byte) error LoadNodeConfigRaw(ctx context.Context, key string) (rawJSON []byte, createdAt, updatedAt time.Time, err error) @@ -40,10 +37,15 @@ type EvmReaderRepository interface { ) error GetEpoch(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) ListEpochs(ctx context.Context, nameOrAddress string, f repository.EpochFilter, p repository.Pagination, descending bool) ([]*Epoch, uint64, error) + UpdateEpochClaimTransactionHash(ctx context.Context, nameOrAddress string, e *Epoch) error + GetLastNonOpenEpoch(ctx context.Context, nameOrAddress string) (*Epoch, error) + + GetNumberOfInputs(ctx context.Context, nameOrAddress string) (uint64, error) // Output execution monitor GetOutput(ctx context.Context, nameOrAddress string, indexKey uint64) (*Output, error) UpdateOutputsExecution(ctx context.Context, nameOrAddress string, executedOutputs []*Output, blockNumber uint64) error + GetNumberOfExecutedOutputs(ctx context.Context, nameOrAddress string) (uint64, error) } // EthClientInterface defines the methods we need from ethclient.Client @@ -53,22 +55,6 @@ type EthClientInterface interface { ChainID(ctx context.Context) (*big.Int, error) } -type ApplicationContractAdapter interface { - RetrieveOutputExecutionEvents( - opts *bind.FilterOpts, - ) ([]*iapplication.IApplicationOutputExecuted, error) - GetDeploymentBlockNumber(opts *bind.CallOpts) (*big.Int, error) -} - -// Interface for Input reading -type InputSourceAdapter interface { - // Wrapper for FilterInputAdded(), which is automatically generated - // by go-ethereum and cannot be used for testing - RetrieveInputs(opts *bind.FilterOpts, appAddresses []common.Address, index []*big.Int, - ) ([]iinputbox.IInputBoxInputAdded, error) - GetNumberOfInputs(opts *bind.CallOpts, appContract common.Address) (*big.Int, error) -} - type SubscriptionError struct { Cause error } @@ -82,18 +68,32 @@ type appContracts struct { application *Application applicationContract ApplicationContractAdapter inputSource InputSourceAdapter + daveConsensus DaveConsensusAdapter } func (r *Service) Run(ctx context.Context, ready chan struct{}) error { - for { + for attempt := uint64(1); ; attempt++ { err := r.watchForNewBlocks(ctx, ready) - // If the error is a SubscriptionError, re run watchForNewBlocks - // that it will restart the websocket subscription - if _, ok := err.(*SubscriptionError); !ok { + r.Logger.Error(err.Error()) + + if attempt > r.blockchainMaxRetries { + r.Logger.Error("Max attempts reached for subscription restart. Exititng", + "max_retries", r.blockchainMaxRetries, + ) return err } - r.Logger.Error(err.Error()) - r.Logger.Info("Restarting subscription") + + r.Logger.Info("Restarting subscription", + "attempt", attempt, + "remaining", r.blockchainMaxRetries-attempt, + "time_between_attempts", r.blockchainSubscriptionRetryInterval, + ) + + // sleep or cancel + select { + case <-ctx.Done(): + case <-time.After(r.blockchainSubscriptionRetryInterval): + } } } @@ -104,6 +104,26 @@ func getAllRunningApplications(ctx context.Context, er EvmReaderRepository) ([]* return er.ListApplications(ctx, f, repository.Pagination{}, false) } +// setApplicationInoperable marks an application as inoperable with the given reason, +// logs any error that occurs during the update, and returns an error with the reason. +func (r *Service) setApplicationInoperable(ctx context.Context, app *Application, reasonFmt string, args ...any) error { + reason := fmt.Sprintf(reasonFmt, args...) + appAddress := app.IApplicationAddress.String() + + // Log the reason first + r.Logger.Error(reason, "application", app.Name, "address", appAddress) + + // Update application state + err := r.repository.UpdateApplicationState(ctx, app.ID, ApplicationState_Inoperable, &reason) + if err != nil { + r.Logger.Error("failed to update application state to inoperable", + "application", app.Name, + "address", appAddress, "err", err) + } + // Return the error with the reason + return errors.New(reason) +} + // watchForNewBlocks watches for new blocks and reads new inputs based on the // default block configuration, which have not been processed yet. func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) error { @@ -151,8 +171,10 @@ func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) // Build Contracts var apps []appContracts + var daveConsensusApps []appContracts + var iconsensusApps []appContracts for _, app := range runningApps { - applicationContract, inputSource, err := r.adapterFactory.CreateAdapters(app, r.client) + applicationContract, inputSource, daveConsensus, err := r.adapterFactory.CreateAdapters(app, r.client) if err != nil { r.Logger.Error("Error retrieving application contracts", "app", app, "error", err) @@ -162,9 +184,15 @@ func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) application: app, applicationContract: applicationContract, inputSource: inputSource, + daveConsensus: daveConsensus, } apps = append(apps, aContracts) + if app.IsDaveConsensus() { + daveConsensusApps = append(daveConsensusApps, aContracts) + } else { + iconsensusApps = append(iconsensusApps, aContracts) + } } if len(apps) == 0 { @@ -190,7 +218,9 @@ func (r *Service) watchForNewBlocks(ctx context.Context, ready chan<- struct{}) mostRecentHeader.Number.Uint64(), header.Number.Uint64(), r.defaultBlock)) } - r.checkForNewInputs(ctx, apps, blockNumber) + r.checkForEpochsAndInputs(ctx, daveConsensusApps, blockNumber) + + r.checkForNewInputs(ctx, iconsensusApps, blockNumber) r.checkForOutputExecution(ctx, apps, blockNumber) @@ -234,39 +264,53 @@ func (r *Service) fetchMostRecentHeader( } type AdapterFactory interface { - CreateAdapters(app *Application, client EthClientInterface) (ApplicationContractAdapter, InputSourceAdapter, error) + CreateAdapters(app *Application, client EthClientInterface) (ApplicationContractAdapter, InputSourceAdapter, DaveConsensusAdapter, error) } type DefaultAdapterFactory struct { Filter ethutil.Filter } -func (f *DefaultAdapterFactory) CreateAdapters(app *Application, client EthClientInterface) (ApplicationContractAdapter, InputSourceAdapter, error) { +func (f *DefaultAdapterFactory) CreateAdapters(app *Application, client EthClientInterface) (ApplicationContractAdapter, InputSourceAdapter, DaveConsensusAdapter, error) { if app == nil { - return nil, nil, fmt.Errorf("Application reference is nil. Should never happen") + return nil, nil, nil, fmt.Errorf("Application reference is nil. Should never happen") } // Type assertion to get the concrete client if possible ethClient, ok := client.(*ethclient.Client) if !ok { - return nil, nil, fmt.Errorf("client is not an *ethclient.Client, cannot create adapters") + return nil, nil, nil, fmt.Errorf("client is not an *ethclient.Client, cannot create adapters") } applicationContract, err := NewApplicationContractAdapter(app.IApplicationAddress, ethClient, f.Filter) if err != nil { - return nil, nil, errors.Join( + return nil, nil, nil, errors.Join( fmt.Errorf("error building application contract"), err, ) } - inputSource, err := NewInputSourceAdapter(app.IInputBoxAddress, ethClient, f.Filter) - if err != nil { - return nil, nil, errors.Join( - fmt.Errorf("error building inputbox contract"), - err, - ) + var inputSource InputSourceAdapter + if app.HasDataAvailabilitySelector(DataAvailability_InputBox) { + inputSource, err = NewInputSourceAdapter(app.IInputBoxAddress, ethClient, f.Filter) + if err != nil { + return nil, nil, nil, errors.Join( + fmt.Errorf("error building inputbox contract"), + err, + ) + } + } + + var daveConsensus DaveConsensusAdapter + if app.IsDaveConsensus() { + daveConsensus, err = NewDaveConsensusAdapter(app.IConsensusAddress, ethClient, f.Filter) + if err != nil { + return nil, nil, nil, errors.Join( + fmt.Errorf("error building daveconsensus contract"), + err, + ) + } } - return applicationContract, inputSource, nil + return applicationContract, inputSource, daveConsensus, nil } diff --git a/internal/evmreader/evmreader_test.go b/internal/evmreader/evmreader_test.go index 2f1733821..bad40aa20 100644 --- a/internal/evmreader/evmreader_test.go +++ b/internal/evmreader/evmreader_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/cartesi/rollups-node/internal/config" . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/pkg/contracts/iapplication" @@ -50,12 +51,14 @@ var header1JsonBytes []byte //go:embed testdata/header_2.json var header2JsonBytes []byte +//go:embed testdata/header_3.json +var header3JsonBytes []byte + var ( header0 = types.Header{} header1 = types.Header{} header2 = types.Header{} - - block0 = types.Block{} + header3 = types.Header{} inputAddedEvent0 = iinputbox.IInputBoxInputAdded{} inputAddedEvent1 = iinputbox.IInputBoxInputAdded{} @@ -65,15 +68,40 @@ var ( subscription0 = newMockSubscription() ) +var applications = []*Application{{ + Name: "my-app-1", + IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), + IConsensusAddress: common.HexToAddress("0xdeadbeef"), + IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), + DataAvailability: DataAvailability_InputBox[:], + IInputBoxBlock: 0x01, + EpochLength: 10, + LastInputCheckBlock: 0x00, + LastOutputCheckBlock: 0x00, +}, { + Name: "my-app-2", + IApplicationAddress: common.HexToAddress("0x78c716FDaE477595a820D86D0eFAfe0eE54dF7dB"), + IConsensusAddress: common.HexToAddress("0xdeadbeef"), + IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), + DataAvailability: []byte{0x11, 0x32, 0x45, 0x56}, + IInputBoxBlock: 0x01, + EpochLength: 10, + LastInputCheckBlock: 0x00, + LastOutputCheckBlock: 0x00, +}} + type EvmReaderSuite struct { suite.Suite - ctx context.Context - cancel context.CancelFunc - client *MockEthClient - wsClient *MockEthClient - repository *MockRepository - evmReader *Service - contractFactory *MockAdapterFactory + ctx context.Context + cancel context.CancelFunc + client *MockEthClient + wsClient *MockEthClient + repository *MockRepository + evmReader *Service + contractFactory *MockAdapterFactory + applicationContract1 *MockApplicationContract + applicationContract2 *MockApplicationContract + inputBox *MockInputBox } func TestEvmReaderSuite(t *testing.T) { @@ -82,6 +110,7 @@ func TestEvmReaderSuite(t *testing.T) { func (s *EvmReaderSuite) SetupSuite() { s.ctx, s.cancel = context.WithTimeout(context.Background(), suiteTimeout) + config.SetDefaults() err := json.Unmarshal(header0JsonBytes, &header0) s.Require().Nil(err) @@ -89,8 +118,8 @@ func (s *EvmReaderSuite) SetupSuite() { s.Require().Nil(err) err = json.Unmarshal(header2JsonBytes, &header2) s.Require().Nil(err) - - block0 = *types.NewBlockWithHeader(&header0) + err = json.Unmarshal(header3JsonBytes, &header3) + s.Require().Nil(err) err = json.Unmarshal(inputAddedEvent0JsonBytes, &inputAddedEvent0) s.Require().Nil(err) @@ -106,25 +135,33 @@ func (s *EvmReaderSuite) TearDownSuite() { s.cancel() } -func (me *EvmReaderSuite) SetupTest() { - me.client = newMockEthClient() - me.client.On("ChainID", mock.Anything).Return(big.NewInt(1), nil) - me.wsClient = me.client - me.repository = newMockRepository() - me.contractFactory = newMockAdapterFactory() - - me.evmReader = &Service{ - client: me.client, - wsClient: me.wsClient, - repository: me.repository, - defaultBlock: DefaultBlock_Latest, - adapterFactory: me.contractFactory, - hasEnabledApps: true, - inputReaderEnabled: true, +func (s *EvmReaderSuite) SetupTest() { + s.client = newMockEthClient().SetupDefaultBehavior() + s.wsClient = newMockEthClient().SetupDefaultWsBehavior() + s.repository = newMockRepository().SetupDefaultBehavior() + s.applicationContract1 = newMockApplicationContract().SetupDefaultBehavior() + s.applicationContract2 = newMockApplicationContract().SetupDefaultBehavior() + s.inputBox = newMockInputBox().SetupDefaultBehavior(s.ctx) + s.contractFactory = newMockAdapterFactory().SetupDefaultBehavior(s.applicationContract1, s.applicationContract2, s.inputBox) + + s.evmReader = &Service{ + client: s.client, + wsClient: s.wsClient, + repository: s.repository, + defaultBlock: DefaultBlock_Latest, + adapterFactory: s.contractFactory, + hasEnabledApps: true, + inputReaderEnabled: true, + blockchainMaxRetries: 0, + blockchainSubscriptionRetryInterval: time.Second, } - serviceArgs := &service.CreateInfo{Name: "evm-reader", Impl: me.evmReader} - err := service.Create(context.Background(), serviceArgs, &me.evmReader.Service) - me.Require().Nil(err) + + logLevel, err := config.GetLogLevel() + s.Require().Nil(err) + + serviceArgs := &service.CreateInfo{Name: "evm-reader", Impl: s.evmReader, LogLevel: logLevel} + err = service.Create(context.Background(), serviceArgs, &s.evmReader.Service) + s.Require().Nil(err) } // Service tests @@ -156,18 +193,19 @@ func (s *EvmReaderSuite) TestItEventuallyBecomesReady() { } func (s *EvmReaderSuite) TestItFailsToSubscribeForNewInputsOnStart() { - s.client.Unset("SubscribeNewHead") + s.wsClient.Unset("ChainID") + s.wsClient.Unset("SubscribeNewHead") emptySubscription := &MockSubscription{} - s.client.On( + s.wsClient.On( "SubscribeNewHead", mock.Anything, mock.Anything, ).Return(emptySubscription, fmt.Errorf("expected failure")) - s.Require().ErrorContains( - s.evmReader.Run(s.ctx, make(chan struct{}, 1)), - "expected failure") - s.client.AssertNumberOfCalls(s.T(), "SubscribeNewHead", 1) + err := s.evmReader.Run(s.ctx, make(chan struct{}, 1)) + s.Require().ErrorContains(err, "expected failure") + s.wsClient.AssertNumberOfCalls(s.T(), "SubscribeNewHead", 1) + s.wsClient.AssertExpectations(s.T()) } func (s *EvmReaderSuite) TestIndexApps() { @@ -245,27 +283,37 @@ type MockEthClient struct { } func newMockEthClient() *MockEthClient { - client := &MockEthClient{} + return &MockEthClient{} +} - client.On("HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header0, nil) +func (m *MockEthClient) SetupDefaultBehavior() *MockEthClient { + return m +} - client.On("SubscribeNewHead", +func (m *MockEthClient) SetupDefaultWsBehavior() *MockEthClient { + m.On("ChainID", mock.Anything).Return(big.NewInt(1), nil) + m.On("SubscribeNewHead", mock.Anything, mock.Anything, ).Return(subscription0, nil) - - return client + return m } -func (m *MockEthClient) Unset(methodName string) { +func UnsetAll(m *mock.Mock, methodName string) { + // Assuming no multithreading issues for test purposes + var index int for _, call := range m.ExpectedCalls { if call.Method == methodName { - call.Unset() + continue } + m.ExpectedCalls[index] = call + index++ } + m.ExpectedCalls = m.ExpectedCalls[:index] +} + +func (m *MockEthClient) Unset(methodName string) { + UnsetAll(&m.Mock, methodName) } func (m *MockEthClient) HeaderByNumber( @@ -320,7 +368,7 @@ type FakeWSEhtClient struct { } func (f *FakeWSEhtClient) SubscribeNewHead( - ctx context.Context, + _ context.Context, ch chan<- *types.Header, ) (ethereum.Subscription, error) { f.ch = ch @@ -328,13 +376,13 @@ func (f *FakeWSEhtClient) SubscribeNewHead( } func (f *FakeWSEhtClient) HeaderByNumber( - ctx context.Context, - number *big.Int, + _ context.Context, + _ *big.Int, ) (*types.Header, error) { return &header0, nil } -func (f *FakeWSEhtClient) ChainID(ctx context.Context) (*big.Int, error) { +func (f *FakeWSEhtClient) ChainID(_ context.Context) (*big.Int, error) { return big.NewInt(1), nil } @@ -347,25 +395,72 @@ type MockInputBox struct { mock.Mock } -func newMockInputBox() *MockInputBox { - inputSource := &MockInputBox{} +func (m *MockInputBox) SetupDefaultBehavior(ctx context.Context) *MockInputBox { + events0 := []iinputbox.IInputBoxInputAdded{inputAddedEvent0} + retrieveInputsOpts0 := bind.FilterOpts{ + Context: ctx, + Start: 0x11, + End: Pointer(uint64(0x11)), + } + m.On("RetrieveInputs", + &retrieveInputsOpts0, + mock.Anything, + mock.Anything, + ).Return(events0, nil).Once() - events := []iinputbox.IInputBoxInputAdded{inputAddedEvent0} - inputSource.On("RetrieveInputs", + events1 := []iinputbox.IInputBoxInputAdded{inputAddedEvent1} + retrieveInputsOpts1 := bind.FilterOpts{ + Context: ctx, + Start: 0x12, + End: Pointer(uint64(0x12)), + } + m.On("RetrieveInputs", + &retrieveInputsOpts1, + mock.Anything, mock.Anything, + ).Return(events1, nil).Once() + + events2 := []iinputbox.IInputBoxInputAdded{inputAddedEvent2, inputAddedEvent3} + retrieveInputsOpts2 := bind.FilterOpts{ + Context: ctx, + Start: 0x13, + End: Pointer(uint64(0x13)), + } + m.On("RetrieveInputs", + &retrieveInputsOpts2, mock.Anything, mock.Anything, - ).Return(events, nil) + ).Return(events2, nil).Once() - return inputSource + m.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + m.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + m.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Times(4) + m.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(2), nil).Once() + m.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(4), nil).Once() + return m +} + +func newMockInputBox() *MockInputBox { + return &MockInputBox{} } func (m *MockInputBox) Unset(methodName string) { - for _, call := range m.ExpectedCalls { - if call.Method == methodName { - call.Unset() - } - } + UnsetAll(&m.Mock, methodName) } func (m *MockInputBox) RetrieveInputs( @@ -387,101 +482,112 @@ type MockRepository struct { mock.Mock } -func newMockRepository() *MockRepository { - repo := &MockRepository{} +func copyApplications(apps []*Application) []*Application { + copies := make([]*Application, len(apps)) + for i, app := range apps { + if app == nil { + continue + } + copyApp := *app + copies[i] = ©App + } + return copies +} - repo.On("CreateEpochsAndInputs", +func (m *MockRepository) SetupDefaultBehavior() *MockRepository { + + apps := copyApplications(applications) + m.On("ListApplications", mock.Anything, mock.Anything, mock.Anything, - mock.Anything).Return(nil) + false, + ).Return(apps, uint64(2), nil).Once() - repo.On("GetEpoch", + apps = copyApplications(applications) + apps[0].LastInputCheckBlock = 0x11 + apps[0].LastOutputCheckBlock = 0x11 + apps[1].LastOutputCheckBlock = 0x11 + m.On("ListApplications", mock.Anything, mock.Anything, - uint64(0)).Return( - &Epoch{ - Index: 0, - FirstBlock: 0, - LastBlock: 9, - Status: EpochStatus_Open, - ClaimHash: nil, - ClaimTransactionHash: nil, - }, nil) - repo.On("GetEpoch", mock.Anything, + false, + ).Return(apps, uint64(2), nil).Once() + + apps = copyApplications(applications) + apps[0].LastInputCheckBlock = 0x12 + apps[0].LastOutputCheckBlock = 0x12 + apps[1].LastOutputCheckBlock = 0x12 + m.On("ListApplications", mock.Anything, - uint64(1)).Return( - &Epoch{ - Index: 1, - FirstBlock: 10, - LastBlock: 19, - Status: EpochStatus_Open, - ClaimHash: nil, - ClaimTransactionHash: nil, - }, nil) - repo.On("GetEpoch", mock.Anything, mock.Anything, - uint64(2)).Return( - &Epoch{ - Index: 2, - FirstBlock: 20, - LastBlock: 29, - Status: EpochStatus_Open, - ClaimHash: nil, - ClaimTransactionHash: nil, - }, nil) + false, + ).Return(apps, uint64(2), nil).Once() - repo.On("ListEpochs", + m.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, + MonitoredEvent_InputAdded, mock.Anything, + ).Return(nil).Times(1) + m.On("UpdateEventLastCheckBlock", mock.Anything, - false, - ).Return([]*Epoch{}, uint64(0), nil) + mock.Anything, + MonitoredEvent_OutputExecuted, + mock.Anything, + ).Return(nil).Times(8) - repo.On("UpdateOutputsExecution", + m.On("GetNumberOfInputs", mock.Anything, mock.Anything, + ).Once().Return(uint64(0), nil) + m.On("GetNumberOfInputs", mock.Anything, - mock.Anything).Return(nil) + mock.Anything, + ).Once().Return(uint64(1), nil) + m.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Once().Return(uint64(2), nil) - outputHash := common.HexToHash("0xAABBCCDDEE") - repo.On("GetOutput", + m.On("GetNumberOfExecutedOutputs", mock.Anything, - common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E").String(), - 0).Return( - &Output{ - Index: 0, - RawData: common.Hex2Bytes("0xdeadbeef"), - Hash: &outputHash, - InputIndex: 1, - OutputHashesSiblings: nil, - ExecutionTransactionHash: nil, - }, - ) + mock.Anything, + ).Return(uint64(0), nil).Times(6) - return repo + m.On("CreateEpochsAndInputs", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + m.On("GetEpoch", + mock.Anything, + mock.Anything, + uint64(0)).Return(nil, nil).Once() + m.On("GetEpoch", + mock.Anything, + mock.Anything, + uint64(1)).Return( + &Epoch{ + Index: 1, + FirstBlock: 11, + LastBlock: 20, + Status: EpochStatus_Open, + OutputsMerkleRoot: nil, + ClaimTransactionHash: nil, + }, nil).Twice() + return m } -func (m *MockRepository) Unset(methodName string) { - for _, call := range m.ExpectedCalls { - if call.Method == methodName { - call.Unset() - } - } +func newMockRepository() *MockRepository { + return &MockRepository{} } -func (m *MockRepository) CreateEpochAndInputs( - ctx context.Context, - nameOrAddress string, - epochInputMap map[*Epoch][]*Input, - blockNumber uint64, -) (err error) { - args := m.Called(ctx, nameOrAddress, epochInputMap, blockNumber) - return args.Error(0) +func (m *MockRepository) Unset(methodName string) { + UnsetAll(&m.Mock, methodName) } func (m *MockRepository) ListApplications( @@ -536,6 +642,26 @@ func (m *MockRepository) GetOutput(ctx context.Context, nameOrAddress string, in return obj.(*Output), args.Error(1) } +func (m *MockRepository) UpdateEpochClaimTransactionHash(ctx context.Context, nameOrAddress string, e *Epoch) error { + args := m.Called(ctx, nameOrAddress, e) + return args.Error(0) +} + +func (m *MockRepository) GetLastNonOpenEpoch(ctx context.Context, nameOrAddress string) (*Epoch, error) { + args := m.Called(ctx, nameOrAddress) + return args.Get(0).(*Epoch), args.Error(1) +} + +func (m *MockRepository) GetNumberOfInputs(ctx context.Context, nameOrAddress string) (uint64, error) { + args := m.Called(ctx, nameOrAddress) + return args.Get(0).(uint64), args.Error(1) +} + +func (m *MockRepository) GetNumberOfExecutedOutputs(ctx context.Context, nameOrAddress string) (uint64, error) { + args := m.Called(ctx, nameOrAddress) + return args.Get(0).(uint64), args.Error(1) +} + func (m *MockRepository) UpdateOutputsExecution(ctx context.Context, nameOrAddress string, executedOutputs []*Output, blockNumber uint64) error { args := m.Called(ctx, nameOrAddress, executedOutputs, blockNumber) @@ -553,16 +679,27 @@ func (m *MockRepository) UpdateEventLastCheckBlock(ctx context.Context, appIDs [ return args.Error(0) } +func (m *MockRepository) GetEventLastCheckBlock(ctx context.Context, appID int64, event MonitoredEvent) (uint64, error) { + args := m.Called(ctx, appID, event) + return args.Get(0).(uint64), args.Error(1) +} + type MockApplicationContract struct { mock.Mock } +func (m *MockApplicationContract) SetupDefaultBehavior() *MockApplicationContract { + m.On("GetDeploymentBlockNumber", + mock.Anything, + ).Return(new(big.Int).SetUint64(0x10), nil).Once() + m.On("GetNumberOfExecutedOutputs", + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Times(4) + return m +} + func (m *MockApplicationContract) Unset(methodName string) { - for _, call := range m.ExpectedCalls { - if call.Method == methodName { - call.Unset() - } - } + UnsetAll(&m.Mock, methodName) } func (m *MockApplicationContract) RetrieveOutputExecutionEvents( @@ -577,22 +714,27 @@ func (m *MockApplicationContract) GetDeploymentBlockNumber(opts *bind.CallOpts) return args.Get(0).(*big.Int), args.Error(1) } +func (m *MockApplicationContract) GetNumberOfExecutedOutputs(opts *bind.CallOpts) (*big.Int, error) { + args := m.Called(opts) + return args.Get(0).(*big.Int), args.Error(1) +} + +func newMockApplicationContract() *MockApplicationContract { + return &MockApplicationContract{} +} + type MockAdapterFactory struct { mock.Mock } func (m *MockAdapterFactory) Unset(methodName string) { - for _, call := range m.ExpectedCalls { - if call.Method == methodName { - call.Unset() - } - } + UnsetAll(&m.Mock, methodName) } func (m *MockAdapterFactory) CreateAdapters( app *Application, client EthClientInterface, -) (ApplicationContractAdapter, InputSourceAdapter, error) { +) (ApplicationContractAdapter, InputSourceAdapter, DaveConsensusAdapter, error) { args := m.Called(app, client) // Safely handle nil values to prevent interface conversion panic @@ -608,28 +750,54 @@ func (m *MockAdapterFactory) CreateAdapters( inputSource = newMockInputBox() } - return appContract, inputSource, args.Error(2) + return appContract, inputSource, nil, args.Error(2) } -func newMockAdapterFactory() *MockAdapterFactory { - applicationContract := &MockApplicationContract{} - applicationContract.On("RetrieveOutputExecutionEvents", - mock.Anything, - ).Return([]*iapplication.IApplicationOutputExecuted{}, nil) +func (m *MockAdapterFactory) SetupDefaultBehavior( + appContract1 *MockApplicationContract, + appContract2 *MockApplicationContract, + inputBox1 *MockInputBox, +) *MockAdapterFactory { - inputBox := newMockInputBox() - inputBox.On("RetrieveInputs", + // Set up a default behavior that always returns valid non-nil interfaces + m.On("CreateAdapters", + mock.Anything, + mock.Anything, + ).Return(appContract1, inputBox1, nil).Once() + m.On("CreateAdapters", + mock.Anything, + mock.Anything, + ).Return(appContract2, nil, nil).Once() + m.On("CreateAdapters", mock.Anything, mock.Anything, + ).Return(appContract1, inputBox1, nil).Once() + m.On("CreateAdapters", mock.Anything, - ).Return([]iinputbox.IInputBoxInputAdded{}, nil) + mock.Anything, + ).Return(appContract2, nil, nil).Once() + m.On("CreateAdapters", + mock.Anything, + mock.Anything, + ).Return(appContract1, inputBox1, nil).Once() + m.On("CreateAdapters", + mock.Anything, + mock.Anything, + ).Return(appContract2, nil, nil).Once() + return m +} - factory := &MockAdapterFactory{} +func (m *MockAdapterFactory) SetupDefaultBehaviorSingleApp( + appContract *MockApplicationContract, + inputBox *MockInputBox) *MockAdapterFactory { // Set up a default behavior that always returns valid non-nil interfaces - factory.On("CreateAdapters", + m.On("CreateAdapters", mock.Anything, mock.Anything, - ).Return(applicationContract, inputBox, nil) + ).Return(appContract, inputBox, nil) + return m +} - return factory +func newMockAdapterFactory() *MockAdapterFactory { + return &MockAdapterFactory{} } diff --git a/internal/evmreader/input.go b/internal/evmreader/input.go index e64453b86..8da87e58c 100644 --- a/internal/evmreader/input.go +++ b/internal/evmreader/input.go @@ -10,81 +10,32 @@ import ( "math/big" . "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/ethutil" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ) -// Find the last block number that should be considered checked when scanning the blockchain -// for inputs of this application. The next search should start from (lastBlockChecked + 1). -// -// The main purpose of this function is to reduce the range of blocks that need to be scanned, -// avoiding unnecessary lookups. Currently, it is only used when handling new applications. -// -// Rules applied in order: -// 1. Default fallback: the block before the InputBox was deployed. -// 2. If the application has never received inputs, use the most recent block. -// 3. If the application has no inputs since its deployment, use the deployment block. -// 4. Otherwise, return the block before the InputBox deployment (conservative bound). -// -// Note: this function returns the *last block checked*, not the first block to search. -// Example: -// -// lastBlockChecked := findLastInputCheckBlock(...) -// nextSearchBlock := lastBlockChecked + 1 -func findLastInputCheckBlock(app *appContracts, mostRecentBlockNumber uint64, mostRecentBlockNumberCallOpts *bind.CallOpts) uint64 { - var noiBig *big.Int - var err error - blockBeforeInputBox := app.application.IInputBoxBlock - 1 - - // find if the application has ever received any input. sync to present if not - noiBig, err = app.inputSource.GetNumberOfInputs( - mostRecentBlockNumberCallOpts, - app.application.IApplicationAddress, - ) - if err != nil { - return blockBeforeInputBox - } - if noiBig.Uint64() == 0 { - return mostRecentBlockNumber - } - - // find if the application has received an input since its deployment. sync to that block if not - // we'll need its deployment block number to do that - deploymentBlockNumberBig, err := app.applicationContract.GetDeploymentBlockNumber(mostRecentBlockNumberCallOpts) - if err != nil { - return blockBeforeInputBox - } - - noiBig, err = app.inputSource.GetNumberOfInputs(&bind.CallOpts{ - BlockNumber: deploymentBlockNumberBig, - }, - app.application.IApplicationAddress, - ) - if err != nil { - return blockBeforeInputBox - } - if noiBig.Uint64() == 0 { - return deploymentBlockNumberBig.Uint64() - } - - // TODO(mpolitzer): Application has inputs previous to its deployment. We can reduce the number of blocks to scan by - // doing a binary search over GetNumberOfInputs and finding the block where 0 -> 1 transition happens. As a simpler, - // also correct implementation. We return the first possible block an input could appear on. - return blockBeforeInputBox -} - // initializeNewApplicationInputSync initializes input synchronization for a new application // by finding the appropriate starting block and updating the database func (r *Service) initializeNewApplicationInputSync( ctx context.Context, app *appContracts, mostRecentBlockNumber uint64, - mostRecentBlockNumberCallOpts *bind.CallOpts, ) (uint64, error) { - lastInputCheckBlock := findLastInputCheckBlock(app, - mostRecentBlockNumber, - mostRecentBlockNumberCallOpts, + r.Logger.Info("Initializing application input sync", + "application", app.application.Name, + "inputbox_block", app.application.IInputBoxBlock, + "current_block", mostRecentBlockNumber, ) + if app.application.IInputBoxBlock == 0 { + r.Logger.Error("Application has no InputBox block number defined", + "application", app.application.Name, + "inputbox", app.application.IInputBoxAddress, + "iinputbox_block", app.application.IInputBoxBlock, + ) + return 0, errors.New("application has no InputBox block number defined") + } + lastInputCheckBlock := app.application.IInputBoxBlock - 1 err := r.repository.UpdateEventLastCheckBlock(ctx, []int64{app.application.ID}, MonitoredEvent_InputAdded, lastInputCheckBlock) if err != nil { @@ -95,10 +46,10 @@ func (r *Service) initializeNewApplicationInputSync( ) return 0, err } - - r.Logger.Info("Initializing application input sync", + r.Logger.Debug("Application input sync initialized", "application", app.application.Name, "inputbox_block", app.application.IInputBoxBlock, + "last_input_check_block", lastInputCheckBlock, "next_search_block", lastInputCheckBlock+1, "current_block", mostRecentBlockNumber, ) @@ -119,10 +70,6 @@ func (r *Service) checkForNewInputs( r.Logger.Debug("Checking for new inputs") - mostRecentBlockNumberCallOpts := &bind.CallOpts{ - BlockNumber: new(big.Int).SetUint64(mostRecentBlockNumber), - } - appsByInputBox := map[common.Address][]appContracts{} for _, app := range applications { if !app.application.HasDataAvailabilitySelector(DataAvailability_InputBox) { @@ -135,7 +82,7 @@ func (r *Service) checkForNewInputs( for inputBoxAddress, inputBoxApps := range appsByInputBox { r.Logger.Debug("Checking inputs for applications with the same InputBox", "inputbox_address", inputBoxAddress, - "most recent block", mostRecentBlockNumber, + "most_recent_block", mostRecentBlockNumber, ) appsByLastInputCheckBlock := make(map[uint64][]appContracts) @@ -143,9 +90,13 @@ func (r *Service) checkForNewInputs( lastInputCheckBlock := app.application.LastInputCheckBlock if lastInputCheckBlock == 0 { // New application. Find a safe start block to scan for inputs var err error - lastInputCheckBlock, err = r.initializeNewApplicationInputSync(ctx, &app, - mostRecentBlockNumber, mostRecentBlockNumberCallOpts) + lastInputCheckBlock, err = r.initializeNewApplicationInputSync(ctx, &app, mostRecentBlockNumber) if err != nil { + r.Logger.Error("Failed to initialize application input sync", + "application", app.application.Name, + "most_recent_block", mostRecentBlockNumber, + "error", err, + ) continue } } @@ -230,7 +181,12 @@ func (r *Service) readAndStoreInputs( "address", address) continue } + epochLength := app.application.EpochLength + if epochLength == 0 { + _ = r.setApplicationInoperable(ctx, app.application, "Application has epoch length of zero") + continue + } // Retrieves last open epoch from DB currentEpoch, err := r.repository.GetEpoch(ctx, address.String(), calculateEpochIndex(epochLength, lastProcessedBlock)) @@ -262,22 +218,15 @@ func (r *Service) readAndStoreInputs( if currentEpoch.Index == inputEpochIndex { // Input can only be added to open epochs if currentEpoch.Status != EpochStatus_Open { - reason := "Received inputs for an epoch that was not open. Should never happen" - r.Logger.Error(reason, - "application", app.application.Name, - "address", address, - "epoch_index", currentEpoch.Index, - "status", currentEpoch.Status, - ) - err := r.repository.UpdateApplicationState(ctx, app.application.ID, ApplicationState_Inoperable, &reason) - if err != nil { - r.Logger.Error("failed to update application state to inoperable", "application", app.application.Name, "err", err) - } - return errors.New(reason) + return r.setApplicationInoperable(ctx, app.application, + "Received inputs for an epoch that was not open. Should never happen. Epoch %d Status %s, Input %d", + currentEpoch.Index, currentEpoch.Status, input.Index) } + currentEpoch.InputIndexUpperBound = input.Index + 1 } else { if currentEpoch.Status == EpochStatus_Open { currentEpoch.Status = EpochStatus_Closed + currentEpoch.InputIndexUpperBound = input.Index r.Logger.Info("Closing epoch", "application", app.application.Name, "address", address, @@ -294,10 +243,12 @@ func (r *Service) readAndStoreInputs( } if currentEpoch == nil { currentEpoch = &Epoch{ - Index: inputEpochIndex, - FirstBlock: inputEpochIndex * epochLength, - LastBlock: (inputEpochIndex * epochLength) + epochLength - 1, - Status: EpochStatus_Open, + Index: inputEpochIndex, + FirstBlock: inputEpochIndex * epochLength, + LastBlock: (inputEpochIndex * epochLength) + epochLength - 1, + InputIndexLowerBound: input.Index, + InputIndexUpperBound: input.Index + 1, + Status: EpochStatus_Open, } epochInputMap[currentEpoch] = []*Input{} } @@ -334,30 +285,29 @@ func (r *Service) readAndStoreInputs( } } - err = r.repository.CreateEpochsAndInputs( - ctx, - address.String(), - epochInputMap, - mostRecentBlockNumber, - ) - if err != nil { - r.Logger.Error("Error storing inputs and epochs", - "application", app.application.Name, - "address", address, - "error", err, - ) - continue - } - // Store everything if len(epochInputMap) > 0 { + err = r.repository.CreateEpochsAndInputs( + ctx, + address.String(), + epochInputMap, + mostRecentBlockNumber, + ) + if err != nil { + r.Logger.Error("Error storing inputs and epochs", + "application", app.application.Name, + "address", address, + "error", err, + ) + continue + } r.Logger.Debug("Inputs and epochs stored successfully", "application", app.application.Name, "address", address, - "start-block", nextSearchBlock, - "end-block", mostRecentBlockNumber, - "total epochs", len(epochInputMap), - "total inputs", len(inputs), + "start_block", nextSearchBlock, + "end_block", mostRecentBlockNumber, + "epoch_count", len(epochInputMap), + "input_count", len(inputs), ) } else { r.Logger.Debug("No inputs or epochs to store") @@ -398,7 +348,6 @@ func (r *Service) readAndStoreInputs( return nil } -// readInputsFromBlockchain read the inputs from the blockchain ordered by Input index func (r *Service) readInputsFromBlockchain( ctx context.Context, apps []appContracts, @@ -407,40 +356,95 @@ func (r *Service) readInputsFromBlockchain( // Initialize app input map var appInputsMap = make(map[common.Address][]*Input) - var appsAddresses = []common.Address{} + for _, app := range apps { - appInputsMap[app.application.IApplicationAddress] = []*Input{} - appsAddresses = append(appsAddresses, app.application.IApplicationAddress) + inputs, err := r.fetchApplicationInputs(ctx, app, startBlock, endBlock) + if err != nil { + r.Logger.Error("Error fetching inputs for application", + "application", app.application.Name, + "start_block", startBlock, + "end_block", endBlock, + "error", err.Error(), + ) + continue + } + appInputsMap[app.application.IApplicationAddress] = inputs } - inputSource := apps[0].inputSource - opts := bind.FilterOpts{ - Context: ctx, - Start: startBlock, - End: &endBlock, - } - inputsEvents, err := inputSource.RetrieveInputs(&opts, appsAddresses, nil) - if err != nil { - return nil, err + return appInputsMap, nil +} + +func (r *Service) fetchApplicationInputs( + ctx context.Context, + app appContracts, + startBlock, endBlock uint64, +) ([]*Input, error) { + r.Logger.Debug("Fetching inputs for application", + "application", app.application.Name, + "start_block", startBlock, + "end_block", endBlock, + ) + + // Define oracle function that returns the number of inputs at a given block + oracle := func(ctx context.Context, block uint64) (*big.Int, error) { + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(block), + } + numInputs, err := app.inputSource.GetNumberOfInputs(callOpts, app.application.IApplicationAddress) + if err != nil { + return nil, fmt.Errorf("failed to get number of inputs at block %d: %w", block, err) + } + return numInputs, nil } - // Order inputs as order is not enforced by RetrieveInputs method nor the APIs - for _, event := range inputsEvents { - r.Logger.Debug("Received input", - "address", event.AppContract, - "index", event.Index, - "block", event.Raw.BlockNumber) - input := &Input{ - Index: event.Index.Uint64(), - Status: InputCompletionStatus_None, - RawData: event.Input, - BlockNumber: event.Raw.BlockNumber, - TransactionReference: common.BigToHash(event.Index), + var sortedInputs []*Input + // Define onHit function that accumulates inputs at transition blocks + onHit := func(block uint64) error { + filterOpts := &bind.FilterOpts{ + Context: ctx, + Start: block, + End: &block, + } + inputEvents, err := app.inputSource.RetrieveInputs( + filterOpts, + []common.Address{app.application.IApplicationAddress}, + nil, + ) + if err != nil { + return fmt.Errorf("failed to retrieve inputs at block %d: %w", block, err) + } + for _, event := range inputEvents { + input := &Input{ + Index: event.Index.Uint64(), + Status: InputCompletionStatus_None, + RawData: event.Input, + BlockNumber: event.Raw.BlockNumber, + TransactionReference: event.Raw.TxHash, + } + sortedInputs = insertSorted(sortByInputIndex, sortedInputs, input) } + return nil + } - // Insert Sorted - appInputsMap[event.AppContract] = insertSorted( - sortByInputIndex, appInputsMap[event.AppContract], input) + inputCount, err := r.repository.GetNumberOfInputs(ctx, app.application.IApplicationAddress.String()) + if err != nil { + return nil, fmt.Errorf("failed to get number of inputs from repository: %w", err) } - return appInputsMap, nil + prevValue := new(big.Int).SetUint64(inputCount) + + // Use FindTransitions to find blocks where inputs were added + _, err = ethutil.FindTransitions(ctx, startBlock, endBlock, prevValue, oracle, onHit) + if err != nil { + return nil, fmt.Errorf("failed to walk input transitions: %w", err) + } + + r.Logger.Debug("Fetched inputs for application", + "application", app.application.Name, + "start_block", startBlock, + "end_block", endBlock, + "prev_input_count", prevValue.Uint64(), + "new_inputs", len(sortedInputs), + ) + return sortedInputs, nil } diff --git a/internal/evmreader/input_test.go b/internal/evmreader/input_test.go index bd7dea9de..1d66c2cec 100644 --- a/internal/evmreader/input_test.go +++ b/internal/evmreader/input_test.go @@ -8,7 +8,6 @@ import ( "time" . "github.com/cartesi/rollups-node/internal/model" - "github.com/cartesi/rollups-node/pkg/contracts/iapplication" "github.com/cartesi/rollups-node/pkg/contracts/iinputbox" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -16,143 +15,58 @@ import ( ) func (s *EvmReaderSuite) TestItReadsInputsFromNewBlocksFilteredByDA() { - //New EVM Reader wsClient := FakeWSEhtClient{} s.evmReader.wsClient = &wsClient - otherDA := DataAvailability_InputBox - otherDA[0]++ + // Start service + ready := make(chan struct{}, 1) + errChannel := make(chan error, 1) - // Prepare repository - s.repository.Unset("ListApplications") - s.repository.On( - "ListApplications", - mock.Anything, - mock.Anything, - mock.Anything, - false, - ).Return([]*Application{{ - IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - DataAvailability: DataAvailability_InputBox[:], - IInputBoxBlock: 0x10, - EpochLength: 10, - LastInputCheckBlock: 0x00, - }}, uint64(1), nil).Once() - s.repository.On( - "ListApplications", - mock.Anything, - mock.Anything, - mock.Anything, - false, - ).Return([]*Application{{ - IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - DataAvailability: otherDA[:], - IInputBoxBlock: 0x10, - EpochLength: 10, - LastInputCheckBlock: 0x11, - }}, uint64(1), nil).Once() + go func() { + errChannel <- s.evmReader.Run(s.ctx, ready) + }() - s.repository.Unset("UpdateEventLastCheckBlock") - s.repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_InputAdded, - mock.Anything, - ).Once().Return(nil) - s.repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_OutputExecuted, - mock.Anything, - ).Once().Return(nil) - s.repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_OutputExecuted, - mock.Anything, - ).Once().Return(nil) + select { + case <-ready: + break + case err := <-errChannel: + s.FailNow("unexpected error signal", err) + } - // Prepare Client - s.client.Unset("HeaderByNumber") - s.client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header0, nil).Once() - s.client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header1, nil).Once() - s.client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header2, nil).Once() + wsClient.fireNewHead(&header0) + wsClient.fireNewHead(&header1) + wsClient.fireNewHead(&header2) + time.Sleep(time.Second) - inputBox := newMockInputBox() - applicationContract := &MockApplicationContract{} - applicationContract.On("RetrieveOutputExecutionEvents", - mock.Anything, - ).Return([]*iapplication.IApplicationOutputExecuted{}, nil) + s.repository.AssertNumberOfCalls(s.T(), "CreateEpochsAndInputs", 3) + s.repository.AssertNumberOfCalls(s.T(), "UpdateEventLastCheckBlock", 9) + s.repository.AssertNumberOfCalls(s.T(), "UpdateOutputsExecution", 0) + s.repository.AssertExpectations(s.T()) - s.contractFactory.Unset("CreateAdapters") - s.contractFactory.On("CreateAdapters", - mock.Anything, - mock.Anything, - ).Return(applicationContract, inputBox, nil) + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) +} - // Prepare sequence of inputs - inputBox.Unset("RetrieveInputs") - events_0 := []iinputbox.IInputBoxInputAdded{inputAddedEvent0} - mostRecentBlockNumber_0 := uint64(0x11) - retrieveInputsOpts_0 := bind.FilterOpts{ - Context: s.ctx, - Start: 0x10, - End: &mostRecentBlockNumber_0, - } - inputBox.On( - "RetrieveInputs", - &retrieveInputsOpts_0, - mock.Anything, - mock.Anything, - ).Return(events_0, nil) +func (s *EvmReaderSuite) TestItReadsInputsFromNewFinalizedBlocks() { + wsClient := FakeWSEhtClient{} + s.evmReader.wsClient = &wsClient + s.evmReader.defaultBlock = DefaultBlock_Finalized - events_1 := []iinputbox.IInputBoxInputAdded{inputAddedEvent1} - mostRecentBlockNumber_1 := uint64(0x12) - retrieveInputsOpts_1 := bind.FilterOpts{ - Context: s.ctx, - Start: 0x12, - End: &mostRecentBlockNumber_1, - } - inputBox.On( - "RetrieveInputs", - &retrieveInputsOpts_1, - mock.Anything, + s.client.On("HeaderByNumber", mock.Anything, - ).Return(events_1, nil) - - inputBox.Unset("GetNumberOfInputs") - inputBox.On( - "GetNumberOfInputs", mock.Anything, + ).Return(&header0, nil).Once() + s.client.On("HeaderByNumber", mock.Anything, - ).Return(new(big.Int).SetUint64(2), nil) - - applicationContract.On( - "GetDeploymentBlockNumber", mock.Anything, - ).Return(new(big.Int).SetUint64(10), nil) - - inputBox.On( - "GetNumberOfInputs", + ).Return(&header1, nil).Once() + s.client.On("HeaderByNumber", mock.Anything, mock.Anything, - ).Return(new(big.Int).SetUint64(0), nil) + ).Return(&header2, nil).Once() // Start service ready := make(chan struct{}, 1) @@ -169,17 +83,21 @@ func (s *EvmReaderSuite) TestItReadsInputsFromNewBlocksFilteredByDA() { s.FailNow("unexpected error signal", err) } - wsClient.fireNewHead(&header0) - wsClient.fireNewHead(&header1) + wsClient.fireNewHead(&header3) + wsClient.fireNewHead(&header3) + wsClient.fireNewHead(&header3) time.Sleep(time.Second) - // retrieve inputs only for the application with: DataAvailability_InputBox - inputBox.AssertNumberOfCalls(s.T(), "RetrieveInputs", 1) - s.repository.AssertNumberOfCalls( - s.T(), - "CreateEpochsAndInputs", - 1, - ) + s.repository.AssertNumberOfCalls(s.T(), "CreateEpochsAndInputs", 3) + s.repository.AssertNumberOfCalls(s.T(), "UpdateEventLastCheckBlock", 9) + s.repository.AssertNumberOfCalls(s.T(), "UpdateOutputsExecution", 0) + s.repository.AssertExpectations(s.T()) + + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) } func (s *EvmReaderSuite) TestItUpdatesLastInputCheckBlockWhenThereIsNoInputs() { @@ -187,45 +105,19 @@ func (s *EvmReaderSuite) TestItUpdatesLastInputCheckBlockWhenThereIsNoInputs() { s.evmReader.wsClient = &wsClient // Prepare repository - s.repository.Unset("ListApplications") - s.repository.On( - "ListApplications", - mock.Anything, - mock.Anything, - mock.Anything, - false, - ).Return([]*Application{{ - IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - DataAvailability: DataAvailability_InputBox[:], - IInputBoxBlock: 0x10, - EpochLength: 10, - LastInputCheckBlock: 0x00, - }}, uint64(1), nil).Once() - s.repository.On( - "ListApplications", + s.repository.Unset("UpdateEventLastCheckBlock") + s.repository.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, + MonitoredEvent_InputAdded, mock.Anything, - false, - ).Return([]*Application{{ - IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - DataAvailability: DataAvailability_InputBox[:], - IInputBoxBlock: 0x10, - EpochLength: 10, - LastInputCheckBlock: 0x11, - }}, uint64(1), nil).Once() - - s.repository.Unset("UpdateEventLastCheckBlock") + ).Return(nil).Times(2) s.repository.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, - MonitoredEvent_InputAdded, + MonitoredEvent_OutputExecuted, mock.Anything, - ).Once().Return(nil) + ).Return(nil).Times(4) s.repository.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, @@ -237,7 +129,7 @@ func (s *EvmReaderSuite) TestItUpdatesLastInputCheckBlockWhenThereIsNoInputs() { mock.Anything, MonitoredEvent_OutputExecuted, mock.Anything, - ).Once().Return(nil) + ).Return(nil).Times(2) s.repository.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, @@ -249,86 +141,23 @@ func (s *EvmReaderSuite) TestItUpdatesLastInputCheckBlockWhenThereIsNoInputs() { mock.Anything, MonitoredEvent_OutputExecuted, mock.Anything, - ).Once().Return(nil) + ).Return(nil).Times(2) - // Prepare Client - s.client.Unset("HeaderByNumber") - s.client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header0, nil).Once() - s.client.On( - "HeaderByNumber", + s.repository.Unset("GetNumberOfInputs") + s.repository.On("GetNumberOfInputs", mock.Anything, mock.Anything, - ).Return(&header1, nil).Once() - s.client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header2, nil).Once() - - inputBox := newMockInputBox() - // Setup adapter factory - s.contractFactory.Unset("CreateAdapters") - applicationContract := &MockApplicationContract{} - applicationContract.On("RetrieveOutputExecutionEvents", - mock.Anything, - ).Return([]*iapplication.IApplicationOutputExecuted{}, nil) - - s.contractFactory.On("CreateAdapters", - mock.Anything, - mock.Anything, - ).Return(applicationContract, inputBox, nil) + ).Return(uint64(0), nil).Times(3) + s.repository.Unset("CreateEpochsAndInputs") // Prepare sequence of inputs - inputBox.Unset("RetrieveInputs") - events_0 := []iinputbox.IInputBoxInputAdded{} - mostRecentBlockNumber_0 := uint64(0x11) - retrieveInputsOpts_0 := bind.FilterOpts{ - Context: s.ctx, - Start: 0x10, - End: &mostRecentBlockNumber_0, - } - inputBox.On( - "RetrieveInputs", - &retrieveInputsOpts_0, - mock.Anything, - mock.Anything, - ).Return(events_0, nil) - - inputBox.Unset("GetNumberOfInputs") - inputBox.On( - "GetNumberOfInputs", - mock.Anything, - mock.Anything, - ).Return(new(big.Int).SetUint64(1), nil) - - applicationContract.On( - "GetDeploymentBlockNumber", - mock.Anything, - ).Return(new(big.Int).SetUint64(10), nil) - - inputBox.On( + s.inputBox.Unset("RetrieveInputs") + s.inputBox.Unset("GetNumberOfInputs") + s.inputBox.On( "GetNumberOfInputs", mock.Anything, mock.Anything, - ).Return(new(big.Int).SetUint64(0), nil) - - events_1 := []iinputbox.IInputBoxInputAdded{} - mostRecentBlockNumber_1 := uint64(0x12) - retrieveInputsOpts_1 := bind.FilterOpts{ - Context: s.ctx, - Start: 0x12, - End: &mostRecentBlockNumber_1, - } - inputBox.On( - "RetrieveInputs", - &retrieveInputsOpts_1, - mock.Anything, - mock.Anything, - ).Return(events_1, nil) + ).Return(new(big.Int).SetUint64(0), nil).Times(4) // Start service ready := make(chan struct{}, 1) @@ -347,44 +176,34 @@ func (s *EvmReaderSuite) TestItUpdatesLastInputCheckBlockWhenThereIsNoInputs() { wsClient.fireNewHead(&header0) wsClient.fireNewHead(&header1) + wsClient.fireNewHead(&header2) time.Sleep(time.Second) - inputBox.AssertNumberOfCalls(s.T(), "RetrieveInputs", 2) - s.repository.AssertNumberOfCalls( - s.T(), - "CreateEpochsAndInputs", - 2, - ) + s.repository.AssertNumberOfCalls(s.T(), "CreateEpochsAndInputs", 0) + s.repository.AssertExpectations(s.T()) + + s.inputBox.AssertNumberOfCalls(s.T(), "RetrieveInputs", 0) + s.inputBox.AssertExpectations(s.T()) + + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) } func (s *EvmReaderSuite) TestItReadsMultipleInputsFromSingleNewBlock() { - //New EVM Reader wsClient := FakeWSEhtClient{} s.evmReader.wsClient = &wsClient - // Prepare Client - s.client.Unset("HeaderByNumber") - s.client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header2, nil).Once() - - inputBox := newMockInputBox() - s.contractFactory.Unset("CreateAdapters") - applicationContract := &MockApplicationContract{} - applicationContract.On("RetrieveOutputExecutionEvents", + s.applicationContract1.Unset("GetDeploymentBlockNumber") + s.applicationContract1.Unset("GetNumberOfExecutedOutputs") + s.applicationContract1.On("GetNumberOfExecutedOutputs", mock.Anything, - ).Return([]*iapplication.IApplicationOutputExecuted{}, nil) - - s.contractFactory.On("CreateAdapters", - mock.Anything, - mock.Anything, - ).Return(applicationContract, inputBox, nil) + ).Return(new(big.Int).SetUint64(0), nil).Once() // Prepare sequence of inputs - inputBox.Unset("RetrieveInputs") + s.inputBox.Unset("RetrieveInputs") events_2 := []iinputbox.IInputBoxInputAdded{inputAddedEvent2, inputAddedEvent3} mostRecentBlockNumber_2 := uint64(0x13) retrieveInputsOpts_2 := bind.FilterOpts{ @@ -392,45 +211,43 @@ func (s *EvmReaderSuite) TestItReadsMultipleInputsFromSingleNewBlock() { Start: 0x13, End: &mostRecentBlockNumber_2, } - inputBox.On( + s.inputBox.On( "RetrieveInputs", &retrieveInputsOpts_2, mock.Anything, mock.Anything, ).Return(events_2, nil) - inputBox.Unset("GetNumberOfInputs") - inputBox.On( - "GetNumberOfInputs", + s.inputBox.Unset("GetNumberOfInputs") + s.inputBox.On("GetNumberOfInputs", mock.Anything, mock.Anything, ).Return(new(big.Int).SetUint64(2), nil) - applicationContract.On( - "GetDeploymentBlockNumber", - mock.Anything, - ).Return(new(big.Int).SetUint64(10), nil) + s.contractFactory = newMockAdapterFactory().SetupDefaultBehaviorSingleApp(s.applicationContract1, s.inputBox) + s.evmReader.adapterFactory = s.contractFactory // Prepare Repo s.repository.Unset("ListApplications") - s.repository.On( - "ListApplications", + s.repository.On("ListApplications", mock.Anything, mock.Anything, mock.Anything, false, ).Return([]*Application{{ - IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - DataAvailability: DataAvailability_InputBox[:], - IInputBoxBlock: 0x10, - EpochLength: 10, - LastInputCheckBlock: 0x12, + Name: "my-app-1", + IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), + IConsensusAddress: common.HexToAddress("0xdeadbeef"), + IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), + DataAvailability: DataAvailability_InputBox[:], + IInputBoxBlock: 0x10, + EpochLength: 10, + LastInputCheckBlock: 0x12, + LastOutputCheckBlock: 0x12, }}, uint64(1), nil).Once() + s.repository.Unset("CreateEpochsAndInputs") - s.repository.On( - "CreateEpochsAndInputs", + s.repository.On("CreateEpochsAndInputs", mock.Anything, mock.Anything, mock.Anything, @@ -453,7 +270,25 @@ func (s *EvmReaderSuite) TestItReadsMultipleInputsFromSingleNewBlock() { mock.Anything, MonitoredEvent_OutputExecuted, mock.Anything, - ).Once().Return(nil) + ).Return(nil).Once() + + s.repository.Unset("GetNumberOfInputs") + s.repository.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Once() + + s.repository.Unset("GetEpoch") + s.repository.On("GetEpoch", + mock.Anything, + mock.Anything, + uint64(1)).Return(nil, nil).Once() + + s.repository.Unset("GetNumberOfExecutedOutputs") + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Once() // Start service ready := make(chan struct{}, 1) @@ -474,36 +309,30 @@ func (s *EvmReaderSuite) TestItReadsMultipleInputsFromSingleNewBlock() { // Give a time for time.Sleep(1 * time.Second) - inputBox.AssertNumberOfCalls(s.T(), "RetrieveInputs", 1) - s.repository.AssertNumberOfCalls( - s.T(), - "CreateEpochsAndInputs", - 1, - ) + s.repository.AssertNumberOfCalls(s.T(), "CreateEpochsAndInputs", 1) + s.repository.AssertExpectations(s.T()) + + s.inputBox.AssertNumberOfCalls(s.T(), "RetrieveInputs", 1) + s.inputBox.AssertExpectations(s.T()) + + s.applicationContract1.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) } -func (s *EvmReaderSuite) TestItStartsWhenLasProcessedBlockIsTheMostRecentBlock() { - //New EVM Reader +func (s *EvmReaderSuite) TestItStartsWhenLastProcessedBlockIsTheMostRecentBlock() { wsClient := FakeWSEhtClient{} s.evmReader.wsClient = &wsClient - // Prepare Client - s.client.Unset("HeaderByNumber") - s.client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header0, nil).Once() - // Prepare Repo s.repository.Unset("ListApplications") - s.repository.On( - "ListApplications", + s.repository.On("ListApplications", mock.Anything, mock.Anything, mock.Anything, false, ).Return([]*Application{{ + Name: "my-app-1", IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), IConsensusAddress: common.HexToAddress("0xdeadbeef"), IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), @@ -514,25 +343,24 @@ func (s *EvmReaderSuite) TestItStartsWhenLasProcessedBlockIsTheMostRecentBlock() LastOutputCheckBlock: 0x13, }}, uint64(1), nil).Once() + s.repository.Unset("CreateEpochsAndInputs") s.repository.Unset("UpdateEventLastCheckBlock") - s.repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_InputAdded, - mock.Anything, - ).Once().Return(nil) - s.repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_OutputExecuted, - mock.Anything, - ).Once().Return(nil) + s.repository.Unset("GetEpoch") + s.repository.Unset("GetNumberOfInputs") + s.repository.Unset("GetNumberOfExecutedOutputs") + + s.inputBox.Unset("RetrieveInputs") + s.inputBox.Unset("GetNumberOfInputs") + + s.applicationContract1.Unset("GetDeploymentBlockNumber") + s.applicationContract1.Unset("RetrieveOutputExecutionEvents") + s.applicationContract1.Unset("GetNumberOfExecutedOutputs") - inputBox := newMockInputBox() - s.contractFactory.Unset("NewInputSource") - s.contractFactory.On("NewInputSource", + s.contractFactory.Unset("CreateAdapters") + s.contractFactory.On("CreateAdapters", + mock.Anything, mock.Anything, - ).Return(inputBox, nil) + ).Return(s.applicationContract1, s.inputBox, nil).Once() // Start service ready := make(chan struct{}, 1) @@ -552,10 +380,9 @@ func (s *EvmReaderSuite) TestItStartsWhenLasProcessedBlockIsTheMostRecentBlock() wsClient.fireNewHead(&header2) time.Sleep(1 * time.Second) - inputBox.AssertNumberOfCalls(s.T(), "RetrieveInputs", 0) - s.repository.AssertNumberOfCalls( - s.T(), - "CreateEpochsAndInputs", - 0, - ) + s.repository.AssertExpectations(s.T()) + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) } diff --git a/internal/evmreader/inputsource_adapter.go b/internal/evmreader/inputsource_adapter.go index 4a15c7a35..0ae3204a1 100644 --- a/internal/evmreader/inputsource_adapter.go +++ b/internal/evmreader/inputsource_adapter.go @@ -17,6 +17,15 @@ import ( "github.com/ethereum/go-ethereum/ethclient" ) +// Interface for Input reading +type InputSourceAdapter interface { + // Wrapper for FilterInputAdded(), which is automatically generated + // by go-ethereum and cannot be used for testing + RetrieveInputs(opts *bind.FilterOpts, appAddresses []common.Address, index []*big.Int, + ) ([]iinputbox.IInputBoxInputAdded, error) + GetNumberOfInputs(opts *bind.CallOpts, appContract common.Address) (*big.Int, error) +} + // InputBox Wrapper type InputSourceAdapterImpl struct { inputbox *iinputbox.IInputBox diff --git a/internal/evmreader/output.go b/internal/evmreader/output.go index 3a2f2b433..d1a8ba60d 100644 --- a/internal/evmreader/output.go +++ b/internal/evmreader/output.go @@ -6,21 +6,69 @@ package evmreader import ( "bytes" "context" + "encoding/hex" + "errors" + "fmt" "math/big" . "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/contracts/iapplication" + "github.com/cartesi/rollups-node/pkg/ethutil" "github.com/ethereum/go-ethereum/accounts/abi/bind" ) // Find an appropriate value to start scanning the blockchain for executed outputs of this application. // Application deployment block is a safe block to start since this event is emitted by the application contract itself. // We use input box block as a fallback. -func findLastOutputCheckBlock(app *appContracts, mostRecentBlockNumberCallOpts *bind.CallOpts) uint64 { - deploymentBlockNumberBig, err := app.applicationContract.GetDeploymentBlockNumber(mostRecentBlockNumberCallOpts) +func (r *Service) initializeNewApplicationOutputExecutionSync( + ctx context.Context, + app *appContracts, + mostRecentBlockNumber uint64, +) (uint64, error) { + r.Logger.Info("Initializing application output execution sync", + "application", app.application.Name, + "current_block", mostRecentBlockNumber, + ) + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(mostRecentBlockNumber), + } + deploymentBlock, err := app.applicationContract.GetDeploymentBlockNumber(callOpts) + if err != nil { + r.Logger.Error("Error retrieving application deployment block number", + "application", app.application.Name, + "address", app.application.IApplicationAddress, + "error", err, + ) + return 0, err + } + if deploymentBlock.Sign() <= 0 { + r.Logger.Error("Invalid application deployment block number retrieved", + "application", app.application.Name, + "address", app.application.IApplicationAddress, + "block_number", deploymentBlock.Uint64(), + ) + return 0, errors.New("invalid application deployment block number retrieved") + } + + lastOutputCheckBlock := deploymentBlock.Uint64() - 1 + err = r.repository.UpdateEventLastCheckBlock(ctx, []int64{app.application.ID}, MonitoredEvent_OutputExecuted, lastOutputCheckBlock) if err != nil { - return app.application.IInputBoxBlock - 1 + r.Logger.Error("Failed to update application LastOutputCheckBlock", + "application", app.application.Name, + "last_output_check_block", lastOutputCheckBlock, + "error", err, + ) + return 0, err } - return deploymentBlockNumberBig.Uint64() - 1 + r.Logger.Debug("Application output execution sync initialized", + "application", app.application.Name, + "deployment_block", deploymentBlock.Uint64(), + "next_search_block", lastOutputCheckBlock+1, + "current_block", mostRecentBlockNumber, + ) + app.application.LastOutputCheckBlock = lastOutputCheckBlock + return app.application.LastOutputCheckBlock, nil } func (r *Service) checkForOutputExecution( @@ -33,28 +81,26 @@ func (r *Service) checkForOutputExecution( r.Logger.Debug("Checking for new Output Executed Events", "apps", appAddresses) - mostRecentBlockNumberCallOpts := &bind.CallOpts{ - BlockNumber: new(big.Int).SetUint64(mostRecentBlockNumber), - } - for _, app := range apps { lastOutputCheck := app.application.LastOutputCheckBlock if lastOutputCheck == 0 { // New application. Find a safe start block to scan for outputs - lastOutputCheck = findLastOutputCheckBlock(&app, mostRecentBlockNumberCallOpts) - r.Logger.Info("Initializing application output execution sync", - "application", app.application.Name, - "inputbox_block", app.application.IInputBoxBlock, - "next_search_block", lastOutputCheck+1, - "current_block", mostRecentBlockNumber, - ) - app.application.LastOutputCheckBlock = lastOutputCheck + var err error + lastOutputCheck, err = r.initializeNewApplicationOutputExecutionSync(ctx, &app, mostRecentBlockNumber) + if err != nil { + r.Logger.Error("Failed to initialize application output execution sync", + "application", app.application.Name, + "most_recent_block", mostRecentBlockNumber, + "error", err, + ) + continue + } } if mostRecentBlockNumber > lastOutputCheck { r.Logger.Debug("Checking output execution for application", "application", app.application.Name, "address", app.application.IApplicationAddress, - "last_output_check block", lastOutputCheck, + "last_output_check_block", lastOutputCheck, "most_recent_block", mostRecentBlockNumber) r.readAndUpdateOutputs(ctx, app, lastOutputCheck, mostRecentBlockNumber) @@ -67,10 +113,10 @@ func (r *Service) checkForOutputExecution( "most_recent_block", mostRecentBlockNumber, ) } else { - r.Logger.Warn("Not reading output execution: already checked the most recent blocks", + r.Logger.Debug("Not reading output execution: already checked the most recent blocks", "application", app.application.Name, "address", app.application.IApplicationAddress, - "last output check block", lastOutputCheck, - "most recent block", mostRecentBlockNumber, + "last_output_check_block", lastOutputCheck, + "most_recent_block", mostRecentBlockNumber, ) } } @@ -80,15 +126,8 @@ func (r *Service) checkForOutputExecution( func (r *Service) readAndUpdateOutputs( ctx context.Context, app appContracts, lastOutputCheck, mostRecentBlockNumber uint64) { - contract := app.applicationContract - - opts := &bind.FilterOpts{ - Context: ctx, - Start: lastOutputCheck + 1, - End: &mostRecentBlockNumber, - } - - outputExecutedEvents, err := contract.RetrieveOutputExecutionEvents(opts) + nextSearchBlock := lastOutputCheck + 1 + outputExecutedEvents, err := r.readOutputExecutionsFromBlockChain(ctx, app, nextSearchBlock, mostRecentBlockNumber) if err != nil { r.Logger.Error("Error reading output events", "application", app.application.Name, "address", app.application.IApplicationAddress, @@ -117,9 +156,14 @@ func (r *Service) readAndUpdateOutputs( } return } + r.Logger.Debug("Found output executed events", + "application", app.application.Name, + "address", app.application.IApplicationAddress, + "output_executed_events", len(outputExecutedEvents), + ) // Should we check the output hash?? - var executedOutputs []*Output + executedOutputs := make([]*Output, 0, len(outputExecutedEvents)) for _, event := range outputExecutedEvents { // Compare output to check it is the correct one @@ -140,16 +184,12 @@ func (r *Service) readAndUpdateOutputs( } if !bytes.Equal(output.RawData, event.Output) { - r.Logger.Debug("Output mismatch", - "application", app.application.Name, "address", app.application.IApplicationAddress, - "index", event.OutputIndex, - "actual", output.RawData, - "event's", event.Output) - - r.Logger.Error("Output mismatch. Application is in an invalid state", - "application", app.application.Name, "address", app.application.IApplicationAddress, - "index", event.OutputIndex) - + _ = r.setApplicationInoperable(ctx, app.application, + "Output mismatch. Application is in an invalid state. Output Index %d, raw data %s != event data %s", + output.Index, + "0x"+hex.EncodeToString(output.RawData), + "0x"+hex.EncodeToString(event.Output), + ) return } @@ -169,3 +209,66 @@ func (r *Service) readAndUpdateOutputs( } } + +func (r *Service) readOutputExecutionsFromBlockChain( + ctx context.Context, + app appContracts, + startBlock, endBlock uint64, +) ([]*iapplication.IApplicationOutputExecuted, error) { + r.Logger.Debug("Fetching Output Execution events for application", + "application", app.application.Name, + "start_block", startBlock, + "end_block", endBlock, + ) + + // Define oracle function that returns the number of output execution events at a given block + oracle := func(ctx context.Context, block uint64) (*big.Int, error) { + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(block), + } + numInputs, err := app.applicationContract.GetNumberOfExecutedOutputs(callOpts) + if err != nil { + return nil, fmt.Errorf("failed to get number of Executed outputs at block %d: %w", block, err) + } + return numInputs, nil + } + + var executedOutputs []*iapplication.IApplicationOutputExecuted + // Define onHit function that accumulates inputs at transition blocks + onHit := func(block uint64) error { + filterOpts := &bind.FilterOpts{ + Context: ctx, + Start: block, + End: &block, + } + execEvents, err := app.applicationContract.RetrieveOutputExecutionEvents(filterOpts) + if err != nil { + return fmt.Errorf("failed to retrieve inputs at block %d: %w", block, err) + } + executedOutputs = append(executedOutputs, execEvents...) + return nil + } + + prevValue := &big.Int{} + execCount, err := r.repository.GetNumberOfExecutedOutputs(ctx, app.application.IApplicationAddress.String()) + if err != nil { + return nil, fmt.Errorf("failed to get number of executed outputs from repository: %w", err) + } + prevValue.SetUint64(execCount) + + // Use FindTransitions to find blocks where inputs were added + _, err = ethutil.FindTransitions(ctx, startBlock, endBlock, prevValue, oracle, onHit) + if err != nil { + return nil, fmt.Errorf("failed to walk input transitions: %w", err) + } + + r.Logger.Debug("Fetched output executed events for application", + "application", app.application.Name, + "start_block", startBlock, + "end_block", endBlock, + "prev_executed_output_count", prevValue.Uint64(), + "new_executed_outputs", len(executedOutputs), + ) + return executedOutputs, nil +} diff --git a/internal/evmreader/output_test.go b/internal/evmreader/output_test.go index 3c1f09f6e..99de38847 100644 --- a/internal/evmreader/output_test.go +++ b/internal/evmreader/output_test.go @@ -6,57 +6,331 @@ package evmreader import ( "context" "errors" + "math/big" "time" + "github.com/cartesi/rollups-node/internal/config" . "github.com/cartesi/rollups-node/internal/model" - appcontract "github.com/cartesi/rollups-node/pkg/contracts/iapplication" + "github.com/cartesi/rollups-node/pkg/contracts/iapplication" "github.com/cartesi/rollups-node/pkg/contracts/iinputbox" "github.com/cartesi/rollups-node/pkg/service" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/mock" ) +// Prepare Output Executed Events +var output0 = &Output{ + Index: 0, + RawData: common.Hex2Bytes("AABBCCDDEE"), +} +var output1 = &Output{ + Index: 1, + RawData: common.Hex2Bytes("AABBCCDDEE"), +} +var outputExecution0 = &iapplication.IApplicationOutputExecuted{ + OutputIndex: output0.Index, + Output: output0.RawData, + Raw: types.Log{ + TxHash: common.HexToHash("0xdeadbeef"), + }, +} +var outputExecution1 = &iapplication.IApplicationOutputExecuted{ + OutputIndex: output1.Index, + Output: output1.RawData, + Raw: types.Log{ + TxHash: common.HexToHash("0xbeefbeef"), + }, +} + +func (s *EvmReaderSuite) setupOutputExecution() { + s.applicationContract1.Unset("GetNumberOfExecutedOutputs") + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(2), nil).Once() + + s.applicationContract1.On("RetrieveOutputExecutionEvents", + mock.Anything, + ).Return([]*iapplication.IApplicationOutputExecuted{outputExecution0}, nil).Once() + s.applicationContract1.On("RetrieveOutputExecutionEvents", + mock.Anything, + ).Return([]*iapplication.IApplicationOutputExecuted{outputExecution1}, nil).Once() + + s.repository.Unset("UpdateEventLastCheckBlock") + s.repository.On("UpdateEventLastCheckBlock", + mock.Anything, + mock.Anything, + MonitoredEvent_InputAdded, + mock.Anything, + ).Return(nil).Times(1) + s.repository.On("UpdateEventLastCheckBlock", + mock.Anything, + mock.Anything, + MonitoredEvent_OutputExecuted, + mock.Anything, + ).Return(nil).Times(6) + + s.repository.Unset("GetOutput") + s.repository.On("GetOutput", + mock.Anything, + mock.Anything, + mock.Anything).Return(output0, nil).Once() + s.repository.On("GetOutput", + mock.Anything, + mock.Anything, + mock.Anything).Return(output1, nil).Once() + + s.repository.Unset("GetNumberOfExecutedOutputs") + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Twice() + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(1), nil).Once() + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Once() + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(1), nil).Once() + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Once() + + s.repository.Unset("UpdateOutputsExecution") + s.repository.On("UpdateOutputsExecution", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Run(func(arguments mock.Arguments) { + obj := arguments.Get(2) + outputs, ok := obj.([]*Output) + s.Require().True(ok) + s.Require().Equal(1, len(outputs)) + output := outputs[0] + s.Require().NotNil(output) + s.Require().Equal(uint64(0), output.Index) + s.Require().Equal(outputExecution0.Raw.TxHash, *output.ExecutionTransactionHash) + }).Return(nil).Once() + s.repository.On("UpdateOutputsExecution", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Run(func(arguments mock.Arguments) { + obj := arguments.Get(2) + outputs, ok := obj.([]*Output) + s.Require().True(ok) + s.Require().Equal(1, len(outputs)) + output := outputs[0] + s.Require().NotNil(output) + s.Require().Equal(uint64(1), output.Index) + s.Require().Equal(outputExecution1.Raw.TxHash, *output.ExecutionTransactionHash) + }).Return(nil).Once() +} + func (s *EvmReaderSuite) TestOutputExecution() { wsClient := FakeWSEhtClient{} s.evmReader.wsClient = &wsClient - otherDA := DataAvailability_InputBox - otherDA[0]++ + s.setupOutputExecution() + + // Start service + ready := make(chan struct{}, 1) + errChannel := make(chan error, 1) + + go func() { + errChannel <- s.evmReader.Run(s.ctx, ready) + }() + + select { + case <-ready: + break + case err := <-errChannel: + s.FailNow("unexpected error signal", err) + } + + wsClient.fireNewHead(&header0) + wsClient.fireNewHead(&header1) + wsClient.fireNewHead(&header2) + time.Sleep(1 * time.Second) + + s.repository.AssertNumberOfCalls(s.T(), "UpdateOutputsExecution", 2) + s.repository.AssertExpectations(s.T()) + + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) + +} + +func (s *EvmReaderSuite) TestOutputExecutionOnFinalizedBlocks() { + wsClient := FakeWSEhtClient{} + s.evmReader.wsClient = &wsClient + + s.evmReader.defaultBlock = DefaultBlock_Finalized + + s.client.On("HeaderByNumber", + mock.Anything, + mock.Anything, + ).Return(&header0, nil).Once() + s.client.On("HeaderByNumber", + mock.Anything, + mock.Anything, + ).Return(&header1, nil).Once() + s.client.On("HeaderByNumber", + mock.Anything, + mock.Anything, + ).Return(&header2, nil).Once() + + s.setupOutputExecution() + + // Start service + ready := make(chan struct{}, 1) + errChannel := make(chan error, 1) + + go func() { + errChannel <- s.evmReader.Run(s.ctx, ready) + }() + + select { + case <-ready: + break + case err := <-errChannel: + s.FailNow("unexpected error signal", err) + } + + wsClient.fireNewHead(&header3) + wsClient.fireNewHead(&header3) + wsClient.fireNewHead(&header3) + time.Sleep(1 * time.Second) + + s.repository.AssertNumberOfCalls(s.T(), "UpdateOutputsExecution", 2) + s.repository.AssertExpectations(s.T()) + + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) + +} + +func (s *EvmReaderSuite) TestCheckOutputFailsWhenRetrieveOutputsFails() { + wsClient := FakeWSEhtClient{} + s.evmReader.wsClient = &wsClient + + s.setupOutputExecution() + + s.applicationContract1.Unset("RetrieveOutputExecutionEvents") + s.applicationContract1.On("RetrieveOutputExecutionEvents", + mock.Anything, + ).Return([]*iapplication.IApplicationOutputExecuted{}, errors.New("No outputs for you")).Times(3) + + // If retrieving outputs fails, it does not update the database and keep scanning the ranges + s.applicationContract1.Unset("GetNumberOfExecutedOutputs") + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Twice() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Twice() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Twice() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(2), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() - // Prepare repository + apps := copyApplications(applications) s.repository.Unset("ListApplications") - s.repository.On( - "ListApplications", + s.repository.On("ListApplications", + mock.Anything, + mock.Anything, + mock.Anything, + false, + ).Return(apps, uint64(2), nil).Once() + + apps = copyApplications(applications) + apps[0].LastInputCheckBlock = 0x11 + apps[0].LastOutputCheckBlock = 0x0F + apps[1].LastOutputCheckBlock = 0x11 + s.repository.On("ListApplications", mock.Anything, mock.Anything, mock.Anything, false, - ).Return([]*Application{{ - IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - DataAvailability: DataAvailability_InputBox[:], - IInputBoxBlock: 0x10, - EpochLength: 10, - LastInputCheckBlock: 0x01, // don't fast sync inputs - LastOutputCheckBlock: 0x10, - }}, uint64(1), nil).Once() - s.repository.On( - "ListApplications", + ).Return(apps, uint64(2), nil).Once() + + apps = copyApplications(applications) + apps[0].LastInputCheckBlock = 0x12 + apps[0].LastOutputCheckBlock = 0x0F + apps[1].LastOutputCheckBlock = 0x12 + s.repository.On("ListApplications", mock.Anything, mock.Anything, mock.Anything, false, - ).Return([]*Application{{ - IApplicationAddress: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"), - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - DataAvailability: otherDA[:], - IInputBoxBlock: 0x10, - EpochLength: 10, - LastOutputCheckBlock: 0x11, - }}, uint64(1), nil).Once() + ).Return(apps, uint64(2), nil).Once() + + s.repository.Unset("GetNumberOfExecutedOutputs") + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Times(6) + + s.repository.Unset("GetOutput") + s.repository.Unset("UpdateOutputsExecution") s.repository.Unset("UpdateEventLastCheckBlock") s.repository.On("UpdateEventLastCheckBlock", @@ -64,47 +338,156 @@ func (s *EvmReaderSuite) TestOutputExecution() { mock.Anything, MonitoredEvent_InputAdded, mock.Anything, - ).Once().Return(nil) + ).Return(nil).Times(1) s.repository.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, MonitoredEvent_OutputExecuted, mock.Anything, - ).Once().Return(nil) - s.repository.On("UpdateEventLastCheckBlock", + ).Return(nil).Times(5) + + // Start service + ready := make(chan struct{}, 1) + errChannel := make(chan error, 1) + + go func() { + errChannel <- s.evmReader.Run(s.ctx, ready) + }() + + select { + case <-ready: + break + case err := <-errChannel: + s.FailNow("unexpected error signal", err) + } + + wsClient.fireNewHead(&header0) + wsClient.fireNewHead(&header1) + wsClient.fireNewHead(&header2) + time.Sleep(1 * time.Second) + + s.repository.AssertNumberOfCalls(s.T(), "UpdateOutputsExecution", 0) + s.repository.AssertExpectations(s.T()) + + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) + +} + +func (s *EvmReaderSuite) TestCheckOutputFailsWhenGetOutputsFails() { + wsClient := FakeWSEhtClient{} + s.evmReader.wsClient = &wsClient + + s.setupOutputExecution() + + s.repository.Unset("GetOutput") + s.repository.On("GetOutput", mock.Anything, mock.Anything, - MonitoredEvent_InputAdded, + mock.Anything).Return(nil, errors.New("no output for you")).Times(3) + + // If retrieving outputs fails, it does not update the database and keep scanning the ranges + s.applicationContract1.Unset("GetNumberOfExecutedOutputs") + s.applicationContract1.On("GetNumberOfExecutedOutputs", mock.Anything, - ).Once().Return(nil) - s.repository.On("UpdateEventLastCheckBlock", mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", mock.Anything, - MonitoredEvent_OutputExecuted, mock.Anything, - ).Once().Return(nil) + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Twice() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Twice() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Twice() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(2), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() - inputBox := newMockInputBox() + s.applicationContract1.Unset("RetrieveOutputExecutionEvents") + s.applicationContract1.On("RetrieveOutputExecutionEvents", + mock.Anything, + ).Return([]*iapplication.IApplicationOutputExecuted{outputExecution0}, nil).Times(3) + s.applicationContract1.On("RetrieveOutputExecutionEvents", + mock.Anything, + ).Return([]*iapplication.IApplicationOutputExecuted{outputExecution1}, nil).Once() - inputBox.Unset("RetrieveInputs") - inputBox.On("RetrieveInputs", + apps := copyApplications(applications) + s.repository.Unset("ListApplications") + s.repository.On("ListApplications", mock.Anything, mock.Anything, mock.Anything, - ).Return([]iinputbox.IInputBoxInputAdded{}, nil) + false, + ).Return(apps, uint64(2), nil).Once() - // Prepare Client - s.client.Unset("HeaderByNumber") - s.client.On( - "HeaderByNumber", + apps = copyApplications(applications) + apps[0].LastInputCheckBlock = 0x11 + apps[0].LastOutputCheckBlock = 0x0F + apps[1].LastOutputCheckBlock = 0x11 + s.repository.On("ListApplications", mock.Anything, mock.Anything, - ).Return(&header0, nil).Once() - s.client.On( - "HeaderByNumber", mock.Anything, + false, + ).Return(apps, uint64(2), nil).Once() + + apps = copyApplications(applications) + apps[0].LastInputCheckBlock = 0x12 + apps[0].LastOutputCheckBlock = 0x0F + apps[1].LastOutputCheckBlock = 0x12 + s.repository.On("ListApplications", mock.Anything, - ).Return(&header1, nil).Once() + mock.Anything, + mock.Anything, + false, + ).Return(apps, uint64(2), nil).Once() + + s.repository.Unset("GetNumberOfExecutedOutputs") + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Times(6) + + s.repository.Unset("UpdateOutputsExecution") + + s.repository.Unset("UpdateEventLastCheckBlock") + s.repository.On("UpdateEventLastCheckBlock", + mock.Anything, + mock.Anything, + MonitoredEvent_InputAdded, + mock.Anything, + ).Return(nil).Times(1) + s.repository.On("UpdateEventLastCheckBlock", + mock.Anything, + mock.Anything, + MonitoredEvent_OutputExecuted, + mock.Anything, + ).Return(nil).Times(5) // Start service ready := make(chan struct{}, 1) @@ -123,126 +506,191 @@ func (s *EvmReaderSuite) TestOutputExecution() { wsClient.fireNewHead(&header0) wsClient.fireNewHead(&header1) + wsClient.fireNewHead(&header2) time.Sleep(1 * time.Second) - s.repository.AssertNumberOfCalls( - s.T(), - "UpdateOutputsExecution", - 0, - ) + s.repository.AssertNumberOfCalls(s.T(), "UpdateOutputsExecution", 0) + s.repository.AssertExpectations(s.T()) + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) } -func (s *EvmReaderSuite) TestReadOutputExecution() { +func (s *EvmReaderSuite) setupOutputMismatchTest() { + s.client = newMockEthClient() + s.repository = newMockRepository() + s.applicationContract1 = newMockApplicationContract() + s.inputBox = newMockInputBox() + s.contractFactory = newMockAdapterFactory() + + s.evmReader = &Service{ + client: s.client, + wsClient: s.wsClient, + repository: s.repository, + defaultBlock: DefaultBlock_Latest, + adapterFactory: s.contractFactory, + hasEnabledApps: true, + inputReaderEnabled: true, + blockchainMaxRetries: 0, + blockchainSubscriptionRetryInterval: time.Second, + } - appAddress := common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E") + logLevel, err := config.GetLogLevel() + s.Require().Nil(err) - // Contract Factory - applicationContract := &MockApplicationContract{} - inputBox := newMockInputBox() + serviceArgs := &service.CreateInfo{Name: "evm-reader", Impl: s.evmReader, LogLevel: logLevel} + err = service.Create(context.Background(), serviceArgs, &s.evmReader.Service) + s.Require().Nil(err) - // Setup adapter factory - adapterFactory := newMockAdapterFactory() - adapterFactory.Unset("CreateAdapters") - adapterFactory.On("CreateAdapters", + apps := copyApplications(applications) + s.repository.On("ListApplications", mock.Anything, mock.Anything, - ).Return(applicationContract, inputBox, nil) - - //New EVM Reader - wsClient := FakeWSEhtClient{} - s.evmReader.wsClient = &wsClient - s.evmReader.adapterFactory = adapterFactory - - // Prepare Output Executed Events - outputExecution0 := &appcontract.IApplicationOutputExecuted{ - OutputIndex: 1, - Output: common.Hex2Bytes("AABBCCDDEE"), - Raw: types.Log{ - TxHash: common.HexToHash("0xdeadbeef"), - }, - } + mock.Anything, + false, + ).Return(apps, uint64(2), nil).Once() - outputExecutionEvents := []*appcontract.IApplicationOutputExecuted{outputExecution0} - applicationContract.On("RetrieveOutputExecutionEvents", + apps = copyApplications(applications[1:2]) + apps[0].LastOutputCheckBlock = 0x11 + s.repository.On("ListApplications", + mock.Anything, mock.Anything, - ).Return(outputExecutionEvents, nil).Once() + mock.Anything, + false, + ).Return(apps, uint64(1), nil).Once() - // Prepare repository - s.repository.Unset("ListApplications") - s.repository.On( - "ListApplications", + apps = copyApplications(applications[1:2]) + apps[0].LastOutputCheckBlock = 0x12 + s.repository.On("ListApplications", mock.Anything, mock.Anything, mock.Anything, false, - ).Return([]*Application{{ - IApplicationAddress: appAddress, - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - IInputBoxBlock: 0x10, - EpochLength: 10, - LastOutputCheckBlock: 0x10, - }}, uint64(1), nil).Once() + ).Return(apps, uint64(1), nil).Once() - s.repository.Unset("UpdateEventLastCheckBlock") s.repository.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, MonitoredEvent_InputAdded, mock.Anything, - ).Once().Return(nil) + ).Return(nil).Times(1) s.repository.On("UpdateEventLastCheckBlock", mock.Anything, mock.Anything, MonitoredEvent_OutputExecuted, mock.Anything, - ).Once().Return(nil) + ).Return(nil).Times(5) + + s.repository.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Once().Return(uint64(0), nil) + + s.repository.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(uint64(0), nil).Times(4) + + s.repository.On("CreateEpochsAndInputs", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil).Once() + + s.repository.On("GetEpoch", + mock.Anything, + mock.Anything, + uint64(0)).Return(nil, nil).Once() output := &Output{ Index: 1, - RawData: common.Hex2Bytes("AABBCCDDEE"), + RawData: common.Hex2Bytes("FFBBCCDDEE"), } - - s.repository.Unset("GetOutput") s.repository.On("GetOutput", mock.Anything, mock.Anything, - mock.Anything).Return(output, nil) + mock.Anything, + ).Return(output, nil).Once() - s.repository.Unset("UpdateOutputsExecution") - s.repository.On("UpdateOutputsExecution", + s.repository.On("UpdateApplicationState", mock.Anything, + applications[0].ID, + ApplicationState_Inoperable, mock.Anything, + ).Return(nil).Once() + + s.applicationContract1.On("GetDeploymentBlockNumber", mock.Anything, + ).Return(new(big.Int).SetUint64(0x10), nil).Once() + + s.applicationContract1.On("GetNumberOfExecutedOutputs", mock.Anything, - ).Once().Run(func(arguments mock.Arguments) { - obj := arguments.Get(2) - outputs, ok := obj.([]*Output) - s.Require().True(ok) - s.Require().Equal(1, len(outputs)) - output := outputs[0] - s.Require().NotNil(output) - s.Require().Equal(uint64(1), output.Index) - s.Require().Equal(common.HexToHash("0xdeadbeef"), *output.ExecutionTransactionHash) + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.applicationContract1.On("GetNumberOfExecutedOutputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() - }).Return(nil) + s.applicationContract1.On("RetrieveOutputExecutionEvents", + mock.Anything, + ).Return([]*iapplication.IApplicationOutputExecuted{outputExecution0}, nil).Once() - //No Inputs - inputBox.Unset("RetrieveInputs") - inputBox.On("RetrieveInputs", + events0 := []iinputbox.IInputBoxInputAdded{inputAddedEvent0} + retrieveInputsOpts0 := bind.FilterOpts{ + Context: s.ctx, + Start: 0x11, + End: Pointer(uint64(0x11)), + } + s.inputBox.On("RetrieveInputs", + &retrieveInputsOpts0, mock.Anything, mock.Anything, + ).Return(events0, nil).Once() + + s.inputBox.On("GetNumberOfInputs", mock.Anything, - ).Return([]iinputbox.IInputBoxInputAdded{}, nil) + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Once() + s.inputBox.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(1), nil).Once() + s.inputBox.On("GetNumberOfInputs", + mock.Anything, + mock.Anything, + ).Return(new(big.Int).SetUint64(0), nil).Times(4) - // Prepare Client - s.client.Unset("HeaderByNumber") - s.client.On( - "HeaderByNumber", + s.contractFactory.On("CreateAdapters", mock.Anything, mock.Anything, - ).Return(&header0, nil).Once() + ).Return(s.applicationContract1, s.inputBox, nil).Once() + s.contractFactory.On("CreateAdapters", + mock.Anything, + mock.Anything, + ).Return(s.applicationContract2, nil, nil).Once() + s.contractFactory.On("CreateAdapters", + mock.Anything, + mock.Anything, + ).Return(s.applicationContract2, nil, nil).Once() + s.contractFactory.On("CreateAdapters", + mock.Anything, + mock.Anything, + ).Return(s.applicationContract2, nil, nil).Once() +} + +func (s *EvmReaderSuite) TestCheckOutputFailsWhenOutputMismatches() { + s.setupOutputMismatchTest() + + wsClient := FakeWSEhtClient{} + s.evmReader.wsClient = &wsClient // Start service ready := make(chan struct{}, 1) @@ -260,393 +708,17 @@ func (s *EvmReaderSuite) TestReadOutputExecution() { } wsClient.fireNewHead(&header0) + wsClient.fireNewHead(&header1) + wsClient.fireNewHead(&header2) time.Sleep(1 * time.Second) - s.repository.AssertNumberOfCalls( - s.T(), - "UpdateOutputsExecution", - 1, - ) + s.repository.AssertNumberOfCalls(s.T(), "UpdateOutputsExecution", 0) + s.repository.AssertExpectations(s.T()) -} + s.inputBox.AssertExpectations(s.T()) + s.applicationContract1.AssertExpectations(s.T()) + s.applicationContract2.AssertExpectations(s.T()) + s.contractFactory.AssertExpectations(s.T()) + s.client.AssertExpectations(s.T()) -func (s *EvmReaderSuite) TestCheckOutputFails() { - s.Run("whenRetrieveOutputsFails", func() { - ctx := context.Background() - //ctx, cancel := context.WithCancel(context.Background()) - //defer cancel() - - appAddress := common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E") - - // Contract Factory - applicationContract := &MockApplicationContract{} - inputBox := newMockInputBox() - - // Setup adapter factory - adapterFactory := newMockAdapterFactory() - adapterFactory.Unset("CreateAdapters") - adapterFactory.On("CreateAdapters", - mock.Anything, - mock.Anything, - ).Return(applicationContract, inputBox, nil) - - //New EVM Reader - client := newMockEthClient() - wsClient := FakeWSEhtClient{} - repository := newMockRepository() - evmReader := Service{ - client: client, - wsClient: &wsClient, - repository: repository, - defaultBlock: DefaultBlock_Latest, - adapterFactory: adapterFactory, - hasEnabledApps: true, - inputReaderEnabled: true, - } - serviceArgs := &service.CreateInfo{Name: "evm-reader", Impl: &evmReader} - err := service.Create(ctx, serviceArgs, &evmReader.Service) - s.Require().Nil(err) - - applicationContract.On("RetrieveOutputExecutionEvents", - mock.Anything, - ).Return([]*appcontract.IApplicationOutputExecuted{}, errors.New("No outputs for you")) - - // Prepare repository - repository.Unset("ListApplications") - repository.On( - "ListApplications", - mock.Anything, - mock.Anything, - mock.Anything, - false, - ).Return([]*Application{{ - IApplicationAddress: appAddress, - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - IInputBoxBlock: 0x10, - EpochLength: 10, - LastOutputCheckBlock: 0x10, - }}, uint64(1), nil).Once() - - output := &Output{ - Index: 1, - RawData: common.Hex2Bytes("AABBCCDDEE"), - } - - repository.Unset("GetOutput") - repository.On("GetOutput", - mock.Anything, - mock.Anything, - mock.Anything).Return(output, nil) - - repository.Unset("UpdateOutputsExecution") - repository.On("UpdateOutputsExecution", - mock.Anything, - mock.Anything, - mock.Anything, - mock.Anything, - ).Once().Return(nil) - - //No Inputs - inputBox.Unset("RetrieveInputs") - inputBox.On("RetrieveInputs", - mock.Anything, - mock.Anything, - mock.Anything, - ).Return([]iinputbox.IInputBoxInputAdded{}, nil) - - // Prepare Client - client.Unset("HeaderByNumber") - client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header0, nil).Once() - - //// Start service - //ready := make(chan struct{}, 1) - //errChannel := make(chan error, 1) - - //go func() { - // errChannel <- evmReader.Run(ctx, ready) - //}() - - //select { - //case <-ready: - // break - //case err := <-errChannel: - // s.FailNow("unexpected error signal", err) - //} - - //wsClient.fireNewHead(&header0) - //time.Sleep(1 * time.Second) - - //s.repository.AssertNumberOfCalls( - // s.T(), - // "UpdateOutputsExecution", - // 0, - //) - - }) - - s.Run("whenGetOutputsFails", func() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - appAddress := common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E") - - // Contract Factory - applicationContract := &MockApplicationContract{} - inputBox := newMockInputBox() - - // Setup adapter factory - adapterFactory := newMockAdapterFactory() - adapterFactory.Unset("CreateAdapters") - adapterFactory.On("CreateAdapters", - mock.Anything, - mock.Anything, - ).Return(applicationContract, inputBox, nil) - - //New EVM Reader - client := newMockEthClient() - wsClient := FakeWSEhtClient{} - repository := newMockRepository() - s.evmReader.client = client - s.evmReader.wsClient = &wsClient - s.evmReader.repository = repository - s.evmReader.adapterFactory = adapterFactory - - // Prepare Output Executed Events - outputExecution0 := &appcontract.IApplicationOutputExecuted{ - OutputIndex: 1, - Output: common.Hex2Bytes("AABBCCDDEE"), - Raw: types.Log{ - TxHash: common.HexToHash("0xdeadbeef"), - }, - } - - outputExecutionEvents := []*appcontract.IApplicationOutputExecuted{outputExecution0} - applicationContract.On("RetrieveOutputExecutionEvents", - mock.Anything, - ).Return(outputExecutionEvents, nil).Once() - - // Prepare repository - repository.Unset("ListApplications") - repository.On( - "ListApplications", - mock.Anything, - mock.Anything, - mock.Anything, - false, - ).Return([]*Application{{ - IApplicationAddress: appAddress, - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - IInputBoxBlock: 0x10, - EpochLength: 10, - LastOutputCheckBlock: 0x10, - }}, uint64(1), nil).Once() - - repository.Unset("UpdateEventLastCheckBlock") - repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_InputAdded, - mock.Anything, - ).Once().Return(nil) - repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_OutputExecuted, - mock.Anything, - ).Once().Return(nil) - - repository.Unset("GetOutput") - repository.On("GetOutput", - mock.Anything, - mock.Anything, - mock.Anything).Return(nil, errors.New("no output for you")) - - repository.Unset("UpdateOutputsExecution") - repository.On("UpdateOutputsExecution", - mock.Anything, - mock.Anything, - mock.Anything, - mock.Anything, - ).Once().Return(nil) - - //No Inputs - inputBox.Unset("RetrieveInputs") - inputBox.On("RetrieveInputs", - mock.Anything, - mock.Anything, - mock.Anything, - ).Return([]iinputbox.IInputBoxInputAdded{}, nil) - - // Prepare Client - client.Unset("HeaderByNumber") - client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header0, nil).Once() - - // Start service - ready := make(chan struct{}, 1) - errChannel := make(chan error, 1) - - go func() { - errChannel <- s.evmReader.Run(ctx, ready) - }() - - select { - case <-ready: - break - case err := <-errChannel: - s.FailNow("unexpected error signal", err) - } - - wsClient.fireNewHead(&header0) - time.Sleep(1 * time.Second) - - repository.AssertNumberOfCalls( - s.T(), - "UpdateOutputsExecution", - 0, - ) - - }) - - s.Run("whenOutputMismatch", func() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - appAddress := common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E") - - // Contract Factory - applicationContract := &MockApplicationContract{} - inputBox := newMockInputBox() - - // Setup adapter factory - adapterFactory := newMockAdapterFactory() - adapterFactory.Unset("CreateAdapters") - adapterFactory.On("CreateAdapters", - mock.Anything, - mock.Anything, - ).Return(applicationContract, inputBox, nil) - - //New EVM Reader - client := newMockEthClient() - wsClient := FakeWSEhtClient{} - repository := newMockRepository() - s.evmReader.client = client - s.evmReader.wsClient = &wsClient - s.evmReader.repository = repository - s.evmReader.adapterFactory = adapterFactory - - // Prepare Output Executed Events - outputExecution0 := &appcontract.IApplicationOutputExecuted{ - OutputIndex: 1, - Output: common.Hex2Bytes("AABBCCDDEE"), - Raw: types.Log{ - TxHash: common.HexToHash("0xdeadbeef"), - }, - } - - outputExecutionEvents := []*appcontract.IApplicationOutputExecuted{outputExecution0} - applicationContract.On("RetrieveOutputExecutionEvents", - mock.Anything, - ).Return(outputExecutionEvents, nil).Once() - - // Prepare repository - repository.Unset("ListApplications") - repository.On( - "ListApplications", - mock.Anything, - mock.Anything, - mock.Anything, - false, - ).Return([]*Application{{ - IApplicationAddress: appAddress, - IConsensusAddress: common.HexToAddress("0xdeadbeef"), - IInputBoxAddress: common.HexToAddress("0xBa3Cf8fB82E43D370117A0b7296f91ED674E94e3"), - IInputBoxBlock: 0x10, - EpochLength: 10, - LastOutputCheckBlock: 0x10, - }}, uint64(1), nil).Once() - - output := &Output{ - Index: 1, - RawData: common.Hex2Bytes("FFBBCCDDEE"), - } - - repository.Unset("UpdateEventLastCheckBlock") - repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_InputAdded, - mock.Anything, - ).Once().Return(nil) - repository.On("UpdateEventLastCheckBlock", - mock.Anything, - mock.Anything, - MonitoredEvent_OutputExecuted, - mock.Anything, - ).Once().Return(nil) - - repository.Unset("GetOutput") - repository.On("GetOutput", - mock.Anything, - mock.Anything, - mock.Anything).Return(output, nil) - - repository.Unset("UpdateOutputsExecution") - repository.On("UpdateOutputsExecution", - mock.Anything, - mock.Anything, - mock.Anything, - mock.Anything, - ).Once().Return(nil) - - //No Inputs - inputBox.Unset("RetrieveInputs") - inputBox.On("RetrieveInputs", - mock.Anything, - mock.Anything, - mock.Anything, - ).Return([]iinputbox.IInputBoxInputAdded{}, nil) - - // Prepare Client - client.Unset("HeaderByNumber") - client.On( - "HeaderByNumber", - mock.Anything, - mock.Anything, - ).Return(&header0, nil).Once() - - // Start service - ready := make(chan struct{}, 1) - errChannel := make(chan error, 1) - - go func() { - errChannel <- s.evmReader.Run(ctx, ready) - }() - - select { - case <-ready: - break - case err := <-errChannel: - s.FailNow("unexpected error signal", err) - } - - wsClient.fireNewHead(&header0) - time.Sleep(1 * time.Second) - - repository.AssertNumberOfCalls( - s.T(), - "UpdateOutputsExecution", - 0, - ) - - }) } diff --git a/internal/evmreader/sealedepochs.go b/internal/evmreader/sealedepochs.go new file mode 100644 index 000000000..0e88a37d7 --- /dev/null +++ b/internal/evmreader/sealedepochs.go @@ -0,0 +1,535 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package evmreader + +import ( + "context" + "errors" + "fmt" + "math" + "math/big" + + . "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/contracts/idaveconsensus" + "github.com/cartesi/rollups-node/pkg/ethutil" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" +) + +func (r *Service) initializeNewApplicationSealedEpochSync( + ctx context.Context, + app appContracts, + mostRecentBlockNumber uint64, +) error { + r.Logger.Info("Initializing application sealed epoch sync", + "application", app.application.Name, + "current_block", mostRecentBlockNumber, + ) + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(mostRecentBlockNumber), + } + deploymentBlock, err := app.daveConsensus.GetDeploymentBlockNumber(callOpts) + if err != nil { + r.Logger.Error("Error retrieving dave consensus deployment block number", + "application", app.application.Name, + "address", app.application.IApplicationAddress, + "consensus_address", app.application.IConsensusAddress, + "error", err, + ) + return fmt.Errorf("failed to retrieve DaveConsensus deployment block: %w", err) + } + if deploymentBlock.Sign() <= 0 { + r.Logger.Error("Invalid dave consensus deployment block number retrieved", + "application", app.application.Name, + "address", app.application.IApplicationAddress, + "consensus_address", app.application.IConsensusAddress, + "block_number", deploymentBlock.Uint64(), + ) + return errors.New("invalid dave consensus deployment block number retrieved") + } + + lastEpochCheckBlock := deploymentBlock.Uint64() - 1 + err = r.repository.UpdateEventLastCheckBlock(ctx, []int64{app.application.ID}, MonitoredEvent_EpochSealed, lastEpochCheckBlock) + if err != nil { + r.Logger.Error("Failed to update application LastEpochCheckBlock", + "application", app.application.Name, + "last_epoch_check_block", lastEpochCheckBlock, + "error", err, + ) + return err + } + r.Logger.Debug("Application sealed epoch sync initialized", + "application", app.application.Name, + "deployment_block", deploymentBlock.Uint64(), + "next_search_block", lastEpochCheckBlock+1, + "current_block", mostRecentBlockNumber, + ) + app.application.LastEpochCheckBlock = lastEpochCheckBlock + return nil +} + +func (r *Service) checkForEpochsAndInputs( + ctx context.Context, + applications []appContracts, + mostRecentBlockNumber uint64, +) { + if !r.inputReaderEnabled { + return + } + + r.Logger.Debug("Checking for new epochs and inputs", "apps", applications) + + // Process each application individually since each has its own DaveConsensus contract + for _, app := range applications { + r.Logger.Debug("Processing DaveConsensus application", + "application", app.application.Name, + "consensus_address", app.application.IConsensusAddress) + + err := r.processApplicationSealedEpochs(ctx, app, mostRecentBlockNumber) + if err != nil { + r.Logger.Error("Error processing application sealed epochs", + "application", app.application.Name, + "consensus_address", app.application.IConsensusAddress, + "error", err) + continue + } + + err = r.processApplicationOpenEpoch(ctx, app, mostRecentBlockNumber) + if err != nil { + r.Logger.Error("Error processing application open epoch", + "application", app.application.Name, + "consensus_address", app.application.IConsensusAddress, + "error", err) + continue + } + } +} + +func (r *Service) processApplicationSealedEpochs( + ctx context.Context, + app appContracts, + mostRecentBlockNumber uint64, +) error { + // Find the starting block for epoch search + if app.application.LastEpochCheckBlock == 0 { + err := r.initializeNewApplicationSealedEpochSync(ctx, app, mostRecentBlockNumber) + if err != nil { + r.Logger.Error("Failed to initialize application sealed epoch sync", + "application", app.application.Name, + "most_recent_block", mostRecentBlockNumber, + "error", err, + ) + return fmt.Errorf("failed to determine start block for epoch search: %w", err) + } + } + + if mostRecentBlockNumber < app.application.LastEpochCheckBlock { + r.Logger.Warn( + "Not reading sealed epochs: most recent block is lower than the last processed one", + "application", app.application.Name, "address", app.application.IApplicationAddress, + "last_epoch_check_block", app.application.LastEpochCheckBlock, + "most_recent_block", mostRecentBlockNumber, + ) + return nil + } else if mostRecentBlockNumber == app.application.LastEpochCheckBlock { + r.Logger.Debug("Not reading sealed epochs: already checked the most recent blocks", + "application", app.application.Name, "address", app.application.IApplicationAddress, + "last_epoch_check_block", app.application.LastEpochCheckBlock, + "most_recent_block", mostRecentBlockNumber, + ) + return nil + } + + nextSearchBlock := app.application.LastEpochCheckBlock + 1 + r.Logger.Debug("Checking sealed epochs for application", + "application", app.application.Name, + "last_epoch_check_block", app.application.LastEpochCheckBlock, + "next_search_block", nextSearchBlock, + "most_recent_block", mostRecentBlockNumber, + ) + + // Create oracle function that returns the current sealed epoch number for a given block + oracle := func(ctx context.Context, block uint64) (*big.Int, error) { + r.Logger.Debug("Retrieving current sealed epoch", "application", app.application.Name, "block", block) + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(block), + } + + sealedEpoch, err := app.daveConsensus.GetCurrentSealedEpoch(callOpts) + if err != nil { + return nil, fmt.Errorf("failed to get current sealed epoch at block %d: %w", block, err) + } + + return sealedEpoch.EpochNumber, nil + } + + // Create onHit function that processes epoch transitions + onHit := func(block uint64) error { + r.Logger.Debug("Epoch transition found", "application", app.application.Name, "block", block) + return r.processEpochTransition(ctx, app, block) + } + + prevValue := big.NewInt(-1) + lastEpoch, err := r.repository.GetLastNonOpenEpoch(ctx, app.application.IApplicationAddress.String()) + if err != nil { + return fmt.Errorf("failed to get last non open epoch: %w", err) + } + if lastEpoch != nil { + prevValue = new(big.Int).SetUint64(lastEpoch.Index) + // assert that the last epoch's last block is less than the next search block + if lastEpoch.LastBlock > nextSearchBlock { + return r.setApplicationInoperable(ctx, app.application, + "application last non open epoch last block %d is greater than next search block %d", + lastEpoch.LastBlock, + nextSearchBlock, + ) + } + } + + // Use FindTransitions to find epoch transitions + _, err = ethutil.FindTransitions(ctx, nextSearchBlock, mostRecentBlockNumber, prevValue, oracle, onHit) + if err != nil { + return fmt.Errorf("failed to walk epoch transitions: %w", err) + } + + // Update the last check block for this application + err = r.repository.UpdateEventLastCheckBlock(ctx, []int64{app.application.ID}, MonitoredEvent_EpochSealed, mostRecentBlockNumber) + if err != nil { + return fmt.Errorf("failed to update last epoch check block: %w", err) + } + + r.Logger.Debug("Sealed epoch search completed", "application", app.application.Name, "most_recent_block", mostRecentBlockNumber) + + return nil +} + +func (r *Service) processEpochTransition( + ctx context.Context, + app appContracts, + transitionBlock uint64, +) error { + r.Logger.Debug("Processing epoch transition", "application", app.application.Name, "block", transitionBlock) + + // Get the sealed epoch information at this block + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(transitionBlock), + } + + sealedEpoch, err := app.daveConsensus.GetCurrentSealedEpoch(callOpts) + if err != nil { + return fmt.Errorf("failed to get sealed epoch at transition block %d: %w", transitionBlock, err) + } + + r.Logger.Info("Found sealed epoch event", + "application", app.application.Name, + "block", transitionBlock, + "epoch_number", sealedEpoch.EpochNumber, + "input_lower_bound", sealedEpoch.InputIndexLowerBound, + "input_upper_bound", sealedEpoch.InputIndexUpperBound, + "tournament", sealedEpoch.Tournament) + + // Retrieve the actual EpochSealed events for this transition + filterOpts := &bind.FilterOpts{ + Context: ctx, + Start: transitionBlock, + End: &transitionBlock, + } + + sealedEvents, err := app.daveConsensus.RetrieveSealedEpochs(filterOpts) + if err != nil { + return fmt.Errorf("failed to retrieve sealed epoch events at block %d: %w", transitionBlock, err) + } + + // Process each sealed epoch event + for _, event := range sealedEvents { + err := r.processSealedEpochEvent(ctx, app, event) + if err != nil { + r.Logger.Error("Error processing sealed epoch event", + "epoch_number", event.EpochNumber, + "block", transitionBlock, + "error", err) + return fmt.Errorf("failed to process sealed epoch event at block %d: %w", transitionBlock, err) + } + } + + return nil +} + +func (r *Service) processSealedEpochEvent( + ctx context.Context, + app appContracts, + event *idaveconsensus.IDaveConsensusEpochSealed, +) error { + r.Logger.Debug("Processing sealed epoch event", + "epoch_number", event.EpochNumber, + "input_lower_bound", event.InputIndexLowerBound, + "input_upper_bound", event.InputIndexUpperBound, + "tournament", event.Tournament) + + firstBlock := uint64(0) + epochNumber := event.EpochNumber.Uint64() + if epochNumber == 0 { + if app.application.IInputBoxBlock == 0 { + r.Logger.Error("Application has no InputBox block number defined", + "application", app.application.Name, + "inputbox", app.application.IInputBoxAddress, + "iinputbox_block", app.application.IInputBoxBlock, + ) + return errors.New("application has no InputBox block number defined") + } + firstBlock = app.application.IInputBoxBlock + } else { + prevEpochNumber := epochNumber - 1 + prevEpoch, err := r.repository.GetEpoch(ctx, app.application.IApplicationAddress.Hex(), prevEpochNumber) + if err != nil { + return fmt.Errorf("failed to fetch epoch %d: %w", prevEpochNumber, err) + } + if prevEpoch == nil { + return fmt.Errorf("failed to fetch previous epoch %d: should not be nil", prevEpochNumber) + } + + prevEpoch.ClaimTransactionHash = &event.Raw.TxHash + err = r.repository.UpdateEpochClaimTransactionHash(ctx, app.application.IApplicationAddress.Hex(), prevEpoch) + if err != nil { + return fmt.Errorf("failed to update previous epoch %d: %w", prevEpochNumber, err) + } + firstBlock = prevEpoch.LastBlock + } + + epoch, err := r.repository.GetEpoch(ctx, app.application.IApplicationAddress.Hex(), epochNumber) + if err != nil { + return fmt.Errorf("failed to fetch epoch %d: %w", epochNumber, err) + } + + if epoch == nil { + // Create new epoch from sealed event + epoch = &Epoch{ + Index: event.EpochNumber.Uint64(), + FirstBlock: firstBlock, // Will be calculated based on epoch length + LastBlock: event.Raw.BlockNumber, + InputIndexLowerBound: event.InputIndexLowerBound.Uint64(), + InputIndexUpperBound: event.InputIndexUpperBound.Uint64(), + TournamentAddress: &event.Tournament, + Status: EpochStatus_Closed, // Sealed epochs are closed + } + } else { + if epoch.FirstBlock != firstBlock || epoch.InputIndexLowerBound != event.InputIndexLowerBound.Uint64() { + return fmt.Errorf("epoch %d data mismatch with sealed event", epoch.Index) + } + epoch.LastBlock = event.Raw.BlockNumber + epoch.InputIndexUpperBound = event.InputIndexUpperBound.Uint64() + epoch.TournamentAddress = &event.Tournament + epoch.Status = EpochStatus_Closed // Sealed epochs are closed + } + + // Fetch inputs for this epoch from the InputBox + var inputs []*Input + if epoch.InputIndexUpperBound > epoch.InputIndexLowerBound { + var err error + lastInputCheckBlock, err := r.repository.GetEventLastCheckBlock(ctx, app.application.ID, MonitoredEvent_InputAdded) + if err != nil { + return fmt.Errorf("failed to get last input check block: %w", err) + } + + nextSearchBlock := lastInputCheckBlock + 1 + if lastInputCheckBlock == 0 { // First time fetching inputs for this application + nextSearchBlock = epoch.FirstBlock + } + if nextSearchBlock < epoch.FirstBlock || nextSearchBlock > epoch.LastBlock { + return fmt.Errorf("invalid next search block %d for inputs in epoch %d (first block: %d, last block: %d)", + nextSearchBlock, epoch.Index, epoch.FirstBlock, epoch.LastBlock) + } + + inputs, err = r.fetchInputsForEpoch(ctx, app, epoch.Index, nextSearchBlock, epoch.LastBlock, + epoch.InputIndexLowerBound, epoch.InputIndexUpperBound) + if err != nil { + return fmt.Errorf("failed to fetch inputs for epoch %d: %w", epoch.Index, err) + } + } + // Store epoch and inputs + epochInputMap := map[*Epoch][]*Input{epoch: inputs} + + r.Logger.Debug("Storing sealed epoch", "application", app.application.Name, "epoch_number", epoch.Index) + + err = r.repository.CreateEpochsAndInputs( + ctx, + app.application.IApplicationAddress.String(), + epochInputMap, + event.Raw.BlockNumber, + ) + if err != nil { + return fmt.Errorf("failed to store epoch and inputs: %w", err) + } + + r.Logger.Debug("Stored sealed epoch and inputs", + "application", app.application.Name, + "epoch_number", epoch.Index, + "num_inputs", len(inputs), + "block", event.Raw.BlockNumber) + + return nil +} + +func (r *Service) fetchInputsForEpoch( + ctx context.Context, + app appContracts, + epochIndex uint64, + startBlock, endBlock uint64, + lowerBound, upperBound uint64, +) ([]*Input, error) { + r.Logger.Debug("Fetching inputs for epoch", + "application", app.application.Name, + "epoch_index", epochIndex, + "input_lower_bound", lowerBound, + "input_upper_bound", upperBound, + "epoch_first_block", startBlock, + "epoch_last_block", endBlock, + ) + + // Define oracle function that returns the number of inputs at a given block + oracle := func(ctx context.Context, block uint64) (*big.Int, error) { + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(block), + } + numInputs, err := app.inputSource.GetNumberOfInputs(callOpts, app.application.IApplicationAddress) + if err != nil { + return nil, fmt.Errorf("failed to get number of inputs at block %d: %w", block, err) + } + return numInputs, nil + } + + var sortedInputs []*Input + // Define onHit function that accumulates inputs at transition blocks + onHit := func(block uint64) error { + filterOpts := &bind.FilterOpts{ + Context: ctx, + Start: block, + End: &block, + } + inputEvents, err := app.inputSource.RetrieveInputs( + filterOpts, + []common.Address{app.application.IApplicationAddress}, + nil, + ) + if err != nil { + return fmt.Errorf("failed to retrieve inputs at block %d: %w", block, err) + } + for _, event := range inputEvents { + if event.Index.Uint64() >= lowerBound && event.Index.Uint64() < upperBound { + input := &Input{ + Index: event.Index.Uint64(), + Status: InputCompletionStatus_None, + RawData: event.Input, + BlockNumber: event.Raw.BlockNumber, + TransactionReference: event.Raw.TxHash, + } + sortedInputs = insertSorted(sortByInputIndex, sortedInputs, input) + } + } + return nil + } + + inputCount, err := r.repository.GetNumberOfInputs(ctx, app.application.IApplicationAddress.String()) + if err != nil { + return nil, fmt.Errorf("failed to get number of inputs from repository: %w", err) + } + prevValue := new(big.Int).SetUint64(inputCount) + + // Use FindTransitions to find blocks where inputs were added + _, err = ethutil.FindTransitions(ctx, startBlock, endBlock, prevValue, oracle, onHit) + if err != nil { + return nil, fmt.Errorf("failed to walk input transitions: %w", err) + } + + r.Logger.Debug("Fetched inputs for epoch", + "application", app.application.Name, + "epoch_index", epochIndex, + "input_count", len(sortedInputs), + ) + return sortedInputs, nil +} + +func (r *Service) processApplicationOpenEpoch( + ctx context.Context, + app appContracts, + mostRecentBlockNumber uint64, +) error { + r.Logger.Debug("Checking for inputs on current open epoch", + "application", app.application.Name, + "most_recent_block", mostRecentBlockNumber, + ) + + lastEpoch, err := r.repository.GetLastNonOpenEpoch(ctx, app.application.IApplicationAddress.String()) + if err != nil { + return fmt.Errorf("failed to get last non open epoch: %w", err) + } + if lastEpoch == nil { + return r.setApplicationInoperable(ctx, app.application, + "invalid state. no non open epochs found for application") + } + + nextEpochNumber := lastEpoch.Index + 1 + openEpoch, err := r.repository.GetEpoch(ctx, app.application.IApplicationAddress.Hex(), nextEpochNumber) + if err != nil { + return fmt.Errorf("failed to fetch epoch %d: %w", nextEpochNumber, err) + } + if openEpoch == nil { + // Create epoch from sealed event + openEpoch = &Epoch{ + Index: nextEpochNumber, + FirstBlock: lastEpoch.LastBlock, + LastBlock: mostRecentBlockNumber, + InputIndexLowerBound: lastEpoch.InputIndexUpperBound, + InputIndexUpperBound: lastEpoch.InputIndexUpperBound, + Status: EpochStatus_Open, + } + } + + lastInputCheckBlock, err := r.repository.GetEventLastCheckBlock(ctx, app.application.ID, MonitoredEvent_InputAdded) + if err != nil { + return fmt.Errorf("failed to get last input check block: %w", err) + } + + // Fetch inputs for this epoch from the InputBox + inputs, err := r.fetchInputsForEpoch(ctx, app, nextEpochNumber, lastInputCheckBlock, + mostRecentBlockNumber, openEpoch.InputIndexLowerBound, math.MaxUint64) + if err != nil { + return fmt.Errorf("failed to fetch inputs for epoch %d: %w", openEpoch.Index, err) + } + + // increase the upper bound according to the number of fetched inputs + openEpoch.InputIndexUpperBound += uint64(len(inputs)) + openEpoch.LastBlock = mostRecentBlockNumber + + r.Logger.Debug("Storing open epoch", + "application", app.application.Name, + "epoch_number", openEpoch.Index, + "new_inputs", len(inputs), + ) + // Store epoch and inputs + epochInputMap := map[*Epoch][]*Input{openEpoch: inputs} + + err = r.repository.CreateEpochsAndInputs( + ctx, + app.application.IApplicationAddress.String(), + epochInputMap, + mostRecentBlockNumber, + ) + + if err != nil { + return fmt.Errorf("failed to store epoch and inputs: %w", err) + } + + r.Logger.Debug("Stored sealed epoch and inputs", + "application", app.application.Name, + "epoch_number", nextEpochNumber, + "num_inputs", len(inputs), + "block", mostRecentBlockNumber) + + return nil +} diff --git a/internal/evmreader/service.go b/internal/evmreader/service.go index 24e3525e9..19e3c894f 100644 --- a/internal/evmreader/service.go +++ b/internal/evmreader/service.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math/big" + "time" "github.com/cartesi/rollups-node/internal/config" . "github.com/cartesi/rollups-node/internal/model" @@ -30,14 +31,16 @@ type CreateInfo struct { type Service struct { service.Service - client EthClientInterface - wsClient EthClientInterface - adapterFactory AdapterFactory - repository EvmReaderRepository - chainId uint64 - defaultBlock DefaultBlock - hasEnabledApps bool - inputReaderEnabled bool + client EthClientInterface + wsClient EthClientInterface + adapterFactory AdapterFactory + repository EvmReaderRepository + chainId uint64 + defaultBlock DefaultBlock + hasEnabledApps bool + inputReaderEnabled bool + blockchainMaxRetries uint64 + blockchainSubscriptionRetryInterval time.Duration } const EvmReaderConfigKey = "evm-reader" @@ -99,6 +102,8 @@ func Create(ctx context.Context, c *CreateInfo) (*Service, error) { return nil, fmt.Errorf("NodeConfig chainId mismatch: network %d != config %d", chainId.Uint64(), nodeConfig.ChainID) } + s.blockchainMaxRetries = c.Config.BlockchainHttpMaxRetries + s.blockchainSubscriptionRetryInterval = c.Config.BlockchainHttpRetryMinWait s.client = c.EthClient s.wsClient = c.EthWsClient @@ -140,7 +145,10 @@ func (s *Service) Tick() []error { func (s *Service) Serve() error { ready := make(chan struct{}, 1) - go s.Run(s.Context, ready) + go func() { + s.Run(s.Context, ready) + s.Service.Stop(false) + }() return s.Service.Serve() } diff --git a/internal/evmreader/testdata/header_3.json b/internal/evmreader/testdata/header_3.json new file mode 100644 index 000000000..6a55b5484 --- /dev/null +++ b/internal/evmreader/testdata/header_3.json @@ -0,0 +1,14 @@ +{ + "number": "0x33", + "gasUsed": "0x11ddc", + "gasLimit": "0x1c9c380", + "extraData": "0x", + "timestamp": "0x6653eabc", + "difficulty": "0x0", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "sha3Uncles":"0x0000000000000000000000000000000000000000000000000000000000000000", + "stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot":"0x0000000000000000000000000000000000000000000000000000000000000000", + "logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 5e55e234f..983c951c4 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -219,6 +219,8 @@ func (mock *MockMachine) Advance( _ context.Context, input []byte, _ uint64, + _ uint64, + _ bool, ) (*AdvanceResult, error) { // Not used in inspect tests, but needed to satisfy the interface return nil, nil @@ -228,6 +230,14 @@ func (mock *MockMachine) Application() *Application { return mock.application } +func (mock *MockMachine) ProcessedInputs() uint64 { + return 0 +} + +func (m *MockMachine) OutputsProof(ctx context.Context, processedInputs uint64) (*OutputsProof, error) { + return nil, nil +} + func (mock *MockMachine) Synchronize(ctx context.Context, repo manager.MachineRepository) error { // Not used in inspect tests, but needed to satisfy the interface return nil @@ -238,6 +248,12 @@ func (mock *MockMachine) CreateSnapshot(ctx context.Context, processedInputs uin return nil } +// Retrieves the hash of the current machine state +func (m *MockMachine) Hash(ctx context.Context) ([32]byte, error) { + // Not used in inspect tests, but needed to satisfy the interface + return [32]byte{}, nil +} + func (mock *MockMachine) Close() error { // Not used in inspect tests, but needed to satisfy the interface return nil diff --git a/internal/jsonrpc/jsonrpc-discover.json b/internal/jsonrpc/jsonrpc-discover.json index 52be5f8ee..5f8cffda2 100644 --- a/internal/jsonrpc/jsonrpc-discover.json +++ b/internal/jsonrpc/jsonrpc-discover.json @@ -122,6 +122,7 @@ ], "result": { "name": "result", + "description": "A paginated list of epochs, with the proofs omitted", "schema": { "$ref": "#/components/schemas/EpochListResult" } @@ -497,6 +498,466 @@ } } }, + { + "name": "cartesi_listTournaments", + "summary": "Retrieve a List of Tournaments", + "description": "Returns a paginated list of Tournaments, with options to filter by epoch index or level.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter tournaments by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "level", + "description": "Filter tournaments by level.", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "parent_tournament_address", + "description": "Filter tournaments by parent tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": false + }, + { + "name": "parent_match_id_hash", + "description": "Filter tournaments by parent match id hash (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": false + }, + { + "name": "limit", + "description": "The maximum number of tournaments to return per page.", + "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of tournaments to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/TournamentListResult" + } + } + }, + { + "name": "cartesi_getTournament", + "summary": "Retrieve a specific tournament", + "description": "Retrieves a single tournament from the application using the specified address.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "address", + "description": "The address of the tournament (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/TournamentGetResult" + } + } + }, + { + "name": "cartesi_listCommitments", + "summary": "List commitments", + "description": "Returns a paginated list of commitments for the specified application. Can filter by epoch index and tournament address.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter commitments by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "tournament_address", + "description": "Filter commitments by tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": false + }, + { + "name": "limit", + "description": "The maximum number of commitments to return per page.", + "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of commitments to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/CommitmentListResult" + } + } + }, + { + "name": "cartesi_getCommitment", + "summary": "Get a specific commitment", + "description": "Fetches a single commitment by application, epoch index, tournament address and commitment hash.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "The index of the epoch (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + "description": "The tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "commitment", + "description": "The commitment hash (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/CommitmentGetResult" + } + } + }, + { + "name": "cartesi_listMatches", + "summary": "List matches", + "description": "Returns a paginated list of matches for the specified application. Can filter by epoch index and tournament address.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter matches by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "tournament_address", + "description": "Filter matches by tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": false + }, + { + "name": "limit", + "description": "The maximum number of matches to return per page.", + "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of matches to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchListResult" + } + } + }, + { + "name": "cartesi_getMatch", + "summary": "Get a specific match", + "description": "Fetches a single match by application, epoch index, tournament address and ID hash.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "The index of the epoch (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + "description": "The tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "id_hash", + "description": "The ID hash of the match (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchGetResult" + } + } + }, + { + "name": "cartesi_listMatchAdvances", + "summary": "List match advances", + "description": "Returns a paginated list of match advances for the specified match.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter match advances by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + "description": "Filter match advances by tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "id_hash", + "description": "The ID hash of the match (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + }, + { + "name": "limit", + "description": "The maximum number of match advances to return per page.", + "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of match advances to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchAdvancedListResult" + } + } + }, + { + "name": "cartesi_getMatchAdvanced", + "summary": "Get a specific match advance", + "description": "Fetches a single match advance by application, epoch index, tournament address, ID hash and parent.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "The index of the epoch (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + "description": "The tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "id_hash", + "description": "The ID hash of the match advance (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + }, + { + "name": "parent", + "description": "The parent hash of the match advance (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchAdvancedGetResult" + } + } + }, { "name": "cartesi_getChainId", "summary": "Get node's chain ID", @@ -562,6 +1023,9 @@ "data_availability": { "$ref": "#/components/schemas/ByteArray" }, + "consensus_type": { + "$ref": "#/components/schemas/Consensus" + }, "state": { "$ref": "#/components/schemas/ApplicationState" }, @@ -571,12 +1035,18 @@ "iinputbox_block": { "$ref": "#/components/schemas/UnsignedInteger" }, + "last_epoch_check_block": { + "$ref": "#/components/schemas/UnsignedInteger" + }, "last_input_check_block": { "$ref": "#/components/schemas/UnsignedInteger" }, "last_output_check_block": { "$ref": "#/components/schemas/UnsignedInteger" }, + "last_tournament_check_block": { + "$ref": "#/components/schemas/UnsignedInteger" + }, "processed_inputs": { "$ref": "#/components/schemas/UnsignedInteger" }, @@ -639,14 +1109,46 @@ "last_block": { "$ref": "#/components/schemas/UnsignedInteger" }, - "claim_hash": { + "input_index_lower_bound": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "input_index_upper_bound": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "machine_hash": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "outputs_merkle_root": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "outputs_merkle_proof": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Hash" + }, + "nullable": true + }, + "commitment": { "$ref": "#/components/schemas/Hash", "nullable": true }, + "commitment_proof": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Hash" + }, + "nullable": true + }, "claim_transaction_hash": { "$ref": "#/components/schemas/Hash", "nullable": true }, + "tournament_address": { + "$ref": "#/components/schemas/EthereumAddress", + "nullable": true + }, "status": { "$ref": "#/components/schemas/EpochStatus" }, @@ -1070,6 +1572,14 @@ "INOPERABLE" ] }, + "Consensus": { + "type": "string", + "enum": [ + "AUTHORITY", + "QUORUM", + "PRT" + ] + }, "EthereumAddress": { "type": "string", "format": "hex-byte", @@ -1108,6 +1618,278 @@ "$ref": "#/components/schemas/EthereumAddress" } ] + }, + "WinnerCommitment": { + "type": "string", + "enum": [ + "NONE", + "ONE", + "TWO" + ] + }, + "MatchDeletionReason": { + "type": "string", + "enum": [ + "STEP", + "TIMEOUT", + "CHILD_TOURNAMENT", + "NOT_DELETED" + ] + }, + "Tournament": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "parent_tournament_address": { + "$ref": "#/components/schemas/EthereumAddress", + "nullable": true + }, + "parent_match_id_hash": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "max_level": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "level": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "log2step": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "height": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "winner_commitment": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "final_state_hash": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "finished_at_block": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "TournamentListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Tournament" + } + }, + "pagination": { + "$ref": "#/components/schemas/Pagination" + } + } + }, + "TournamentGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Tournament" + } + } + }, + "Commitment": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tournament_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "commitment": { + "$ref": "#/components/schemas/Hash" + }, + "final_state_hash": { + "$ref": "#/components/schemas/Hash" + }, + "submitter_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "CommitmentListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Commitment" + } + }, + "pagination": { + "$ref": "#/components/schemas/Pagination" + } + } + }, + "CommitmentGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Commitment" + } + } + }, + "Match": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tournament_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "id_hash": { + "$ref": "#/components/schemas/Hash" + }, + "commitment_one": { + "$ref": "#/components/schemas/ByteArray" + }, + "commitment_two": { + "$ref": "#/components/schemas/ByteArray" + }, + "left_of_two": { + "$ref": "#/components/schemas/ByteArray" + }, + "block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "winner_commitment": { + "$ref": "#/components/schemas/WinnerCommitment" + }, + "deletion_reason": { + "$ref": "#/components/schemas/MatchDeletionReason" + }, + "deletion_block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "deletion_tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "MatchListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Match" + } + }, + "pagination": { + "$ref": "#/components/schemas/Pagination" + } + } + }, + "MatchGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Match" + } + } + }, + "MatchAdvanced": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tournament_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "id_hash": { + "$ref": "#/components/schemas/Hash" + }, + "other_parent": { + "$ref": "#/components/schemas/ByteArray" + }, + "left_node": { + "$ref": "#/components/schemas/ByteArray" + }, + "block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "MatchAdvancedListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MatchAdvanced" + } + }, + "pagination": { + "$ref": "#/components/schemas/Pagination" + } + } + }, + "MatchAdvancedGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/MatchAdvanced" + } + } } } } diff --git a/internal/jsonrpc/jsonrpc.go b/internal/jsonrpc/jsonrpc.go index a66d60d9e..819aaea9d 100644 --- a/internal/jsonrpc/jsonrpc.go +++ b/internal/jsonrpc/jsonrpc.go @@ -26,18 +26,20 @@ var discoverSpec embed.FS const ( // Maximum allowed body size (1 MB). - MAX_BODY_SIZE = 1 << 20 + MAX_BODY_SIZE = 1 << 20 //nolint: revive // Maximum amount of items to list (10,000). - LIST_ITEM_LIMIT = 10000 + LIST_ITEM_LIMIT = 10000 //nolint: revive + // Default amount of item on a list (50) + LIST_ITEM_DEFAULT = 50 //nolint: revive ) const ( - JSONRPC_RESOURCE_NOT_FOUND int = -32001 - JSONRPC_PARSE_ERROR int = -32700 - JSONRPC_INVALID_REQUEST int = -32600 - JSONRPC_METHOD_NOT_FOUND int = -32601 - JSONRPC_INVALID_PARAMS int = -32602 - JSONRPC_INTERNAL_ERROR int = -32603 + JSONRPC_RESOURCE_NOT_FOUND int = -32001 //nolint: revive + JSONRPC_PARSE_ERROR int = -32700 //nolint: revive + JSONRPC_INVALID_REQUEST int = -32600 //nolint: revive + JSONRPC_METHOD_NOT_FOUND int = -32601 //nolint: revive + JSONRPC_INVALID_PARAMS int = -32602 //nolint: revive + JSONRPC_INTERNAL_ERROR int = -32603 //nolint: revive ) type rpcHandler = func(*Service, http.ResponseWriter, *http.Request, RPCRequest) @@ -57,7 +59,15 @@ var jsonrpcHandlers = dispatchTable{ "cartesi_getOutput": handleGetOutput, "cartesi_listReports": handleListReports, "cartesi_getReport": handleGetReport, - "cartesi_getChainId": handleGetChainId, + "cartesi_listTournaments": handleListTournaments, + "cartesi_getTournament": handleGetTournament, + "cartesi_listCommitments": handleListCommitments, + "cartesi_getCommitment": handleGetCommitment, + "cartesi_listMatches": handleListMatches, + "cartesi_getMatch": handleGetMatch, + "cartesi_listMatchAdvances": handleListMatchAdvances, + "cartesi_getMatchAdvanced": handleGetMatchAdvanced, + "cartesi_getChainId": handleGetChainID, "cartesi_getNodeVersion": handleGetNodeVersion, } @@ -118,7 +128,7 @@ func handleListApplications(s *Service, w http.ResponseWriter, r *http.Request, } // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } // Cap limit to 10,000. if params.Limit > LIST_ITEM_LIMIT { @@ -207,7 +217,7 @@ func handleListEpochs(s *Service, w http.ResponseWriter, r *http.Request, req RP // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -227,7 +237,7 @@ func handleListEpochs(s *Service, w http.ResponseWriter, r *http.Request, req RP writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid epoch status: %v", err), nil) return } - epochFilter.Status = &status + epochFilter.Status = []model.EpochStatus{status} } epochs, total, err := s.repository.ListEpochs(r.Context(), params.Application, epochFilter, repository.Pagination{ @@ -385,7 +395,7 @@ func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RP // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -433,7 +443,7 @@ func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RP return } - var resultInputs []*DecodedInput + resultInputs := make([]*DecodedInput, 0, len(inputs)) for _, in := range inputs { decoded, err := DecodeInput(in, s.inputABI) if err != nil { @@ -441,9 +451,6 @@ func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RP } resultInputs = append(resultInputs, decoded) } - if resultInputs == nil { - resultInputs = []*DecodedInput{} - } // Format response according to spec result := struct { @@ -554,7 +561,7 @@ func ParseOutputType(s string) ([]byte, error) { if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { s = s[2:] } - if len(s) != 8 { // nolint: mnd + if len(s) != 8 { //nolint: mnd return []byte{}, fmt.Errorf("invalid output type: expected exactly 4 bytes") } // Decode the hex string into bytes. @@ -575,7 +582,7 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -638,7 +645,7 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R return } - var resultOutputs []*DecodedOutput + resultOutputs := make([]*DecodedOutput, 0, len(outputs)) for _, out := range outputs { decoded, err := DecodeOutput(out, s.outputABI) if err != nil { @@ -651,9 +658,6 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Application not found", nil) return } - if resultOutputs == nil { - resultOutputs = []*DecodedOutput{} - } // Format response according to spec result := struct { @@ -735,7 +739,7 @@ func handleListReports(s *Service, w http.ResponseWriter, r *http.Request, req R // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -851,8 +855,540 @@ func handleGetReport(s *Service, w http.ResponseWriter, r *http.Request, req RPC writeRPCResult(w, req.ID, response) } -func handleGetChainId(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { +func handleListTournaments(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params ListTournamentsParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create tournament filter based on params + tournamentFilter := repository.TournamentFilter{} + if params.EpochIndex != nil { + epochIndex, err := parseIndex(*params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + tournamentFilter.EpochIndex = &epochIndex + } + + if params.Level != nil { + level, err := parseIndex(*params.Level, "level") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + tournamentFilter.Level = &level + } + + if params.ParentTournamentAddress != nil { + parentAddress, err := config.ToAddressFromString(*params.ParentTournamentAddress) + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid parent tournament address: %v", err), nil) + } + tournamentFilter.ParentTournamentAddress = &parentAddress + } + + if params.ParentMatchIDHash != nil { + parentMatchIDHash, err := config.ToHashFromString(*params.ParentMatchIDHash) + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid parent match ID hash: %v", err), nil) + } + tournamentFilter.ParentMatchIDHash = &parentMatchIDHash + } + + tournaments, total, err := s.repository.ListTournaments(r.Context(), params.Application, tournamentFilter, repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + }, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve tournaments from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if tournaments == nil { + tournaments = []*model.Tournament{} + } + + // Format response according to spec + result := struct { + Data []*model.Tournament `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: tournaments, + Pagination: struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetTournament(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetTournamentParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Validate tournament address + if _, err := config.ToAddressFromString(params.Address); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + tournament, err := s.repository.GetTournament(r.Context(), params.Application, params.Address) + if err != nil { + s.Logger.Error("Unable to retrieve tournament from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if tournament == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Tournament not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.Tournament `json:"data"` + }{ + Data: tournament, + } + + writeRPCResult(w, req.ID, response) +} + +func handleListCommitments(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params ListCommitmentsParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create commitment filter based on params + commitmentFilter := repository.CommitmentFilter{} + if params.EpochIndex != nil { + epochIndex, err := parseIndex(*params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + commitmentFilter.EpochIndex = &epochIndex + } + + if params.TournamentAddress != nil { + if _, err := config.ToAddressFromString(*params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + commitmentFilter.TournamentAddress = params.TournamentAddress + } + + commitments, total, err := s.repository.ListCommitments(r.Context(), params.Application, commitmentFilter, repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + }, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve commitments from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if commitments == nil { + commitments = []*model.Commitment{} + } + + // Format response according to spec + result := struct { + Data []*model.Commitment `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: commitments, + Pagination: struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetCommitment(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetCommitmentParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := hex.DecodeString(params.Commitment); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid commitment hex: %v", err), nil) + return + } + + commitment, err := s.repository.GetCommitment(r.Context(), params.Application, epochIndex, params.TournamentAddress, params.Commitment) + if err != nil { + s.Logger.Error("Unable to retrieve commitment from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if commitment == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Commitment not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.Commitment `json:"data"` + }{ + Data: commitment, + } + + writeRPCResult(w, req.ID, response) +} + +func handleListMatches(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params ListMatchesParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create match filter based on params + matchFilter := repository.MatchFilter{} + if params.EpochIndex != nil { + epochIndex, err := parseIndex(*params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + matchFilter.EpochIndex = &epochIndex + } + + if params.TournamentAddress != nil { + if _, err := config.ToAddressFromString(*params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + matchFilter.TournamentAddress = params.TournamentAddress + } + + matches, total, err := s.repository.ListMatches(r.Context(), params.Application, matchFilter, repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + }, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve matches from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if matches == nil { + matches = []*model.Match{} + } + + // Format response according to spec + result := struct { + Data []*model.Match `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: matches, + Pagination: struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetMatch(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetMatchParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.IDHash); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid ID hash: %v", err), nil) + return + } + + match, err := s.repository.GetMatch(r.Context(), params.Application, epochIndex, params.TournamentAddress, params.IDHash) + if err != nil { + s.Logger.Error("Unable to retrieve match from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if match == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Match not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.Match `json:"data"` + }{ + Data: match, + } + + writeRPCResult(w, req.ID, response) +} + +func handleListMatchAdvances(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params ListMatchAdvancesParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create match advance filter based on params + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.IDHash); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid ID hash: %v", err), nil) + return + } + + pagination := repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + } + matchAdvances, total, err := s.repository.ListMatchAdvances(r.Context(), params.Application, epochIndex, + params.TournamentAddress, params.IDHash, pagination, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve match advances from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if matchAdvances == nil { + matchAdvances = []*model.MatchAdvanced{} + } + + // Format response according to spec + result := struct { + Data []*model.MatchAdvanced `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: matchAdvances, + Pagination: struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetMatchAdvanced(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetMatchAdvancedParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.IDHash); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid ID hash: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.Parent); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid parent hash: %v", err), nil) + return + } + + matchAdvanced, err := s.repository.GetMatchAdvanced(r.Context(), params.Application, epochIndex, + params.TournamentAddress, params.IDHash, params.Parent) + if err != nil { + s.Logger.Error("Unable to retrieve match advanced from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if matchAdvanced == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Match advanced not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.MatchAdvanced `json:"data"` + }{ + Data: matchAdvanced, + } + + writeRPCResult(w, req.ID, response) +} +func handleGetChainID(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { config, err := repository.LoadNodeConfig[evmreader.PersistentConfig](r.Context(), s.repository, evmreader.EvmReaderConfigKey) if errors.Is(err, repository.ErrNotFound) { writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "EVM Reader config not found", nil) @@ -873,7 +1409,7 @@ func handleGetChainId(s *Service, w http.ResponseWriter, r *http.Request, req RP writeRPCResult(w, req.ID, result) } -func handleGetNodeVersion(s *Service, w http.ResponseWriter, _ *http.Request, req RPCRequest) { +func handleGetNodeVersion(_ *Service, w http.ResponseWriter, _ *http.Request, req RPCRequest) { result := struct { Data string `json:"data"` }{ diff --git a/internal/jsonrpc/jsonrpc_test.go b/internal/jsonrpc/jsonrpc_test.go index e211a8954..ac0d4add8 100644 --- a/internal/jsonrpc/jsonrpc_test.go +++ b/internal/jsonrpc/jsonrpc_test.go @@ -140,7 +140,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_getApplication", @@ -185,7 +185,7 @@ func TestMethod(t *testing.T) { // NodeConfig provision nr := uint64(0xdeadbeef) - repository.SaveNodeConfig(ctx, s.repository, + err := repository.SaveNodeConfig(ctx, s.repository, &model.NodeConfig[evmreader.PersistentConfig]{ Key: evmreader.EvmReaderConfigKey, Value: evmreader.PersistentConfig{ @@ -193,6 +193,7 @@ func TestMethod(t *testing.T) { }, }, ) + assert.Nil(t, err, "on test case: %v, when saving evm reader config", t.Name()) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", @@ -245,11 +246,11 @@ func TestMethod(t *testing.T) { app := uint64(1) nr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: nr, - ClaimHash: &common.Hash{}, + OutputsMerkleRoot: &common.Hash{}, ClaimTransactionHash: &common.Hash{}, Status: model.EpochStatus_ClaimAccepted, }) @@ -280,11 +281,11 @@ func TestMethod(t *testing.T) { app := uint64(1) nr := uint64(1) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: nr, - ClaimHash: &common.Hash{}, + OutputsMerkleRoot: &common.Hash{}, ClaimTransactionHash: &common.Hash{}, Status: model.EpochStatus_ClaimAccepted, }) @@ -345,7 +346,7 @@ func TestMethod(t *testing.T) { app := uint64(2) enr := uint64(1) inr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -379,7 +380,7 @@ func TestMethod(t *testing.T) { app := uint64(2) enr := uint64(1) inr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -450,11 +451,11 @@ func TestMethod(t *testing.T) { nr := uint64(0) epochIndex := uint64(0xdeadbeef) - appID := s.newTestApplication(t, ctx, 0, nr) + appID := s.newTestApplication(ctx, t, 0, nr) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: epochIndex, - ClaimHash: &common.Hash{}, + OutputsMerkleRoot: &common.Hash{}, ClaimTransactionHash: &common.Hash{}, Status: model.EpochStatus_ClaimAccepted, }) @@ -536,7 +537,7 @@ func TestMethod(t *testing.T) { app := uint64(2) enr := uint64(1) inr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -571,7 +572,7 @@ func TestMethod(t *testing.T) { enr := uint64(1) inr := uint64(1) onr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -660,7 +661,7 @@ func TestMethod(t *testing.T) { app := uint64(1) - s.newTestApplication(t, ctx, 0, app) + s.newTestApplication(ctx, t, 0, app) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_getProcessedInputCount", @@ -716,7 +717,7 @@ func TestMethod(t *testing.T) { enr := uint64(1) inr := uint64(1) onr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -802,7 +803,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listApplications", @@ -827,7 +828,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listApplications", @@ -850,7 +851,7 @@ func TestMethod(t *testing.T) { many := uint64(100) limit := uint64(many / 2) for i := range many { - s.newTestApplication(t, ctx, 0, i) + s.newTestApplication(ctx, t, 0, i) } { // offset == 0, descending = false @@ -867,7 +868,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -888,7 +889,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -909,7 +910,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -930,7 +931,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -971,7 +972,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listEpochs", @@ -991,7 +992,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listEpochs", @@ -1017,7 +1018,7 @@ func TestMethod(t *testing.T) { nr := uint64(1) many := uint64(100) limit := uint64(many / 2) - appID := s.newTestApplication(t, ctx, 0, nr) + appID := s.newTestApplication(ctx, t, 0, nr) for i := range many { err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, @@ -1043,7 +1044,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1065,7 +1066,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1087,7 +1088,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1109,7 +1110,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1150,7 +1151,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listInputs", @@ -1198,7 +1199,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listOutputs", @@ -1220,7 +1221,7 @@ func TestMethod(t *testing.T) { app := uint64(3) enr := uint64(1) inr := uint64(1) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -1276,7 +1277,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1298,7 +1299,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1320,7 +1321,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1342,7 +1343,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1383,7 +1384,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listReports", @@ -1405,7 +1406,7 @@ func TestMethod(t *testing.T) { app := uint64(3) enr := uint64(1) inr := uint64(1) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -1459,7 +1460,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1481,7 +1482,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1503,7 +1504,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1525,7 +1526,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1534,6 +1535,21 @@ func TestMethod(t *testing.T) { }) }) + //////////////////////////////////////////////////////////////////////// + // Place holder for new tournament data methods + //////////////////////////////////////////////////////////////////////// + t.Run("cartesi_NEW_METHODS", func(_ *testing.T) { + // TODO: implement proper tests for tournament data methods + testHistogram.inc("cartesi_getTournament") + testHistogram.inc("cartesi_listTournaments") + testHistogram.inc("cartesi_getCommitment") + testHistogram.inc("cartesi_getMatch") + testHistogram.inc("cartesi_listMatchAdvances") + testHistogram.inc("cartesi_listCommitments") + testHistogram.inc("cartesi_listMatches") + testHistogram.inc("cartesi_getMatchAdvanced") + }) + // tested methods, implemented methods and discover methods must match: data, err := discoverSpec.ReadFile("jsonrpc-discover.json") assert.Nil(t, err) @@ -1542,23 +1558,56 @@ func TestMethod(t *testing.T) { err = json.Unmarshal(data, &schema) assert.Nil(t, err) - result := hist{ - "rpc.discover": 1, // +1, because it doesn't show up in the jsonrpc file - } - + allMethods := make(map[string]bool) + tested := make(map[string]bool) for k := range testHistogram { - result.inc(k) + allMethods[k] = true + tested[k] = true } + implemented := make(map[string]bool) for k := range jsonrpcHandlers { - result.inc(k) + allMethods[k] = true + implemented[k] = true } + specified := make(map[string]bool) for _, v := range schema.Methods { - result.inc(v.Name) + allMethods[v.Name] = true + specified[v.Name] = true + } + + // Check each method + var errors []string + for method := range allMethods { + hasTest := tested[method] + hasImpl := implemented[method] + hasSpec := specified[method] + + // All methods must be tested and implemented + // rpc.discover is not discovered (not in schema), others must be + expectedInSpec := method != "rpc.discover" + + var missing []string + if !hasTest { + missing = append(missing, "tests") + } + if !hasImpl { + missing = append(missing, "implementation") + } + if hasSpec != expectedInSpec { + if expectedInSpec { + missing = append(missing, "specification") + } else { + missing = append(missing, "should not be in specification") + } + } + if len(missing) > 0 { + errors = append(errors, fmt.Sprintf("Method %s is missing: %v", method, missing)) + } } - for k, v := range result { - assert.Equal(t, v, 3, "method %v is not: tested && implemented && discovered", k) + if len(errors) > 0 { + t.Errorf("Method coverage issues:\n%s", strings.Join(errors, "\n")) } } diff --git a/internal/jsonrpc/service.go b/internal/jsonrpc/service.go index 4941fb25d..1030f7067 100644 --- a/internal/jsonrpc/service.go +++ b/internal/jsonrpc/service.go @@ -72,8 +72,11 @@ func Create(ctx context.Context, c *CreateInfo) (*Service, error) { mux := http.NewServeMux() mux.HandleFunc("/rpc", s.handleRPC) s.server = &http.Server{ - Addr: c.Config.JsonrpcApiAddress, - Handler: services.CorsMiddleware(mux), // FIXME: add proper cors config + Addr: c.Config.JsonrpcApiAddress, + Handler: services.CorsMiddleware(mux), // FIXME: add proper cors config + WriteTimeout: 30 * time.Second, //nolint: mnd + ReadTimeout: 30 * time.Second, //nolint: mnd + ReadHeaderTimeout: 10 * time.Second, //nolint: mnd } return s, nil @@ -96,9 +99,9 @@ func (s *Service) Tick() []error { return nil } -func (s *Service) Stop(force bool) []error { +func (s *Service) Stop(_ bool) []error { var errs []error - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) //nolint: mnd defer cancel() if err := s.server.Shutdown(ctx); err != nil { errs = append(errs, err) diff --git a/internal/jsonrpc/types.go b/internal/jsonrpc/types.go index e175a4dad..bc0a1da02 100644 --- a/internal/jsonrpc/types.go +++ b/internal/jsonrpc/types.go @@ -231,6 +231,80 @@ type GetReportParams struct { ReportIndex string `json:"report_index"` } +// ListTournamentsParams aligns with the OpenRPC specification +type ListTournamentsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + Level *string `json:"level,omitempty"` + ParentTournamentAddress *string `json:"parent_tournament_address,omitempty"` + ParentMatchIDHash *string `json:"parent_match_id_hash,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetTournamentParams aligns with the OpenRPC specification +type GetTournamentParams struct { + Application string `json:"application"` + Address string `json:"address"` +} + +// ListCommitmentsParams aligns with the OpenRPC specification +type ListCommitmentsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + TournamentAddress *string `json:"tournament_address,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetCommitmentParams aligns with the OpenRPC specification +type GetCommitmentParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + Commitment string `json:"commitment"` +} + +// ListMatchesParams aligns with the OpenRPC specification +type ListMatchesParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + TournamentAddress *string `json:"tournament_address,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetMatchParams aligns with the OpenRPC specification +type GetMatchParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + IDHash string `json:"id_hash"` +} + +// ListMatchAdvancesParams aligns with the OpenRPC specification +type ListMatchAdvancesParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index,omitempty"` + TournamentAddress string `json:"tournament_address,omitempty"` + IDHash string `json:"id_hash"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetMatchAdvancedParams aligns with the OpenRPC specification +type GetMatchAdvancedParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + IDHash string `json:"id_hash"` + Parent string `json:"parent"` +} + // ----------------------------------------------------------------------------- // ABI Decoding helpers (provided code) // ----------------------------------------------------------------------------- diff --git a/internal/jsonrpc/util_test.go b/internal/jsonrpc/util_test.go index 241c0a6ef..8bb661f03 100644 --- a/internal/jsonrpc/util_test.go +++ b/internal/jsonrpc/util_test.go @@ -9,13 +9,13 @@ import ( "encoding/json" "fmt" "io" - "log/slog" "math/big" "net/http" "net/http/httptest" "strings" "testing" + "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/pkg/service" @@ -46,7 +46,10 @@ func (x *hex64) MarshalJSON() ([]byte, error) { func (x *hex64) UnmarshalJSON(in []byte) error { var hexString string - json.Unmarshal(in, &hexString) + err := json.Unmarshal(in, &hexString) + if err != nil { + return err + } hexValue, err := model.ParseHexUint64(hexString) if err != nil { return err @@ -77,10 +80,13 @@ func newTestService(t *testing.T, name string) *Service { repo, err := factory.NewRepositoryFromConnectionString(ctx, dbTestEndpoint) assert.Nil(t, err) + logLevel, err := config.GetLogLevel() + assert.Nil(t, err) + ci := CreateInfo{ CreateInfo: service.CreateInfo{ Name: name, - LogLevel: slog.LevelDebug, + LogLevel: logLevel, LogColor: true, }, Repository: repo, @@ -101,13 +107,14 @@ func numberToName(x uint64) string { } // create an application with mostly stub values. -func (s *Service) newTestApplication(t *testing.T, ctx context.Context, test, i uint64) int64 { +func (s *Service) newTestApplication(ctx context.Context, t *testing.T, test, i uint64) int64 { hex := numberToName(i) id, err := s.repository.CreateApplication(ctx, &model.Application{ Name: hex, IApplicationAddress: common.HexToAddress(hex), DataAvailability: []byte{0x00, 0x00, 0x00, 0x00}, State: model.ApplicationState_Enabled, + ConsensusType: model.Consensus_Authority, }, false) assert.Nil(t, err, "on test case: %v, when creating application: %v", test, i) return id @@ -133,12 +140,12 @@ func (s *Service) doRequest(t *testing.T, i uint64, reqData []byte) []byte { // input from ./cartesi-rollups-cli send echo-dapp -y "" func emptyInput() []byte { - raw, _ := hexutil.Decode("0x415bf363000000000000000000000000000000000000000000000000000000000000343a0000000000000000000000002e662c8a1a6c8008482a41ef6d3b333497e7f956000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000068c97fb45a1ab2f3478ee32e84c0a464f70d9da8d470868984ba5f00d9da757bbcee2098000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000") + raw, _ := hexutil.Decode("0x415bf363000000000000000000000000000000000000000000000000000000000000343a0000000000000000000000002e662c8a1a6c8008482a41ef6d3b333497e7f956000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000068c97fb45a1ab2f3478ee32e84c0a464f70d9da8d470868984ba5f00d9da757bbcee2098000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000") //nolint: lll return raw } // output from ./cartesi-rollups-cli send echo-dapp -y "" func emptyVoucher() []byte { - raw, _ := hexutil.Decode("0x237a816f000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000000000000000000000000000000000000deadbeef00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000") + raw, _ := hexutil.Decode("0x237a816f000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000000000000000000000000000000000000deadbeef00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000") //nolint: lll return raw } diff --git a/internal/manager/instance.go b/internal/manager/instance.go index 2fac54d75..f1ad2000a 100644 --- a/internal/manager/instance.go +++ b/internal/manager/instance.go @@ -5,6 +5,7 @@ package manager import ( "context" + "encoding/hex" "errors" "fmt" "log/slog" @@ -142,6 +143,10 @@ func (m *MachineInstanceImpl) Application() *Application { return m.application } +func (m *MachineInstanceImpl) ProcessedInputs() uint64 { + return m.processedInputs +} + // Synchronize brings the machine up to date with processed inputs func (m *MachineInstanceImpl) Synchronize(ctx context.Context, repo MachineRepository) error { appAddress := m.application.IApplicationAddress.String() @@ -175,7 +180,7 @@ func (m *MachineInstanceImpl) Synchronize(ctx context.Context, repo MachineRepos "epoch_index", input.EpochIndex, "input_index", input.Index) - _, err := m.Advance(ctx, input.RawData, input.Index) + _, err := m.Advance(ctx, input.RawData, input.EpochIndex, input.Index, false) if err != nil { return fmt.Errorf("%w: failed to replay input %d: %v", ErrMachineSynchronization, input.Index, err) @@ -205,7 +210,7 @@ func (m *MachineInstanceImpl) forkForAdvance(ctx context.Context, index uint64) } // Advance processes an input and advances the machine state -func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index uint64) (*AdvanceResult, error) { +func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, epochIndex uint64, index uint64, computeHashes bool) (*AdvanceResult, error) { // Only one advance can be active at a time m.advanceMutex.Lock() defer m.advanceMutex.Unlock() @@ -230,12 +235,25 @@ func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index u return nil, errors.Join(err, fork.Close()) } + prevOutputsHashProof, err := fork.OutputsHashProof(ctx) + if err != nil { + return nil, errors.Join(err, fork.Close()) + } + // Create a timeout context for the advance operation advanceCtx, cancel := context.WithTimeout(ctx, m.advanceTimeout) defer cancel() + if computeHashes { + // write the checkpoint hash before processing + err = fork.WriteCheckpointHash(advanceCtx, prevMachineHash) + if err != nil { + return nil, errors.Join(err, fork.Close()) + } + } + // Process the input - accepted, outputs, reports, outputsHash, err := fork.Advance(advanceCtx, input) + accepted, outputs, reports, hashes, remaining, outputsHash, err := fork.Advance(advanceCtx, input, computeHashes) status, err := toInputStatus(accepted, err) if err != nil { return nil, errors.Join(err, fork.Close()) @@ -243,21 +261,28 @@ func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index u // Create the result result := &AdvanceResult{ - InputIndex: index, - Status: status, - Outputs: outputs, - Reports: reports, - OutputsHash: outputsHash, + EpochIndex: epochIndex, + InputIndex: index, + Status: status, + Outputs: outputs, + Reports: reports, + Hashes: hashes, + RemainingMetaCycles: remaining, + IsDaveConsensus: computeHashes, } // If the input was accepted, update the machine state if result.Status == InputCompletionStatus_Accepted { // Get the machine hash after processing - machineHash, err := fork.Hash(ctx) + result.MachineHash, err = fork.Hash(ctx) + if err != nil { + return nil, errors.Join(err, fork.Close()) + } + result.OutputsHash = outputsHash + result.OutputsHashProof, err = fork.OutputsHashProof(ctx) if err != nil { return nil, errors.Join(err, fork.Close()) } - result.MachineHash = (*common.Hash)(&machineHash) // Replace the current machine with the fork m.mutex.HLock() @@ -270,8 +295,9 @@ func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index u m.mutex.Unlock() } else { // Use the previous state for rejected inputs - result.MachineHash = (*common.Hash)(&prevMachineHash) + result.MachineHash = prevMachineHash result.OutputsHash = prevOutputsHash + result.OutputsHashProof = prevOutputsHashProof // Close the fork since we're not using it err = fork.Close() @@ -384,6 +410,79 @@ func (m *MachineInstanceImpl) CreateSnapshot(ctx context.Context, processedInput return nil } +func (m *MachineInstanceImpl) Hash(ctx context.Context) ([32]byte, error) { + // Acquire the advance mutex to ensure no advance operations are in progress + m.advanceMutex.Lock() + defer m.advanceMutex.Unlock() + + // Acquire a read lock on the machine + m.mutex.LLock() + defer m.mutex.Unlock() + + if m.runtime == nil { + return [32]byte{}, ErrMachineClosed + } + + m.logger.Debug("Retrieving machine root hash") + + storeCtx, cancel := context.WithTimeout(ctx, m.application.ExecutionParameters.LoadDeadline) + defer cancel() + + hash, err := m.runtime.Hash(storeCtx) + if err != nil { + m.logger.Error("Failed to retrieve machine root hash", "error", err) + return [32]byte{}, err + } + + m.logger.Debug("Machine root hash retrieved successfully", "hash", "0x"+hex.EncodeToString(hash[:])) + return hash, nil +} + +func (m *MachineInstanceImpl) OutputsProof(ctx context.Context, processedInputs uint64) (*OutputsProof, error) { + // Acquire the advance mutex to ensure no advance operations are in progress + m.advanceMutex.Lock() + defer m.advanceMutex.Unlock() + + // Acquire a read lock on the machine + m.mutex.LLock() + defer m.mutex.Unlock() + + if m.runtime == nil { + return nil, ErrMachineClosed + } + + m.logger.Debug("Retrieving machine hash, outputs merkle root and outputs merkle proof") + + proofCtx, cancel := context.WithTimeout(ctx, m.application.ExecutionParameters.LoadDeadline) + defer cancel() + + // Get the machine state before processing + machineHash, err := m.runtime.Hash(proofCtx) + if err != nil { + return nil, errors.Join(err, m.runtime.Close()) + } + + outputsHash, err := m.runtime.OutputsHash(proofCtx) + if err != nil { + return nil, errors.Join(err, m.runtime.Close()) + } + + outputsHashProof, err := m.runtime.OutputsHashProof(proofCtx) + if err != nil { + return nil, errors.Join(err, m.runtime.Close()) + } + + proof := &OutputsProof{ + MachineHash: machineHash, + OutputsHash: outputsHash, + OutputsHashProof: outputsHashProof, + } + + m.logger.Debug("Machine machine hash, outputs merkle root and outputs merkle proof retrieved successfully", + "hash", "0x"+hex.EncodeToString(machineHash[:])) + return proof, nil +} + // Close shuts down the machine instance func (m *MachineInstanceImpl) Close() error { // Acquire all locks to ensure no operations are in progress diff --git a/internal/manager/instance_test.go b/internal/manager/instance_test.go index 169763820..82f27640b 100644 --- a/internal/manager/instance_test.go +++ b/internal/manager/instance_test.go @@ -33,10 +33,10 @@ type MockMachineRuntimeFactory struct { } func (f *MockMachineRuntimeFactory) CreateMachineRuntime( - ctx context.Context, - app *model.Application, - logger *slog.Logger, - checkHash bool, + _ context.Context, + _ *model.Application, + _ *slog.Logger, + _ bool, ) (machine.Machine, error) { return f.RuntimeToReturn, f.ErrorToReturn } @@ -210,7 +210,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require := s.Require() _, fork, machine := s.setupAdvance() - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Nil(err) require.NotNil(res) @@ -219,7 +219,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require.Equal(expectedOutputs, res.Outputs) require.Equal(expectedReports1, res.Reports) require.Equal(newHash(1), res.OutputsHash) - require.Equal(newHash(2), *res.MachineHash) + require.Equal(newHash(2), res.MachineHash) require.Equal(uint64(6), machine.processedInputs) }) @@ -229,7 +229,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceAcceptedReturn = false fork.CloseError = nil - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Nil(err) require.NotNil(res) @@ -238,7 +238,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require.Equal(expectedOutputs, res.Outputs) require.Equal(expectedReports1, res.Reports) require.Equal(newHash(1), res.OutputsHash) - require.Equal(newHash(2), *res.MachineHash) + require.Equal(newHash(2), res.MachineHash) require.Equal(uint64(6), machine.processedInputs) }) @@ -249,7 +249,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceError = err fork.CloseError, inner.CloseError = inner.CloseError, fork.CloseError - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Nil(err) require.NotNil(res) @@ -257,7 +257,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require.Equal(expectedOutputs, res.Outputs) require.Equal(expectedReports1, res.Reports) require.Equal(newHash(1), res.OutputsHash) - require.Equal(newHash(2), *res.MachineHash) + require.Equal(newHash(2), res.MachineHash) require.Equal(uint64(6), machine.processedInputs) }) } @@ -294,7 +294,7 @@ func (s *MachineInstanceSuite) TestAdvance() { errFork := errors.New("Fork error") inner.ForkError = errFork - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.Equal(errFork, err) @@ -308,7 +308,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceError = errAdvance fork.CloseError, inner.CloseError = inner.CloseError, fork.CloseError - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errAdvance) @@ -325,7 +325,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.CloseError = errClose inner.CloseError = nil - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errAdvance) @@ -341,7 +341,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.HashError = errHash fork.CloseError, inner.CloseError = inner.CloseError, fork.CloseError - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errHash) @@ -358,7 +358,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.CloseError = errClose inner.CloseError = nil - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errHash) @@ -374,7 +374,7 @@ func (s *MachineInstanceSuite) TestAdvance() { errClose := errors.New("Close error") inner.CloseError = errClose - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errClose) @@ -389,7 +389,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceError = machine.ErrException fork.CloseError = errClose - res, err := machineInst.Advance(context.Background(), []byte{}, 5) + res, err := machineInst.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.NotNil(res) require.ErrorIs(err, errClose) @@ -601,7 +601,7 @@ func (s *MachineInstanceSuite) TestClose() { time.Sleep(centisecond / 2) // This should block until Close is done - _, err := machine.Advance(context.Background(), []byte{}, 5) + _, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Equal(ErrMachineClosed, err) }() @@ -622,37 +622,6 @@ func (s *MachineInstanceSuite) TestClose() { // ------------------------------------------------------------------------------------------------ -// MockMachineInstance implements the MachineInstance interface for testing -type MockMachineInstance struct { - application *model.Application -} - -func (m *MockMachineInstance) Application() *model.Application { - return m.application -} - -func (m *MockMachineInstance) Advance(ctx context.Context, input []byte, index uint64) (*model.AdvanceResult, error) { - return nil, nil -} - -func (m *MockMachineInstance) Inspect(ctx context.Context, query []byte) (*model.InspectResult, error) { - return nil, nil -} - -func (m *MockMachineInstance) Synchronize(ctx context.Context, repo MachineRepository) error { - return nil -} - -func (m *MockMachineInstance) CreateSnapshot(ctx context.Context, processedInputs uint64, path string) error { - return nil -} - -func (m *MockMachineInstance) Close() error { - return nil -} - -// ------------------------------------------------------------------------------------------------ - var ( errUnreachable = errors.New("unreachable") expectedOutputs = []machine.Output{ @@ -708,7 +677,7 @@ func (s *MachineInstanceSuite) setupAdvance() (*MockRollupsMachine, *MockRollups newBytes(21, 200), newBytes(22, 200), } - fork.AdvanceHashReturn = newHash(1) + fork.OutputsHashReturn = newHash(1) fork.AdvanceError = nil fork.HashReturn = newHash(2) @@ -803,11 +772,18 @@ type MockRollupsMachine struct { HashReturn machine.Hash HashError error - AdvanceAcceptedReturn bool - AdvanceOutputsReturn []machine.Output - AdvanceReportsReturn []machine.Report - AdvanceHashReturn machine.Hash - AdvanceError error + CheckpointHashError error + + AdvanceAcceptedReturn bool + AdvanceOutputsReturn []machine.Output + AdvanceReportsReturn []machine.Report + AdvanceLeafsReturn []machine.Hash + AdvanceRemainingReturn uint64 + OutputsHashReturn machine.Hash + OutputsHashError error + OutputsHashProofReturn []machine.Hash + OutputsHashProofError error + AdvanceError error InspectAcceptedReturn bool InspectReportsReturn []machine.Report @@ -827,22 +803,30 @@ func (m *MockRollupsMachine) Hash(_ context.Context) (machine.Hash, error) { } func (m *MockRollupsMachine) OutputsHash(_ context.Context) (machine.Hash, error) { - return m.AdvanceHashReturn, m.HashError + return m.OutputsHashReturn, m.HashError +} + +func (m *MockRollupsMachine) OutputsHashProof(_ context.Context) ([]machine.Hash, error) { + return m.OutputsHashProofReturn, m.OutputsHashProofError +} + +func (m *MockRollupsMachine) WriteCheckpointHash(_ context.Context, _ machine.Hash) error { + return m.CheckpointHashError } -func (m *MockRollupsMachine) Advance(_ context.Context, input []byte) ( - bool, []machine.Output, []machine.Report, machine.Hash, error, +func (m *MockRollupsMachine) Advance(_ context.Context, _ []byte, _ bool) ( + bool, []machine.Output, []machine.Report, []machine.Hash, uint64, machine.Hash, error, ) { return m.AdvanceAcceptedReturn, m.AdvanceOutputsReturn, m.AdvanceReportsReturn, - m.AdvanceHashReturn, + m.AdvanceLeafsReturn, + m.AdvanceRemainingReturn, + m.OutputsHashReturn, m.AdvanceError } -func (m *MockRollupsMachine) Inspect(_ context.Context, - query []byte, -) (bool, []machine.Report, error) { +func (m *MockRollupsMachine) Inspect(_ context.Context, _ []byte) (bool, []machine.Report, error) { return m.InspectAcceptedReturn, m.InspectReportsReturn, m.InspectError } diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 5d17a2d23..29379a8cf 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -132,7 +132,7 @@ func (m *MachineManager) UpdateMachines(ctx context.Context) error { "epoch_index", input.EpochIndex, "input_index", input.Index) - _, err := instance.Advance(ctx, input.RawData, input.Index) + _, err := instance.Advance(ctx, input.RawData, input.EpochIndex, input.Index, false) if err != nil { m.logger.Error("Failed to replay input after snapshot", "application", app.Name, diff --git a/internal/manager/manager_test.go b/internal/manager/manager_test.go index a016fb9a7..f5ef13e3e 100644 --- a/internal/manager/manager_test.go +++ b/internal/manager/manager_test.go @@ -103,9 +103,9 @@ func (s *MachineManagerSuite) TestUpdateMachines() { app2 := &model.Application{ID: 2, Name: "App2"} app3 := &model.Application{ID: 3, Name: "App3"} - mockMachine1 := &MockMachineInstance{application: app1} - mockMachine2 := &MockMachineInstance{application: app2} - mockMachine3 := &MockMachineInstance{application: app3} + mockMachine1 := &DummyMachineInstanceMock{application: app1} + mockMachine2 := &DummyMachineInstanceMock{application: app2} + mockMachine3 := &DummyMachineInstanceMock{application: app3} manager.addMachine(1, mockMachine1) manager.addMachine(2, mockMachine2) @@ -130,7 +130,7 @@ func (s *MachineManagerSuite) TestGetMachine() { Return(nil, nil) manager := NewMachineManager(context.Background(), repo, nil, false) - machine := &MockMachineInstance{application: &model.Application{ID: 1}} + machine := &DummyMachineInstanceMock{application: &model.Application{ID: 1}} // Add a machine manager.addMachine(1, machine) @@ -153,7 +153,7 @@ func (s *MachineManagerSuite) TestHasMachine() { Return(nil, nil) manager := NewMachineManager(context.Background(), repo, nil, false) - machine := &MockMachineInstance{application: &model.Application{ID: 1}} + machine := &DummyMachineInstanceMock{application: &model.Application{ID: 1}} // Add a machine manager.addMachine(1, machine) @@ -173,8 +173,8 @@ func (s *MachineManagerSuite) TestAddMachine() { Return(nil, nil) manager := NewMachineManager(context.Background(), repo, nil, false) - machine1 := &MockMachineInstance{application: &model.Application{ID: 1}} - machine2 := &MockMachineInstance{application: &model.Application{ID: 2}} + machine1 := &DummyMachineInstanceMock{application: &model.Application{ID: 1}} + machine2 := &DummyMachineInstanceMock{application: &model.Application{ID: 2}} // Add first machine added := manager.addMachine(1, machine1) @@ -202,9 +202,9 @@ func (s *MachineManagerSuite) TestRemoveDisabledMachines() { app2 := &model.Application{ID: 2} app3 := &model.Application{ID: 3} - machine1 := &MockMachineInstance{application: app1} - machine2 := &MockMachineInstance{application: app2} - machine3 := &MockMachineInstance{application: app3} + machine1 := &DummyMachineInstanceMock{application: app1} + machine2 := &DummyMachineInstanceMock{application: app2} + machine3 := &DummyMachineInstanceMock{application: app3} manager.addMachine(1, machine1) manager.addMachine(2, machine2) @@ -233,8 +233,8 @@ func (s *MachineManagerSuite) TestApplications() { app1 := &model.Application{ID: 1, Name: "App1"} app2 := &model.Application{ID: 2, Name: "App2"} - machine1 := &MockMachineInstance{application: app1} - machine2 := &MockMachineInstance{application: app2} + machine1 := &DummyMachineInstanceMock{application: app1} + machine2 := &DummyMachineInstanceMock{application: app2} manager.addMachine(1, machine1) manager.addMachine(2, machine2) @@ -290,3 +290,46 @@ func (m *MockMachineRepository) GetLastSnapshot( } return args.Get(0).(*model.Input), args.Error(1) } + +// ------------------------------------------------------------------------------------------------ + +// DummyMachineInstanceMock implements the MachineInstance interface for testing +type DummyMachineInstanceMock struct { + application *model.Application +} + +func (m *DummyMachineInstanceMock) Application() *model.Application { + return m.application +} + +func (m *DummyMachineInstanceMock) ProcessedInputs() uint64 { + return 0 +} + +func (m *DummyMachineInstanceMock) OutputsProof(ctx context.Context, processedInputs uint64) (*model.OutputsProof, error) { + return nil, nil +} + +func (m *DummyMachineInstanceMock) Advance(_ context.Context, _ []byte, _ uint64, _ uint64, _ bool) (*model.AdvanceResult, error) { + return nil, nil +} + +func (m *DummyMachineInstanceMock) Inspect(_ context.Context, _ []byte) (*model.InspectResult, error) { + return nil, nil +} + +func (m *DummyMachineInstanceMock) Synchronize(_ context.Context, _ MachineRepository) error { + return nil +} + +func (m *DummyMachineInstanceMock) CreateSnapshot(_ context.Context, _ uint64, _ string) error { + return nil +} + +func (m *DummyMachineInstanceMock) Hash(_ context.Context) ([32]byte, error) { + return [32]byte{}, nil +} + +func (m *DummyMachineInstanceMock) Close() error { + return nil +} diff --git a/internal/manager/types.go b/internal/manager/types.go index c6d71bdec..a6ad9a0b7 100644 --- a/internal/manager/types.go +++ b/internal/manager/types.go @@ -12,10 +12,13 @@ import ( // MachineInstance defines the interface for a machine instance type MachineInstance interface { Application() *Application - Advance(ctx context.Context, input []byte, index uint64) (*AdvanceResult, error) + Advance(ctx context.Context, input []byte, epochIndex uint64, inputIndex uint64, computeHashes bool) (*AdvanceResult, error) Inspect(ctx context.Context, query []byte) (*InspectResult, error) Synchronize(ctx context.Context, repo MachineRepository) error CreateSnapshot(ctx context.Context, processedInputs uint64, path string) error + ProcessedInputs() uint64 + Hash(ctx context.Context) ([32]byte, error) + OutputsProof(ctx context.Context, processedInputs uint64) (*OutputsProof, error) Close() error } diff --git a/internal/merkle/builder.go b/internal/merkle/builder.go new file mode 100644 index 000000000..9f903a9e8 --- /dev/null +++ b/internal/merkle/builder.go @@ -0,0 +1,442 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package merkle + +import ( + "errors" + "fmt" + "math/big" + "slices" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var ( + zero = big.NewInt(0) + one = big.NewInt(1) + overflowValue = new(big.Int).Lsh(one, 256) + overflowMask = new(big.Int).Sub(overflowValue, one) +) + +// MerkleProof: dave/common-rs/merkle/src/tree.rs +type Proof struct { + Pos *big.Int + Node common.Hash + Siblings []common.Hash +} + +func Leaf(node common.Hash, pos *big.Int) *Proof { + return &Proof{ + Node: node, + Pos: pos, + Siblings: nil, + } +} + +func (proof *Proof) BuildRoot() common.Hash { + two := big.NewInt(2) + rootHash := proof.Node + + for i, s := range proof.Siblings { + + // ((pos >> i) % 2) == 0 + if new(big.Int).Rem(new(big.Int).Rsh(proof.Pos, uint(i)), two).Cmp(zero) == 0 { + rootHash = crypto.Keccak256Hash(rootHash[:], s[:]) + } else { + rootHash = crypto.Keccak256Hash(s[:], rootHash[:]) + } + } + return rootHash +} + +func (proof *Proof) BuildRootChildren() (common.Hash, common.Hash, error) { + if len(proof.Siblings) == 0 { + zero := common.Hash{} + return zero, zero, errors.New("Siblings array is empty") + } + two := big.NewInt(2) + height := len(proof.Siblings) + childHash := proof.Node + + for i, s := range proof.Siblings[:height-1] { + + // ((pos >> i) % 2) == 0 + if new(big.Int).Rem(new(big.Int).Rsh(proof.Pos, uint(i)), two).Cmp(zero) == 0 { + childHash = crypto.Keccak256Hash(childHash[:], s[:]) + } else { + childHash = crypto.Keccak256Hash(s[:], childHash[:]) + } + } + + // ((pos >> (height-1)) % 2) == 0 + if new(big.Int).Rem(new(big.Int).Rsh(proof.Pos, uint(height-1)), two).Cmp(zero) == 0 { + return childHash, proof.Siblings[height-1], nil + } else { + return proof.Siblings[height-1], childHash, nil + } +} + +func (proof *Proof) VerifyRoot(other common.Hash) bool { + return proof.BuildRoot() == other +} + +func (proof *Proof) PushHash(h common.Hash) { + proof.Siblings = append(proof.Siblings, h) +} + +func RootChildrenFromProof(leaf common.Hash, siblings []common.Hash, index uint64) (common.Hash, common.Hash, error) { + p := &Proof{ + Pos: new(big.Int).SetUint64(index), + Node: leaf, + Siblings: siblings, + } + return p.BuildRootChildren() +} + +//////////////////////////////////////////////////////////////////////////////// + +// MerkleTree: dave/common-rs/merkle/src/tree.rs +type Tree struct { + RootHash common.Hash + Height uint32 + Subtrees *InnerNode +} + +// InnerNode: dave/common-rs/merkle/src/tree.rs +// Emulate the rust enum type with a struct containing both {Pair, Iterated}. +type InnerNode struct { + // Pair + LHS, RHS *Tree + + // Iterated + Child *Tree +} + +func (inner *InnerNode) Valid() bool { + isPair := (inner.LHS != nil && inner.RHS != nil) + isIterated := inner.Child != nil + return (isPair || isIterated) && !(isPair && isIterated) // xor +} + +func (inner *InnerNode) Children() (*Tree, *Tree) { + if !inner.Valid() { + panic(fmt.Sprintf("invalid InnerNode state: %v\n", inner)) + } + + if inner.Child != nil { + return inner.Child, inner.Child + } else { + return inner.LHS, inner.RHS + } +} + +func TreeLeaf(hash common.Hash) *Tree { + return &Tree{ + Height: 0, + RootHash: hash, + Subtrees: nil, + } +} + +func (tree *Tree) GetRootHash() common.Hash { + return tree.RootHash +} + +func (tree *Tree) FindChildByHash(hash common.Hash) *InnerNode { + if inner := tree.Subtrees; inner != nil { + if !inner.Valid() { + panic(fmt.Sprintf("invalid InnerNode state: %v\n", inner)) + } + + if inner.Child != nil { + child := inner.Child.FindChildByHash(hash) + if child != nil { + return child + } + } else { + lhs := inner.LHS.FindChildByHash(hash) + if lhs != nil { + return lhs + } + + rhs := inner.LHS.FindChildByHash(hash) + if rhs != nil { + return rhs + } + } + } + return nil // not found +} + +func (tree *Tree) Join(other *Tree) *Tree { + return &Tree{ + RootHash: crypto.Keccak256Hash(tree.RootHash[:], other.RootHash[:]), + Height: tree.Height + 1, + Subtrees: &InnerNode{ + LHS: tree, + RHS: other, + }, + } +} + +func (tree *Tree) Iterated(rep uint64) *Tree { + root := tree + for range rep { + root = &Tree{ + RootHash: crypto.Keccak256Hash(root.RootHash[:], root.RootHash[:]), + Height: root.Height + 1, + Subtrees: &InnerNode{ + Child: root, + }, + } + } + return root +} + +func (tree *Tree) ProveLeaf(index *big.Int) *Proof { + return tree.ProveLeafRec(index) +} + +func (tree *Tree) ProveLast() *Proof { + // index = (1 << height) - 1 + index := new(big.Int).Sub( + new(big.Int).Lsh( + one, + uint(tree.Height), + ), + one, + ) + return tree.ProveLeaf(index) +} + +func (tree *Tree) ProveLeafRec(index *big.Int) *Proof { + numLeafs := new(big.Int).Lsh(one, uint(tree.Height)) + if numLeafs.Cmp(index) <= 0 { + panic(fmt.Sprintf("index out of bounds: %v, %v", numLeafs, index)) + } + + subtree := tree.Subtrees + if subtree == nil { + if index.Cmp(zero) != 0 { + panic(fmt.Sprintf("invalid Tree state: %v", tree)) + } + if tree.Height != 0 { + panic(fmt.Sprintf("invalid Tree state: %v", tree)) + } + return Leaf(tree.RootHash, index) + } + + shiftAmount := uint(tree.Height - 1) + isLeftLeaf := new(big.Int).Rsh(index, shiftAmount).Cmp(zero) == 0 + + // innerIndex = index & !(1 << shiftAmount) + innerIndex := new(big.Int).And( + index, + new(big.Int).Not( + new(big.Int).Lsh( + one, + shiftAmount, + ), + ), + ) + + lhs, rhs := subtree.Children() + if isLeftLeaf { + proof := lhs.ProveLeafRec(innerIndex) + proof.PushHash(rhs.RootHash) + proof.Pos = index + return proof + } else { + proof := rhs.ProveLeafRec(innerIndex) + proof.PushHash(lhs.RootHash) + proof.Pos = index + return proof + } +} + +//////////////////////////////////////////////////////////////////////////////// + +// Node: common-rs/merkle/src/tree_builder.rs +type Node struct { + Tree *Tree + AccumulatedCount *big.Int +} + +type Builder struct { + Trees []Node +} + +func (b *Builder) Height() (uint32, bool) { + n := len(b.Trees) + if n == 0 { + return 0, false + } + return b.Trees[n-1].Tree.Height, true +} + +func (b *Builder) Count() (*big.Int, bool) { + n := len(b.Trees) + if n == 0 { + return nil, false + } + return b.Trees[n-1].AccumulatedCount, true +} + +func (b *Builder) CanBuild() bool { + n := len(b.Trees) + if n == 0 { + return false + } + return isPow2(b.Trees[n-1].AccumulatedCount) +} + +func (b *Builder) Append(leaf *Tree) { + b.AppendRepeated(leaf, big.NewInt(1)) +} + +func (b *Builder) AppendRepeatedUint64(leaf *Tree, reps uint64) { + b.AppendRepeated(leaf, new(big.Int).SetUint64(reps)) +} + +func (b *Builder) AppendRepeated(leaf *Tree, reps *big.Int) { + if reps.Cmp(zero) <= 0 { + panic("invalid repetitions") + } + + accumulatedCount := b.CalculateAccumulatedCount(reps) + if height, ok := b.Height(); ok { + if height != leaf.Height { + panic("mismatched tree size") + } + } + b.Trees = append(b.Trees, Node{ + Tree: leaf, + AccumulatedCount: accumulatedCount, + }) +} + +func (b *Builder) Build() *Tree { + if count, ok := b.Count(); ok { + if !isCountPow2(count) { + panic(fmt.Sprintf("builder has %v leafs, which is not a power of two", count)) + } + log2Size := countTrailingZeroes(count) + return buildMerkle(b.Trees, log2Size, big.NewInt(0)) + } else { + panic("no leafs in the merkle builder") + } +} + +func (b *Builder) CalculateAccumulatedCount(reps *big.Int) *big.Int { + n := len(b.Trees) + if n != 0 { + if reps.Cmp(zero) == 0 { + panic("merkle builder is full") + } + + accumulatedCount := new(big.Int).And( + new(big.Int).Add(reps, b.Trees[n-1].AccumulatedCount), + overflowMask, + ) + if reps.Cmp(accumulatedCount) >= 0 { + panic("merkle tree overflow") + } + return accumulatedCount + } else { + return reps + } +} + +func buildMerkle(trees []Node, log2Size uint, stride *big.Int) *Tree { + size := new(big.Int).And( + new(big.Int).Lsh(one, log2Size), + overflowMask, + ) + + firstTime := new(big.Int).Add(new(big.Int).Mul(stride, size), one) + lastTime := new(big.Int).Mul(new(big.Int).Add(stride, one), size) + + firstCell := findCellContaining(trees, firstTime) + lastCell := findCellContaining(trees, lastTime) + + if firstCell == lastCell { + tree := trees[firstCell].Tree + iterated := tree.Iterated(uint64(log2Size)) + return iterated + } + + left := buildMerkle(trees[firstCell:(lastCell+1)], + log2Size-1, + new(big.Int).Lsh(stride, 1), + ) + + right := buildMerkle(trees[firstCell:(lastCell+1)], + log2Size-1, + new(big.Int).Add(new(big.Int).Lsh(stride, 1), one), + ) + + return left.Join(right) +} + +func findCellContaining(trees []Node, elem *big.Int) uint { + left := uint(0) + right := uint(len(trees) - 1) + + for left < right { + needle := left + (right-left)/2 + + x := new(big.Int).And( + new(big.Int).Sub(trees[needle].AccumulatedCount, one), + overflowMask, + ) + y := new(big.Int).And( + new(big.Int).Sub(elem, one), + overflowMask, + ) + if x.Cmp(y) < 0 { + left = needle + 1 + } else { + right = needle + } + } + return left +} + +//////////////////////////////////////////////////////////////////////////////// + +func isPow2(x *big.Int) bool { + if x.Sign() <= 0 { + return false + } + + // x & (x-1) == 0 + return new(big.Int).And( + x, + new(big.Int).Sub( + x, + one, + ), + ).Cmp(zero) == 0 +} + +func isCountPow2(x *big.Int) bool { + return x.Cmp(big.NewInt(0)) == 0 || isPow2(x) +} + +func countTrailingZeroes(x *big.Int) uint { + count := uint(0) + + // each byte from least to most significant +brk: + for _, b := range slices.Backward(x.Bytes()) { + for i := range 8 { + if b>>i&1 != 0 { + break brk + } + count++ + } + } + return count +} diff --git a/internal/merkle/builder_test.go b/internal/merkle/builder_test.go new file mode 100644 index 000000000..18a5d1d33 --- /dev/null +++ b/internal/merkle/builder_test.go @@ -0,0 +1,254 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package merkle + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" +) + +var ( + oneDigest = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") + zeroDigest = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") +) + +func TestIsCountPow2(t *testing.T) { + assert.True(t, isCountPow2(big.NewInt(0))) + assert.True(t, isCountPow2(big.NewInt(1))) + assert.True(t, isCountPow2(big.NewInt(2))) + assert.False(t, isCountPow2(big.NewInt(3))) + assert.True(t, isCountPow2(big.NewInt(4))) + assert.False(t, isCountPow2(big.NewInt(5))) +} + +// repanicked +//func TestRepeatZero(t *testing.T) { +// defer recover() +// +// builder := Builder{} +// builder.AppendRepeatedUint64(TreeLeaf(zeroHash), 0) +//} + +func TestSimple0(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(oneDigest)) + treeRoot := builder.Build().RootHash + expected := oneDigest + + assert.Equal(t, expected, treeRoot) +} + +func TestSimple1(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(zeroDigest)) + builder.Append(TreeLeaf(oneDigest)) + treeRoot := builder.Build().RootHash + + expected := TreeLeaf(zeroDigest).Join(TreeLeaf(oneDigest)).RootHash + + assert.Equal(t, expected, treeRoot) +} + +func TestSimple2(t *testing.T) { + builder := Builder{} + builder.AppendRepeatedUint64(TreeLeaf(oneDigest), 2) + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 2) + treeRoot := builder.Build().RootHash + + lhs := TreeLeaf(oneDigest).Join(TreeLeaf(oneDigest)) + rhs := TreeLeaf(zeroDigest).Join(TreeLeaf(zeroDigest)) + expected := lhs.Join(rhs).RootHash + + assert.Equal(t, expected, treeRoot) +} + +func TestSimple3(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(zeroDigest)) + builder.AppendRepeatedUint64(TreeLeaf(oneDigest), 2) + builder.Append(TreeLeaf(zeroDigest)) + treeRoot := builder.Build().RootHash + + lhs := TreeLeaf(zeroDigest).Join(TreeLeaf(oneDigest)) + rhs := TreeLeaf(oneDigest).Join(TreeLeaf(zeroDigest)) + expected := lhs.Join(rhs).RootHash + + assert.Equal(t, expected, treeRoot) +} + +func TestMerkleBuilder8(t *testing.T) { + builder := Builder{} + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 2) + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 6) + assert.True(t, builder.CanBuild()) + + merkle := builder.Build() + assert.Equal(t, merkle.RootHash, TreeLeaf(zeroDigest).Iterated(3).RootHash) +} + +func TestMerkleBuilder64(t *testing.T) { + one := big.NewInt(1) + two := big.NewInt(2) + reps := new(big.Int).Sub(new(big.Int).Lsh(one, 64), two) + + builder := Builder{} + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 2) + builder.AppendRepeated(TreeLeaf(zeroDigest), reps) + assert.True(t, builder.CanBuild()) + + merkle := builder.Build() + assert.Equal(t, merkle.RootHash, TreeLeaf(zeroDigest).Iterated(64).RootHash) +} + +func TestMerkleBuilder256(t *testing.T) { + one := big.NewInt(1) + reps := new(big.Int).Lsh(one, 256) + + builder := Builder{} + builder.AppendRepeated(TreeLeaf(zeroDigest), reps) + assert.True(t, builder.CanBuild()) + + merkle := builder.Build() + assert.Equal(t, merkle.RootHash, TreeLeaf(zeroDigest).Iterated(256).RootHash) +} + +func TestAppendAndRepeated(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(zeroDigest)) + assert.True(t, builder.CanBuild()) + tree1 := builder.Build() + + builder = Builder{} + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 1) + tree2 := builder.Build() + + assert.Equal(t, tree1, tree2) +} + +func TestBuildRootChildren1(t *testing.T) { + p := Proof{ + Pos: big.NewInt(1), + Node: common.HexToHash("0x01"), + Siblings: []common.Hash{ + common.HexToHash("0x02"), + }, + } + rootHash := p.BuildRoot() + lhs, rhs, err := p.BuildRootChildren() + + assert.Nil(t, err) + assert.Equal(t, rootHash, crypto.Keccak256Hash(lhs[:], rhs[:])) +} + +func TestBuildRootChildren2(t *testing.T) { + p := Proof{ + Pos: big.NewInt(1), + Node: common.HexToHash("0x01"), + Siblings: []common.Hash{ + common.HexToHash("0x02"), + common.HexToHash("0x03"), + }, + } + rootHash := p.BuildRoot() + lhs, rhs, err := p.BuildRootChildren() + + assert.Nil(t, err) + assert.Equal(t, rootHash, crypto.Keccak256Hash(lhs[:], rhs[:])) +} + +func TestBuildRootChildrenAgainstBuilder(t *testing.T) { + builder := Builder{} + builder.AppendRepeatedUint64(TreeLeaf(common.HexToHash("0x976dc34e226f0c9803d556f26426aaa82ba7b5f96a5ed094f4f150c3c27aeaf5")), 16777216) + builder.AppendRepeatedUint64(TreeLeaf(common.HexToHash("0xfffeb0e2d6fc065fdcf03c25e23e9730528ca7b890308765b0e6b07586db9c6e")), 16777216) + builder.AppendRepeatedUint64(TreeLeaf(common.HexToHash("0x1588d343bd73f167bf4886b8ab7694b4d83b60087ddbdb445c427c16f26d2644")), 16777216) + builder.AppendRepeatedUint64(TreeLeaf(common.HexToHash("0x1588d343bd73f167bf4886b8ab7694b4d83b60087ddbdb445c427c16f26d2644")), 281474926379008) + + builderTree := builder.Build() + proofBuilder := builderTree.ProveLast() + + rootHashBuilder := proofBuilder.BuildRoot() + lhsBuilder, rhsBuilder, err := proofBuilder.BuildRootChildren() + + proofSiblings := Proof{ + Pos: new(big.Int).SetUint64((1 << 48) - 1), + Node: common.HexToHash("0x1588d343bd73f167bf4886b8ab7694b4d83b60087ddbdb445c427c16f26d2644"), + Siblings: []common.Hash{ + common.HexToHash("0x1588d343bd73f167bf4886b8ab7694b4d83b60087ddbdb445c427c16f26d2644"), + common.HexToHash("0x6e94fed4ae1c88a9e09a36ad7b6a4e1cf16d3b9ca3af8cd2d7bb069451690e64"), + common.HexToHash("0x546d5eed29c39ab4194c600a8534e3d3502478384eb3dc9add095703e422ed38"), + common.HexToHash("0x38caf4f802a07958ed558995bab538a137d33b6172294362e6b21ac7b7121fd9"), + common.HexToHash("0xb5daca74fbcfb5af2f8e3c38dc8ad05ec83114b7bba846b3330950266aa40eab"), + common.HexToHash("0x020b890661da486ee8869b2b6109f4bd795f8bf4bbb88a8d3589a0603f3b824a"), + common.HexToHash("0xc2c4c83bec36615b797be1407a8e55b08b5cfbeb11dfe6a1db7f942c58d379ed"), + common.HexToHash("0x1998d07cf9e1006cc3b0f3c5fe08dda1119e471c20f7ceb29695cee940a7e75f"), + common.HexToHash("0x443631cea815fc6298f029109fe1fda376d3bb185f8e21544829b4b09c947f9c"), + common.HexToHash("0x159ad50b10bba4d6d2407ce012199523e3f45f2360ba288618c63bf8b91a56ab"), + common.HexToHash("0x75e8815dec610854174bee1c387a04b78ac942f9f675c7ddce7ec3f6edd69e03"), + common.HexToHash("0xca9bd9e206bd4e98e4443663dd4eef65c94cf02fd62abd30e36569c56191e9d2"), + common.HexToHash("0xed97ed05a833b3f0615df43ee27fcc8dc742ccbf84702719747be4f93afce440"), + common.HexToHash("0x228fddde650b389a339dab2088d3e1c9858989a1eecf1a424d0f4e1e70284047"), + common.HexToHash("0xb1b6565d519dc4b85d75d074a37a56186f27b240977d2a2ec0e6860217745316"), + common.HexToHash("0x32f63e77fd8f3762e6ae0efc82b62e0bd99430e656afde91792f4eb3c0eb2d0b"), + common.HexToHash("0xe50e2d01372de278e92c28a75277bbd83c747b89ef9933cb55c41d4c25f4e043"), + common.HexToHash("0x3b34305f34e7749600e503c11ad3bd739f3981a16470db39f5d9b324d0c9c2b3"), + common.HexToHash("0xc62e8ec325b8c50fc32e54b456943f7e191181b38e8decae1d784b06ec331fcd"), + common.HexToHash("0x176cd516c027b04ea15ef1bf3040f0295880ee9b06124a61707a1e95fd4d7032"), + common.HexToHash("0xf0ee382cc41769e98ccd36d2996dca753af0dbdf13ea4e121f896fe46ab734c8"), + common.HexToHash("0xc1ecfb62885ee2bb05bc1de974068c5e64f4f5a23dfdfb85d30dd47314ff3042"), + common.HexToHash("0x3c8db9c54f3cdc333d266c021b7b82fdd97c6a7936741fe482fe8792206485a9"), + common.HexToHash("0x599a4d9a455da52573f0a15a9716289e722411d814fffbf854ac0cf9c84f1b64"), + common.HexToHash("0xb57a65f271d3f4bf499536bcb1158ac8a19ec0b9b87204b0cd12f171cfaf5b50"), + common.HexToHash("0x66666cb6c0ba3ad91588a289c7207eeb68bc451840126b97aae50e8f9637ddae"), + common.HexToHash("0x416826c4ee8d20ec51cf42390a2948ef8d15c1d4169143c07152583bdecc64a4"), + common.HexToHash("0x759d0b439a3e16c4bcd780b724775279e4f6c20a1cee40c4c4452ce5444b8b53"), + common.HexToHash("0x7888795e43e9f91907342bf8694756ad251bce7d6aeeea393a7da65598873b61"), + common.HexToHash("0x21a9e92e15cd6e5d8c7b0f20416a7ced013f4f0dc7f42cce889cda91ea92d16a"), + common.HexToHash("0x8e30f629baa35c5de738d64d562e0492623fc1b42aa3205711c3b740124f281c"), + common.HexToHash("0xd3c85a40b8e5ecd91966a47acba3a6abb17b2995bb10fb61bf84b57ca25a7516"), + common.HexToHash("0x76e076fb82921263e6cff4a5579419d3b8fb9c9398f147bbc7108f72912cd67a"), + common.HexToHash("0x43bdd668fdaef54e7a315e68918095373b4d7538fb383535305265e29346e0c1"), + common.HexToHash("0x19df614f8b4749c1a9ed4ed3949691d0c2d9209541e1fbf2c30dbc32e990bcd9"), + common.HexToHash("0xfc2379f2a164ec902fff26ff3840383dcba0ea54f04f703b4f56844e4db1dbce"), + common.HexToHash("0x1dd54410bdd1baaeafc1dbf9c04f5f05f698fdbd7d3209fa55aeec1c7d0fe19a"), + common.HexToHash("0x721ad44b29a2c63e1ba75fd4774fa0e1a0a5bff806d4b441debfd6a95e75f5c9"), + common.HexToHash("0xfa1144bd963f10d4a325184fbe43a36ec3f96a4212d71592e84679975117ef5c"), + common.HexToHash("0xefef7b6fab7d64ac6f6764f932835af1f9015d11dc94ae1112956fd801cd8a30"), + common.HexToHash("0xea1e381daf0fd8ecfd94994fcc88fb2874f08ab3bbc397d86389d433e7fc8368"), + common.HexToHash("0xcfeff346f98bb48c38a0c650fe7fa2b9669ebdb37128cbe813a16a34e42fd7bf"), + common.HexToHash("0x86835aafc70841ff671b492912db2407511b5a631823f02f8b2bdc0308103a82"), + common.HexToHash("0xd86da47a2e5be3b6c43d25ae86977ce1db075ce9f82ead96d090d7a34a3bdc4f"), + common.HexToHash("0x081d234561620d2bf78176378d004ed891749c04600d1abd053616ce4aed15b4"), + common.HexToHash("0x85e7040a6c668e5fee700b3a3af4b9a0de0806a832f53458860c28ccd3ec92b8"), + common.HexToHash("0xcbb7692c46db878f98bd4ed47f028397ca1508eb381b8eb6f08bc0c2c9844d29"), + common.HexToHash("0x3dd5ebb22e39edc69e6259ef7f3fab49d4e73078e61c5a26737e8a387fa456e6"), + }, + } + + rootHashProof := proofSiblings.BuildRoot() + lhsProof, rhsProof, err := proofSiblings.BuildRootChildren() + + assert.Nil(t, err) + assert.Equal(t, rootHashBuilder, rootHashProof) + assert.Equal(t, lhsProof, lhsBuilder) + assert.Equal(t, rhsProof, rhsBuilder) + assert.Equal(t, rootHashBuilder, crypto.Keccak256Hash(lhsBuilder[:], rhsBuilder[:])) + assert.Equal(t, rootHashProof, crypto.Keccak256Hash(lhsProof[:], rhsProof[:])) +} + +// repanicked +//func TestBuildNotPow2(t *testing.T) { +// defer recover() +// +// builder := Builder{} +// builder.Append(TreeLeaf(zeroDigest)) +// builder.Append(TreeLeaf(zeroDigest)) +// builder.Append(TreeLeaf(zeroDigest)) +// assert.False(t, builder.CanBuild()) +// +// builder.Build() +//} diff --git a/internal/model/models.go b/internal/model/models.go index 8c19f583e..82bcb6d38 100644 --- a/internal/model/models.go +++ b/internal/model/models.go @@ -17,24 +17,27 @@ import ( ) type Application struct { - ID int64 `sql:"primary_key" json:"-"` - Name string `json:"name"` - IApplicationAddress common.Address `json:"iapplication_address"` - IConsensusAddress common.Address `json:"iconsensus_address"` - IInputBoxAddress common.Address `json:"iinputbox_address"` - TemplateHash common.Hash `json:"template_hash"` - TemplateURI string `json:"-"` - EpochLength uint64 `json:"epoch_length"` - DataAvailability []byte `json:"data_availability"` - State ApplicationState `json:"state"` - Reason *string `json:"reason"` - IInputBoxBlock uint64 `json:"iinputbox_block"` - LastInputCheckBlock uint64 `json:"last_input_check_block"` - LastOutputCheckBlock uint64 `json:"last_output_check_block"` - ProcessedInputs uint64 `json:"processed_inputs"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExecutionParameters ExecutionParameters `json:"execution_parameters"` + ID int64 `sql:"primary_key" json:"-"` + Name string `json:"name"` + IApplicationAddress common.Address `json:"iapplication_address"` + IConsensusAddress common.Address `json:"iconsensus_address"` + IInputBoxAddress common.Address `json:"iinputbox_address"` + TemplateHash common.Hash `json:"template_hash"` + TemplateURI string `json:"-"` + EpochLength uint64 `json:"epoch_length"` + DataAvailability []byte `json:"data_availability"` + ConsensusType Consensus `json:"consensus_type"` + State ApplicationState `json:"state"` + Reason *string `json:"reason"` + IInputBoxBlock uint64 `json:"iinputbox_block"` + LastEpochCheckBlock uint64 `json:"last_epoch_check_block"` + LastInputCheckBlock uint64 `json:"last_input_check_block"` + LastOutputCheckBlock uint64 `json:"last_output_check_block"` + LastTournamentCheckBlock uint64 `json:"last_tournament_check_block"` + ProcessedInputs uint64 `json:"processed_inputs"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + ExecutionParameters ExecutionParameters `json:"execution_parameters"` } // HasDataAvailabilitySelector checks if the application's DataAvailability @@ -49,20 +52,24 @@ func (a *Application) MarshalJSON() ([]byte, error) { // Define a new structure that embeds the alias but overrides the hex fields. aux := &struct { *Alias - DataAvailability string `json:"data_availability"` - IInputBoxBlock string `json:"iinputbox_block"` - LastInputCheckBlock string `json:"last_input_check_block"` - LastOutputCheckBlock string `json:"last_output_check_block"` - EpochLength string `json:"epoch_length"` - ProcessedInputs string `json:"processed_inputs"` + DataAvailability string `json:"data_availability"` + IInputBoxBlock string `json:"iinputbox_block"` + LastEpochCheckBlock string `json:"last_epoch_check_block"` + LastInputCheckBlock string `json:"last_input_check_block"` + LastOutputCheckBlock string `json:"last_output_check_block"` + LastTournamentCheckBlock string `json:"last_tournament_check_block"` + EpochLength string `json:"epoch_length"` + ProcessedInputs string `json:"processed_inputs"` }{ - Alias: (*Alias)(a), - DataAvailability: "0x" + hex.EncodeToString(a.DataAvailability), - IInputBoxBlock: fmt.Sprintf("0x%x", a.IInputBoxBlock), - LastInputCheckBlock: fmt.Sprintf("0x%x", a.LastInputCheckBlock), - LastOutputCheckBlock: fmt.Sprintf("0x%x", a.LastOutputCheckBlock), - EpochLength: fmt.Sprintf("0x%x", a.EpochLength), - ProcessedInputs: fmt.Sprintf("0x%x", a.ProcessedInputs), + Alias: (*Alias)(a), + DataAvailability: "0x" + hex.EncodeToString(a.DataAvailability), + IInputBoxBlock: fmt.Sprintf("0x%x", a.IInputBoxBlock), + LastEpochCheckBlock: fmt.Sprintf("0x%x", a.LastEpochCheckBlock), + LastInputCheckBlock: fmt.Sprintf("0x%x", a.LastInputCheckBlock), + LastOutputCheckBlock: fmt.Sprintf("0x%x", a.LastOutputCheckBlock), + LastTournamentCheckBlock: fmt.Sprintf("0x%x", a.LastTournamentCheckBlock), + EpochLength: fmt.Sprintf("0x%x", a.EpochLength), + ProcessedInputs: fmt.Sprintf("0x%x", a.ProcessedInputs), } return json.Marshal(aux) } @@ -72,12 +79,14 @@ func (a *Application) UnmarshalJSON(in []byte) error { aux := &struct { *Alias - DataAvailability string `json:"data_availability"` - IInputBoxBlock string `json:"iinputbox_block"` - LastInputCheckBlock string `json:"last_input_check_block"` - LastOutputCheckBlock string `json:"last_output_check_block"` - EpochLength string `json:"epoch_length"` - ProcessedInputs string `json:"processed_inputs"` + DataAvailability string `json:"data_availability"` + IInputBoxBlock string `json:"iinputbox_block"` + LastInputCheckBlock string `json:"last_input_check_block"` + LastOutputCheckBlock string `json:"last_output_check_block"` + LastEpochCheckBlock string `json:"last_epoch_check_block"` + LastTournamentCheckBlock string `json:"last_tournament_check_block"` + EpochLength string `json:"epoch_length"` + ProcessedInputs string `json:"processed_inputs"` }{} var err error @@ -109,6 +118,16 @@ func (a *Application) UnmarshalJSON(in []byte) error { return err } + a.LastEpochCheckBlock, err = ParseHexUint64(aux.LastEpochCheckBlock) + if err != nil { + return err + } + + a.LastTournamentCheckBlock, err = ParseHexUint64(aux.LastTournamentCheckBlock) + if err != nil { + return err + } + a.EpochLength, err = ParseHexUint64(aux.EpochLength) if err != nil { return err @@ -122,6 +141,10 @@ func (a *Application) UnmarshalJSON(in []byte) error { return nil } +func (a *Application) IsDaveConsensus() bool { + return a.ConsensusType == Consensus_PRT +} + type ApplicationState string const ( @@ -165,6 +188,49 @@ func (e ApplicationState) String() string { return string(e) } +type Consensus string + +const ( + Consensus_Authority Consensus = "AUTHORITY" + Consensus_Quorum Consensus = "QUORUM" + Consensus_PRT Consensus = "PRT" +) + +var ConsensusAllValues = []Consensus{ + Consensus_Authority, + Consensus_Quorum, + Consensus_PRT, +} + +func (e *Consensus) Scan(value any) error { + var enumValue string + switch val := value.(type) { + case string: + enumValue = val + case []byte: + enumValue = string(val) + default: + return errors.New("invalid value for ConsensusType enum. Enum value has to be of type string or []byte") + } + + switch enumValue { + case "AUTHORITY": + *e = Consensus_Authority + case "QUORUM": + *e = Consensus_Quorum + case "PRT": + *e = Consensus_PRT + default: + return errors.New("invalid value '" + enumValue + "' for Consensus enum") + } + + return nil +} + +func (e Consensus) String() string { + return string(e) +} + const DATA_AVAILABILITY_SELECTOR_SIZE = 4 type DataAvailabilitySelector [DATA_AVAILABILITY_SELECTOR_SIZE]byte @@ -482,25 +548,42 @@ func ParseHexUint64(s string) (uint64, error) { return strconv.ParseUint(s[2:], 16, 64) } +func ParseHexInt64(s string) (int64, error) { + if s == "" || len(s) < 3 || (!strings.HasPrefix(s, "0x") && !strings.HasPrefix(s, "0X")) { + return 0, fmt.Errorf("invalid hex string: %s", s) + } + return strconv.ParseInt(s[2:], 16, 64) +} + func ParseHexDuration(s string) (time.Duration, error) { - ns, err := ParseHexUint64(s) + ns, err := ParseHexInt64(s) if err != nil { return 0, err } + if ns < 0 { + return 0, fmt.Errorf("duration cannot be negative: %s", s) + } return time.Duration(ns), nil } type Epoch struct { - ApplicationID int64 `sql:"primary_key" json:"-"` - Index uint64 `sql:"primary_key" json:"index"` - FirstBlock uint64 `json:"first_block"` - LastBlock uint64 `json:"last_block"` - ClaimHash *common.Hash `json:"claim_hash"` - ClaimTransactionHash *common.Hash `json:"claim_transaction_hash"` - Status EpochStatus `json:"status"` - VirtualIndex uint64 `json:"virtual_index"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + ApplicationID int64 `sql:"primary_key" json:"-"` + Index uint64 `sql:"primary_key" json:"index"` + FirstBlock uint64 `json:"first_block"` + LastBlock uint64 `json:"last_block"` + InputIndexLowerBound uint64 `json:"input_index_lower_bound"` + InputIndexUpperBound uint64 `json:"input_index_upper_bound"` + MachineHash *common.Hash `json:"machine_hash"` + OutputsMerkleRoot *common.Hash `json:"claim_hash"` + OutputsMerkleProof []common.Hash `json:"outputs_merkle_proof,omitempty"` + ClaimTransactionHash *common.Hash `json:"claim_transaction_hash"` + Commitment *common.Hash `json:"commitment"` + CommitmentProof []common.Hash `json:"commitment_proof,omitempty"` + TournamentAddress *common.Address `json:"tournament_address"` + Status EpochStatus `json:"status"` + VirtualIndex uint64 `json:"virtual_index"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` } func (e *Epoch) MarshalJSON() ([]byte, error) { @@ -508,17 +591,21 @@ func (e *Epoch) MarshalJSON() ([]byte, error) { type Alias Epoch // Define a new structure that embeds the alias but overrides the hex fields. aux := &struct { - Index string `json:"index"` - FirstBlock string `json:"first_block"` - LastBlock string `json:"last_block"` - VirtualIndex string `json:"virtual_index"` + Index string `json:"index"` + FirstBlock string `json:"first_block"` + LastBlock string `json:"last_block"` + InputIndexLowerBound string `json:"input_index_lower_bound"` + InputIndexUpperBound string `json:"input_index_upper_bound"` + VirtualIndex string `json:"virtual_index"` *Alias }{ - Index: fmt.Sprintf("0x%x", e.Index), - FirstBlock: fmt.Sprintf("0x%x", e.FirstBlock), - LastBlock: fmt.Sprintf("0x%x", e.LastBlock), - VirtualIndex: fmt.Sprintf("0x%x", e.VirtualIndex), - Alias: (*Alias)(e), + Index: fmt.Sprintf("0x%x", e.Index), + FirstBlock: fmt.Sprintf("0x%x", e.FirstBlock), + LastBlock: fmt.Sprintf("0x%x", e.LastBlock), + InputIndexLowerBound: fmt.Sprintf("0x%x", e.InputIndexLowerBound), + InputIndexUpperBound: fmt.Sprintf("0x%x", e.InputIndexUpperBound), + VirtualIndex: fmt.Sprintf("0x%x", e.VirtualIndex), + Alias: (*Alias)(e), } return json.Marshal(aux) } @@ -528,10 +615,12 @@ func (e *Epoch) UnmarshalJSON(in []byte) error { aux := &struct { *Alias - Index string `json:"index"` - FirstBlock string `json:"first_block"` - LastBlock string `json:"last_block"` - VirtualIndex string `json:"virtual_index"` + Index string `json:"index"` + FirstBlock string `json:"first_block"` + LastBlock string `json:"last_block"` + InputIndexLowerBound string `json:"input_index_lower_bound"` + InputIndexUpperBound string `json:"input_index_upper_bound"` + VirtualIndex string `json:"virtual_index"` }{} var err error @@ -558,6 +647,16 @@ func (e *Epoch) UnmarshalJSON(in []byte) error { return err } + e.InputIndexLowerBound, err = ParseHexUint64(aux.InputIndexLowerBound) + if err != nil { + return err + } + + e.InputIndexUpperBound, err = ParseHexUint64(aux.InputIndexUpperBound) + if err != nil { + return err + } + e.VirtualIndex, err = ParseHexUint64(aux.VirtualIndex) if err != nil { return err @@ -834,13 +933,22 @@ type NodeConfig[T any] struct { UpdatedAt time.Time } +type OutputsProof struct { + OutputsHash common.Hash + OutputsHashProof [][32]byte + MachineHash common.Hash +} + type AdvanceResult struct { - InputIndex uint64 - Status InputCompletionStatus - Outputs [][]byte - Reports [][]byte - OutputsHash common.Hash - MachineHash *common.Hash + OutputsProof + EpochIndex uint64 + InputIndex uint64 + Status InputCompletionStatus + Outputs [][]byte + Reports [][]byte + Hashes [][32]byte + RemainingMetaCycles uint64 + IsDaveConsensus bool } type InspectResult struct { @@ -907,16 +1015,361 @@ func (e DefaultBlock) String() string { type MonitoredEvent string const ( - MonitoredEvent_InputAdded MonitoredEvent = "InputAdded" - MonitoredEvent_OutputExecuted MonitoredEvent = "OutputExecuted" - MonitoredEvent_ClaimSubmitted MonitoredEvent = "ClaimSubmitted" - MonitoredEvent_ClaimAccepted MonitoredEvent = "ClaimAccepted" + MonitoredEvent_InputAdded MonitoredEvent = "InputAdded" + MonitoredEvent_OutputExecuted MonitoredEvent = "OutputExecuted" + MonitoredEvent_ClaimSubmitted MonitoredEvent = "ClaimSubmitted" + MonitoredEvent_ClaimAccepted MonitoredEvent = "ClaimAccepted" + MonitoredEvent_EpochSealed MonitoredEvent = "EpochSealed" + MonitoredEvent_CommitmentJoined MonitoredEvent = "CommitmentJoined" + MonitoredEvent_MatchAdvanced MonitoredEvent = "MatchAdvanced" + MonitoredEvent_MatchCreated MonitoredEvent = "MatchCreated" + MonitoredEvent_MatchDeleted MonitoredEvent = "MatchDeleted" + MonitoredEvent_NewInnerTournament MonitoredEvent = "NewInnerTournament" ) func (e MonitoredEvent) String() string { return string(e) } +type Tournament struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + Address common.Address `sql:"primary_key" json:"address"` + ParentTournamentAddress *common.Address `json:"parent_tournament_address"` + ParentMatchIDHash *common.Hash `json:"parent_match_id_hash"` + MaxLevel uint64 `json:"max_level"` + Level uint64 `json:"level"` + Log2Step uint64 `json:"log2step"` + Height uint64 `json:"height"` + WinnerCommitment *common.Hash `json:"winner_commitment"` + FinalStateHash *common.Hash `json:"final_state_hash"` + FinishedAtBlock uint64 `json:"finished_at_block"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (t *Tournament) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias Tournament + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + MaxLevel string `json:"max_level"` + Level string `json:"level"` + Log2Step string `json:"log2step"` + Height string `json:"height"` + FinishedAtBlock string `json:"finished_at_block"` + *Alias + }{ + Alias: (*Alias)(t), + EpochIndex: fmt.Sprintf("0x%x", t.EpochIndex), + MaxLevel: fmt.Sprintf("0x%x", t.MaxLevel), + Level: fmt.Sprintf("0x%x", t.Level), + Log2Step: fmt.Sprintf("0x%x", t.Log2Step), + Height: fmt.Sprintf("0x%x", t.Height), + FinishedAtBlock: fmt.Sprintf("0x%x", t.FinishedAtBlock), + } + return json.Marshal(aux) +} + +type Commitment struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + TournamentAddress common.Address `sql:"primary_key" json:"tournament_address"` + Commitment common.Hash `sql:"primary_key" json:"commitment"` + FinalStateHash common.Hash `json:"final_state_hash"` + SubmitterAddress common.Address `json:"submitter_address"` + BlockNumber uint64 `json:"block_number"` + TxHash common.Hash `json:"tx_hash"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (c *Commitment) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias Commitment + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + BlockNumber string `json:"block_number"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", c.EpochIndex), + BlockNumber: fmt.Sprintf("0x%x", c.BlockNumber), + Alias: (*Alias)(c), + } + return json.Marshal(aux) +} + +type Match struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + TournamentAddress common.Address `sql:"primary_key" json:"tournament_address"` + IDHash common.Hash `sql:"primary_key" json:"id_hash"` + CommitmentOne common.Hash `json:"commitment_one"` + CommitmentTwo common.Hash `json:"commitment_two"` + LeftOfTwo common.Hash `json:"left_of_two"` + BlockNumber uint64 `json:"block_number"` + TxHash common.Hash `json:"tx_hash"` + Winner WinnerCommitment `json:"winner_commitment"` + DeletionReason MatchDeletionReason `json:"deletion_reason"` + DeletionBlockNumber uint64 `json:"deletion_block_number"` + DeletionTxHash common.Hash `json:"deletion_tx_hash"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (m *Match) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias Match + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + BlockNumber string `json:"block_number"` + DeletionBlockNumber string `json:"deletion_block_number"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", m.EpochIndex), + BlockNumber: fmt.Sprintf("0x%x", m.BlockNumber), + DeletionBlockNumber: fmt.Sprintf("0x%x", m.DeletionBlockNumber), + Alias: (*Alias)(m), + } + return json.Marshal(aux) +} + +type MatchAdvanced struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + TournamentAddress common.Address `sql:"primary_key" json:"tournament_address"` + IDHash common.Hash `sql:"primary_key" json:"id_hash"` + OtherParent common.Hash `json:"other_parent"` + LeftNode common.Hash `json:"left_node"` + BlockNumber uint64 `json:"block_number"` + TxHash common.Hash `json:"tx_hash"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (m *MatchAdvanced) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias MatchAdvanced + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + BlockNumber string `json:"block_number"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", m.EpochIndex), + BlockNumber: fmt.Sprintf("0x%x", m.BlockNumber), + Alias: (*Alias)(m), + } + return json.Marshal(aux) +} + +// MatchDeletionReason represents the reason why a match was deleted +type MatchDeletionReason string + +const ( + MatchDeletionReason_STEP MatchDeletionReason = "STEP" + MatchDeletionReason_TIMEOUT MatchDeletionReason = "TIMEOUT" + MatchDeletionReason_CHILD_TOURNAMENT MatchDeletionReason = "CHILD_TOURNAMENT" + MatchDeletionReason_NOT_DELETED MatchDeletionReason = "NOT_DELETED" +) + +var MatchDeletionReasonAllValues = []MatchDeletionReason{ + MatchDeletionReason_STEP, + MatchDeletionReason_TIMEOUT, + MatchDeletionReason_CHILD_TOURNAMENT, + MatchDeletionReason_NOT_DELETED, +} + +func (e *MatchDeletionReason) Scan(value any) error { + var enumValue string + switch val := value.(type) { + case string: + enumValue = val + case []byte: + enumValue = string(val) + default: + return errors.New("invalid value for MatchDeletionReason enum. Enum value has to be of type string or []byte") + } + + switch enumValue { + case "STEP": + *e = MatchDeletionReason_STEP + case "TIMEOUT": + *e = MatchDeletionReason_TIMEOUT + case "CHILD_TOURNAMENT": + *e = MatchDeletionReason_CHILD_TOURNAMENT + case "NOT_DELETED": + *e = MatchDeletionReason_NOT_DELETED + default: + return errors.New("invalid value '" + enumValue + "' for MatchDeletionReason enum") + } + + return nil +} + +func (e MatchDeletionReason) String() string { + return string(e) +} + +func MatchDeletionReasonFromUint8(v uint8) MatchDeletionReason { + switch v { + case 0: + return MatchDeletionReason_STEP + case 1: + return MatchDeletionReason_TIMEOUT + case 2: //nolint: mnd + return MatchDeletionReason_CHILD_TOURNAMENT + case 0xff: //nolint: mnd + return MatchDeletionReason_NOT_DELETED + default: + return MatchDeletionReason_STEP // default to STEP for unknown values + } +} + +// WinnerCommitment represents the winner commitment of a match +type WinnerCommitment string + +const ( + WinnerCommitment_NONE WinnerCommitment = "NONE" + WinnerCommitment_ONE WinnerCommitment = "ONE" + WinnerCommitment_TWO WinnerCommitment = "TWO" +) + +var WinnerCommitmentAllValues = []WinnerCommitment{ + WinnerCommitment_NONE, + WinnerCommitment_ONE, + WinnerCommitment_TWO, +} + +func (e *WinnerCommitment) Scan(value any) error { + var enumValue string + switch val := value.(type) { + case string: + enumValue = val + case []byte: + enumValue = string(val) + default: + return errors.New("invalid value for WinnerCommitment enum. Enum value has to be of type string or []byte") + } + + switch enumValue { + case "NONE": + *e = WinnerCommitment_NONE + case "ONE": + *e = WinnerCommitment_ONE + case "TWO": + *e = WinnerCommitment_TWO + default: + return errors.New("invalid value '" + enumValue + "' for WinnerCommitment enum") + } + + return nil +} + +func (e WinnerCommitment) String() string { + return string(e) +} + +func WinnerCommitmentFromUint8(v uint8) WinnerCommitment { + switch v { + case 0: + return WinnerCommitment_NONE + case 1: + return WinnerCommitment_ONE + case 2: //nolint: mnd + return WinnerCommitment_TWO + default: + return WinnerCommitment_NONE // default to NONE for unknown values + } +} + +type StateHash struct { + InputEpochApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `json:"epoch_index"` + InputIndex uint64 `json:"input_index"` + Index uint64 `sql:"primary_key" json:"index"` + MachineHash common.Hash `json:"machine_hash"` + Repetitions uint64 `json:"repetitions"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (s *StateHash) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias StateHash + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + InputIndex string `json:"input_index"` + Index string `json:"index"` + Repetitions string `json:"repetitions"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", s.EpochIndex), + InputIndex: fmt.Sprintf("0x%x", s.InputIndex), + Index: fmt.Sprintf("0x%x", s.Index), + Repetitions: fmt.Sprintf("0x%x", s.Repetitions), + Alias: (*Alias)(s), + } + return json.Marshal(aux) +} + +func (s *StateHash) UnmarshalJSON(data []byte) error { + // Create an alias to avoid infinite recursion in UnmarshalJSON. + type Alias StateHash + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + InputIndex string `json:"input_index"` + Index string `json:"index"` + Repetitions string `json:"repetitions"` + *Alias + }{ + Alias: (*Alias)(s), + } + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + + if aux.EpochIndex != "" { + val, err := ParseHexUint64(aux.EpochIndex) + if err != nil { + return fmt.Errorf("invalid epoch_index: %w", err) + } + s.EpochIndex = val + } + + if aux.InputIndex != "" { + val, err := ParseHexUint64(aux.InputIndex) + if err != nil { + return fmt.Errorf("invalid input_index: %w", err) + } + s.InputIndex = val + } + + if aux.Index != "" { + val, err := ParseHexUint64(aux.Index) + if err != nil { + return fmt.Errorf("invalid index: %w", err) + } + s.Index = val + } + + if aux.Repetitions != "" { + val, err := ParseHexUint64(aux.Repetitions) + if err != nil { + return fmt.Errorf("invalid repetitions: %w", err) + } + s.Repetitions = val + } + + return nil +} + func Pointer[T any](v T) *T { return &v } diff --git a/internal/node/node.go b/internal/node/node.go index a45cd5500..e6c56a184 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -27,6 +27,7 @@ type CreateInfo struct { Config config.NodeConfig + PrtClient *ethclient.Client ClaimerClient *ethclient.Client ReaderClient *ethclient.Client ReaderWSClient *ethclient.Client @@ -37,7 +38,6 @@ type Service struct { service.Service Children []service.IService - Client *ethclient.Client Repository repository.Repository } @@ -90,6 +90,11 @@ func createServices(ctx context.Context, c *CreateInfo, s *Service) error { ch <- newClaimer(ctx, c, s) }() + numChildren++ + go func() { + ch <- newPrt(ctx, c, s) + }() + if c.Config.FeatureJsonrpcApiEnabled { numChildren++ go func() { @@ -150,6 +155,8 @@ func newEVMReader(ctx context.Context, c *CreateInfo, s *Service) service.IServi readerArgs := evmreader.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "evm-reader", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -174,6 +181,8 @@ func newAdvancer(ctx context.Context, c *CreateInfo, s *Service) service.IServic advancerArgs := advancer.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "advancer", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -197,6 +206,8 @@ func newValidator(ctx context.Context, c *CreateInfo, s *Service) service.IServi validatorArgs := validator.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "validator", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -220,6 +231,8 @@ func newClaimer(ctx context.Context, c *CreateInfo, s *Service) service.IService claimerArgs := claimer.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "claimer", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -244,6 +257,8 @@ func newJsonrpc(ctx context.Context, c *CreateInfo, s *Service) service.IService jsonrpcArgs := jsonrpc.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "jsonrpc", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -266,15 +281,18 @@ func newPrt(ctx context.Context, c *CreateInfo, s *Service) service.IService { prtArgs := prt.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "prt", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, TelemetryCreate: false, - PollInterval: c.Config.ValidatorPollingInterval, + PollInterval: c.Config.PrtPollingInterval, ServeMux: s.ServeMux, }, + EthClient: c.PrtClient, Repository: c.Repository, - Config: *c.Config.ToValidatorConfig(), + Config: *c.Config.ToPrtConfig(), } prtService, err := prt.Create(ctx, &prtArgs) diff --git a/internal/prt/itournament_adapter.go b/internal/prt/itournament_adapter.go new file mode 100644 index 000000000..a638f5293 --- /dev/null +++ b/internal/prt/itournament_adapter.go @@ -0,0 +1,441 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package prt + +import ( + "math/big" + + . "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/contracts/itournament" + "github.com/cartesi/rollups-node/pkg/ethutil" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" +) + +// ITournament Wrapper +type ITournamentAdapterImpl struct { + tournament *itournament.ITournament + client *ethclient.Client + tournamentAddress common.Address + filter ethutil.Filter +} + +func NewITournamentAdapter( + tournamentAddress common.Address, + client *ethclient.Client, + filter ethutil.Filter, +) (TournamentAdapter, error) { + tournamentContract, err := itournament.NewITournament(tournamentAddress, client) + if err != nil { + return nil, err + } + return &ITournamentAdapterImpl{ + tournament: tournamentContract, + tournamentAddress: tournamentAddress, + client: client, + filter: filter, + }, nil +} + +func (a *ITournamentAdapterImpl) Result(opts *bind.CallOpts) (bool, [32]byte, [32]byte, error) { + result, err := a.tournament.ArbitrationResult(opts) + // ArbitrationResult reverts when it has finished with no winners + if info, ok := ExtractJsonErrorInfo(err); ok && info.HasData && info.Data == TournamentFailedNoWinner { + return true, [32]byte{}, [32]byte{}, nil + } + return result.Finished, result.WinnerCommitment, result.FinalState, err +} + +func (a *ITournamentAdapterImpl) Constants(opts *bind.CallOpts) (TournamentConstants, error) { + c, err := a.tournament.TournamentLevelConstants(opts) + return TournamentConstants{ + MaxLevel: c.MaxLevel, + Level: c.Level, + Log2step: c.Log2step, + Height: c.Height, + }, err +} + +func (a *ITournamentAdapterImpl) TimeFinished(opts *bind.CallOpts) (bool, uint64, error) { + return a.tournament.TimeFinished(opts) +} + +func buildCommitmentJoinedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events[MonitoredEvent_CommitmentJoined.String()].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveCommitmentJoinedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentCommitmentJoined, error) { + q, err := buildCommitmentJoinedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentCommitmentJoined + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseCommitmentJoined(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildMatchAdvancedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events[MonitoredEvent_MatchAdvanced.String()].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveMatchAdvancedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentMatchAdvanced, error) { + q, err := buildMatchAdvancedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentMatchAdvanced + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseMatchAdvanced(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildMatchCreatedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events[MonitoredEvent_MatchCreated.String()].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveMatchCreatedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentMatchCreated, error) { + q, err := buildMatchCreatedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentMatchCreated + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseMatchCreated(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildMatchDeletedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events[MonitoredEvent_MatchDeleted.String()].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveMatchDeletedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentMatchDeleted, error) { + q, err := buildMatchDeletedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentMatchDeleted + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseMatchDeleted(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildNewInnerTournamentFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events[MonitoredEvent_NewInnerTournament.String()].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveNewInnerTournamentEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentNewInnerTournament, error) { + q, err := buildNewInnerTournamentFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentNewInnerTournament + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseNewInnerTournament(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildAllEventsFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{ + c.Events[MonitoredEvent_CommitmentJoined.String()].ID, + c.Events[MonitoredEvent_MatchAdvanced.String()].ID, + c.Events[MonitoredEvent_MatchCreated.String()].ID, + c.Events[MonitoredEvent_MatchDeleted.String()].ID, + c.Events[MonitoredEvent_NewInnerTournament.String()].ID, + }, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveAllEvents( + opts *bind.FilterOpts, +) (*TournamentEvents, error) { + q, err := buildAllEventsFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var commitmentJoined []*itournament.ITournamentCommitmentJoined + var matchAdvanced []*itournament.ITournamentMatchAdvanced + var matchCreated []*itournament.ITournamentMatchCreated + var matchDeleted []*itournament.ITournamentMatchDeleted + var newInnerTournament []*itournament.ITournamentNewInnerTournament + + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return nil, err + } + + for log, err := range itr { + if err != nil { + return nil, err + } + + switch log.Topics[0] { + case c.Events[MonitoredEvent_CommitmentJoined.String()].ID: + ev, err := a.tournament.ParseCommitmentJoined(*log) + if err != nil { + return nil, err + } + commitmentJoined = append(commitmentJoined, ev) + case c.Events[MonitoredEvent_MatchAdvanced.String()].ID: + ev, err := a.tournament.ParseMatchAdvanced(*log) + if err != nil { + return nil, err + } + matchAdvanced = append(matchAdvanced, ev) + case c.Events[MonitoredEvent_MatchCreated.String()].ID: + ev, err := a.tournament.ParseMatchCreated(*log) + if err != nil { + return nil, err + } + matchCreated = append(matchCreated, ev) + case c.Events[MonitoredEvent_MatchDeleted.String()].ID: + ev, err := a.tournament.ParseMatchDeleted(*log) + if err != nil { + return nil, err + } + matchDeleted = append(matchDeleted, ev) + case c.Events[MonitoredEvent_NewInnerTournament.String()].ID: + ev, err := a.tournament.ParseNewInnerTournament(*log) + if err != nil { + return nil, err + } + newInnerTournament = append(newInnerTournament, ev) + } + } + + return &TournamentEvents{ + CommitmentJoined: commitmentJoined, + MatchAdvanced: matchAdvanced, + MatchCreated: matchCreated, + MatchDeleted: matchDeleted, + NewInnerTournament: newInnerTournament, + }, nil +} diff --git a/internal/prt/prt.go b/internal/prt/prt.go index 2083daa2e..141857f4c 100644 --- a/internal/prt/prt.go +++ b/internal/prt/prt.go @@ -7,41 +7,820 @@ import ( "context" "errors" "fmt" + "math/big" + "time" + "unsafe" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + + "github.com/cartesi/rollups-node/internal/merkle" . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/pkg/contracts/idaveconsensus" + "github.com/cartesi/rollups-node/pkg/contracts/itournament" ) -type PrtRepository interface { - ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination, descending bool) ([]*Application, uint64, error) +type prtRepository interface { + ListApplications(ctx context.Context, f repository.ApplicationFilter, + p repository.Pagination, descending bool) ([]*Application, uint64, error) UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error + + ListEpochs(ctx context.Context, nameOrAddress string, f repository.EpochFilter, + p repository.Pagination, descending bool) ([]*Epoch, uint64, error) + GetEpoch(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) + UpdateEpochStatus(ctx context.Context, nameOrAddress string, e *Epoch) error + + CreateTournament(ctx context.Context, nameOrAddress string, t *Tournament) error + GetTournament(ctx context.Context, nameOrAddress string, address string) (*Tournament, error) + UpdateTournament(ctx context.Context, nameOrAddress string, t *Tournament) error + ListTournaments(ctx context.Context, nameOrAddress string, f repository.TournamentFilter, + p repository.Pagination, descending bool) ([]*Tournament, uint64, error) + + StoreTournamentEvents(ctx context.Context, appID int64, commitments []*Commitment, matches []*Match, + matchAdvanced []*MatchAdvanced, matchDeleted []*Match, lastBlock uint64) error + + GetCommitment(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, commitmentHex string) (*Commitment, error) + + SaveNodeConfigRaw(ctx context.Context, key string, rawJSON []byte) error + LoadNodeConfigRaw(ctx context.Context, key string) (rawJSON []byte, createdAt, updatedAt time.Time, err error) +} + +// EthClientInterface defines the methods we need from ethclient.Client +type EthClientInterface interface { + TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) + ChainID(ctx context.Context) (*big.Int, error) +} + +func getAllRunningApplications(ctx context.Context, r prtRepository) ([]*Application, uint64, error) { + f := repository.ApplicationFilter{State: Pointer(ApplicationState_Enabled), ConsensusType: Pointer(Consensus_PRT)} + return r.ListApplications(ctx, f, repository.Pagination{}, false) +} + +func getAllClaimComputedEpochs(ctx context.Context, r prtRepository, nameOrAddress string) ([]*Epoch, uint64, error) { + f := repository.EpochFilter{Status: []EpochStatus{EpochStatus_ClaimComputed}} + return r.ListEpochs(ctx, nameOrAddress, f, repository.Pagination{}, false) } -func getAllRunningApplications(ctx context.Context, er PrtRepository) ([]*Application, uint64, error) { - f := repository.ApplicationFilter{State: Pointer(ApplicationState_Enabled)} - return er.ListApplications(ctx, f, repository.Pagination{}, false) +func getAllSubTournaments( + ctx context.Context, + r prtRepository, + nameOrAddress string, + epochIndex uint64, + tournamentAddress *common.Address, + level TournamentLevel, +) ([]*Tournament, uint64, error) { + f := repository.TournamentFilter{EpochIndex: &epochIndex, ParentTournamentAddress: tournamentAddress, Level: (*uint64)(&level)} + return r.ListTournaments(ctx, nameOrAddress, f, repository.Pagination{}, false) } // setApplicationInoperable marks an application as inoperable with the given reason, // logs any error that occurs during the update, and returns an error with the reason. -func (v *Service) setApplicationInoperable(ctx context.Context, app *Application, reasonFmt string, args ...interface{}) error { +func (s *Service) setApplicationInoperable(ctx context.Context, app *Application, reasonFmt string, args ...any) error { reason := fmt.Sprintf(reasonFmt, args...) appAddress := app.IApplicationAddress.String() // Log the reason first - v.Logger.Error(reason, "application", appAddress) + s.Logger.Error(reason, "application", appAddress) // Update application state - err := v.repository.UpdateApplicationState(ctx, app.ID, ApplicationState_Inoperable, &reason) + err := s.repository.UpdateApplicationState(ctx, app.ID, ApplicationState_Inoperable, &reason) if err != nil { - v.Logger.Error("failed to update application state to inoperable", "app", appAddress, "err", err) + s.Logger.Error("failed to update application state to inoperable", "app", appAddress, "err", err) } // Return the error with the reason return errors.New(reason) } -func (v *Service) validateApplication(ctx context.Context, app *Application) error { - v.Logger.Debug("Starting validation", "application", app.Name) +func (s *Service) saveTournamentEvents(ctx context.Context, app *Application, epoch *Epoch, + tournamentAddress common.Address, events *TournamentEvents, lastBlock uint64) error { + commitments := make([]*Commitment, 0, len(events.CommitmentJoined)) + for _, ev := range events.CommitmentJoined { + c := Commitment{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + Commitment: ev.Commitment, + FinalStateHash: ev.FinalStateHash, + SubmitterAddress: ev.Submitter, + BlockNumber: ev.Raw.BlockNumber, + TxHash: ev.Raw.TxHash, + } + s.Logger.Info("Found CommitmentJoined event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "commitment", c.Commitment.String()) + commitments = append(commitments, &c) + } + + matches := make([]*Match, 0, len(events.MatchCreated)) + for _, ev := range events.MatchCreated { + m := Match{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + IDHash: ev.MatchIdHash, + CommitmentOne: ev.One, + CommitmentTwo: ev.Two, + LeftOfTwo: ev.LeftOfTwo, + BlockNumber: ev.Raw.BlockNumber, + TxHash: ev.Raw.TxHash, + Winner: WinnerCommitment_NONE, + DeletionReason: MatchDeletionReason_NOT_DELETED, + DeletionBlockNumber: 0, + DeletionTxHash: common.Hash{}, + } + s.Logger.Info("Found MatchCreated event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "id_hash", m.IDHash.String(), + "one", m.CommitmentOne.String(), + "two", m.CommitmentTwo.String(), + "leftOfTwo", m.LeftOfTwo.String()) + matches = append(matches, &m) + } + + matchAdvanced := make([]*MatchAdvanced, 0, len(events.MatchAdvanced)) + for _, ev := range events.MatchAdvanced { + m := &MatchAdvanced{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + IDHash: ev.MatchIdHash, + OtherParent: ev.OtherParent, + LeftNode: ev.LeftNode, + BlockNumber: ev.Raw.BlockNumber, + TxHash: ev.Raw.TxHash, + } + s.Logger.Info("Found MatchAdvanced event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "id_hash", m.IDHash.String(), + "other_parent", m.OtherParent.String(), + "left_node", m.LeftNode.String()) + matchAdvanced = append(matchAdvanced, m) + } + + matchDeleted := make([]*Match, 0, len(events.MatchDeleted)) + for _, ev := range events.MatchDeleted { + m := Match{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + IDHash: ev.MatchIdHash, + CommitmentOne: ev.One, + CommitmentTwo: ev.Two, + Winner: WinnerCommitmentFromUint8(ev.WinnerCommitment), + DeletionReason: MatchDeletionReasonFromUint8(ev.Reason), + DeletionBlockNumber: ev.Raw.BlockNumber, + DeletionTxHash: ev.Raw.TxHash, + } + s.Logger.Info("Found MatchDeleted event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "id_hash", ((common.Hash)(ev.MatchIdHash)).String(), + "one", ((common.Hash)(ev.One)).String(), + "two", ((common.Hash)(ev.Two)).String(), + "winner", m.Winner.String(), + "reason", m.DeletionReason.String(), + ) + matchDeleted = append(matchDeleted, &m) + } + + err := s.repository.StoreTournamentEvents(ctx, app.ID, commitments, matches, matchAdvanced, matchDeleted, lastBlock) + if err != nil { + s.Logger.Error("failed to save tournament events", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + return nil +} + +func (s *Service) createTournament( + ctx context.Context, + app *Application, + epoch *Epoch, + level TournamentLevel, + parentMatchIDHash *common.Hash, + parentTournamentAddress *common.Address, + tournamentAddress common.Address, +) (*Tournament, error) { + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return nil, fmt.Errorf("client is not an *ethclient.Client, cannot create dave consensus bind") + } + + adapter, err := NewITournamentAdapter(tournamentAddress, ethClient, s.filter) + if err != nil { + s.Logger.Error("failed to create "+level.String()+" tournament adapter", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return nil, err + } + + constants, err := adapter.Constants(nil) + if err != nil { + s.Logger.Error("failed to fetch "+level.String()+" tournament constants", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return nil, err + } + + var winnerCommitmentPtr *common.Hash + var finalStatePtr *common.Hash + finishedAtBlock := uint64(0) + if epoch.ClaimTransactionHash != nil { + finished, timeFinished, err := adapter.TimeFinished(nil) + if err != nil { + s.Logger.Error("failed to fetch "+level.String()+" tournament finished at time", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return nil, err + } + if !finished { + s.Logger.Error(level.String()+" tournament should be finished", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return nil, err + } + finishedAtBlock = timeFinished + + _, winnerCommitment, finalState, err := adapter.Result(nil) + if err != nil { + s.Logger.Error("failed to fetch "+level.String()+" tournament result", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return nil, err + } + + // root tournament with no winner. + if level == RootLevel && winnerCommitment == [32]byte{} { + return nil, s.setApplicationInoperable(ctx, app, + "Epoch %d root tournament %s has finished without winners. Setting application as inoperable.", + epoch.Index, tournamentAddress.String()) + } + + if level == RootLevel && *epoch.Commitment != winnerCommitment { + return nil, s.setApplicationInoperable(ctx, app, + "Epoch %d has inconsistent commitment between off-chain (%s) and on-chain (%s). Setting application as inoperable.", + epoch.Index, epoch.Commitment.String(), hexutil.Encode(winnerCommitment[:])) + } + winnerCommitmentPtr = new(common.Hash) + *winnerCommitmentPtr = winnerCommitment + + finalStatePtr = new(common.Hash) + *finalStatePtr = finalState + } else { + s.Logger.Info("Found open "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String()) + } + + t := &Tournament{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + Address: tournamentAddress, + ParentMatchIDHash: parentMatchIDHash, + ParentTournamentAddress: parentTournamentAddress, + MaxLevel: constants.MaxLevel, + Level: constants.Level, + Log2Step: constants.Log2step, + Height: constants.Height, + WinnerCommitment: winnerCommitmentPtr, + FinalStateHash: finalStatePtr, + FinishedAtBlock: finishedAtBlock, + } + + err = s.repository.CreateTournament(ctx, app.IApplicationAddress.Hex(), t) + if err != nil { + s.Logger.Error("failed to create "+level.String()+" tournament in database", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return nil, err + } + return t, nil +} + +func (s *Service) updateTournamentIfFinished( + ctx context.Context, + app *Application, + epoch *Epoch, + level TournamentLevel, + adapter TournamentAdapter, + t *Tournament, + mostRecentBlock uint64, +) error { + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(mostRecentBlock), + } + + finished, timeFinished, err := adapter.TimeFinished(callOpts) + if err != nil { + s.Logger.Error("failed to fetch "+level.String()+" tournament finished at time", "application", app.Name, + "epoch", epoch.Index, "tournament_address", t.Address.String(), "error", err) + return err + } + if !finished { + return nil + } + t.FinishedAtBlock = timeFinished + + _, winnerCommitment, finalState, err := adapter.Result(callOpts) + if err != nil { + + s.Logger.Error("failed to fetch "+level.String()+" tournament result", "application", app.Name, + "epoch", epoch.Index, "tournament_address", t.Address.String(), "error", err) + return err + } + + // root tournament with no winner. + if level == RootLevel && winnerCommitment == [32]byte{} { + return s.setApplicationInoperable(ctx, app, + "Epoch %d root tournament %s has finished without winners. Setting application as inoperable.", + epoch.Index, t.Address.String()) + } + + if level == RootLevel && *epoch.Commitment != winnerCommitment { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent commitment between off-chain (%s) and on-chain (%s)", + epoch.Index, epoch.Commitment.String(), hexutil.Encode(winnerCommitment[:])) + } + t.WinnerCommitment = new(common.Hash) + *t.WinnerCommitment = winnerCommitment + + t.FinalStateHash = new(common.Hash) + *t.FinalStateHash = finalState + + return s.repository.UpdateTournament(ctx, app.IApplicationAddress.Hex(), t) +} + +func (s *Service) checkEpochs(ctx context.Context, app *Application, mostRecentBlock uint64) error { + if app.LastTournamentCheckBlock >= mostRecentBlock { + s.Logger.Debug("No new blocks since last tournament check", "application", app.Name, + "last_tournament_check_block", app.LastTournamentCheckBlock, "most_recent_block", mostRecentBlock) + return nil // nothing to do + } + + epochs, _, err := getAllClaimComputedEpochs(ctx, s.repository, app.Name) + if err != nil { + s.Logger.Error("failed to list epochs", "application", app.Name, "error", err) + return err + } + if len(epochs) == 0 { + s.Logger.Debug("No epochs with claim computed status", "application", app.Name) + return nil // nothing to do + } + + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return fmt.Errorf("client is not an *ethclient.Client, cannot create dave consensus bind") + } + + consensus, err := idaveconsensus.NewIDaveConsensus(app.IConsensusAddress, ethClient) + if err != nil { + s.Logger.Error("failed to bind dave consensus contract", "application", app.Name, + "consensus_address", app.IConsensusAddress.String(), "error", err) + return err + } + + for _, epoch := range epochs { + if epoch.ClaimTransactionHash == nil { // epoch not claimed on-chain yet + err = s.fetchTournamentData(ctx, app, epoch, RootLevel, nil, nil, *epoch.TournamentAddress, mostRecentBlock) + if err != nil { + s.Logger.Error("failed to fetch root tournament data", "application", app.Name, + "epoch", epoch.Index, "tournament", epoch.TournamentAddress.String(), "error", err) + return err + } + break + } + + receipt, err := ethClient.TransactionReceipt(ctx, *epoch.ClaimTransactionHash) + if err != nil { + s.Logger.Error("failed to fetch transaction receipt for epoch", "application", app.Name, + "epoch", epoch.Index, "tx", epoch.ClaimTransactionHash, "error", err) + return err + } + + if receipt.Status != 1 { + return fmt.Errorf("EpochSealed transaction hash points to failed transaction") + } + + var event *idaveconsensus.IDaveConsensusEpochSealed + for _, vLog := range receipt.Logs { + event, err = consensus.ParseEpochSealed(*vLog) + if err != nil { + continue // Skip logs that don't match + } + } + if event == nil { + return fmt.Errorf("failed to find EpochSealed event in receipt logs") + + } + + if epoch.Index != event.EpochNumber.Uint64()-1 { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent index between off-chain (%d) and on-chain (%d)", + epoch.Index, epoch.Index, event.EpochNumber.Uint64()-1) + } + if *epoch.MachineHash != event.InitialMachineStateHash { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent machine hash between off-chain (%s) and on-chain (%s)", + epoch.Index, epoch.MachineHash.String(), hexutil.Encode(event.InitialMachineStateHash[:])) + } + if *epoch.OutputsMerkleRoot != event.OutputsMerkleRoot { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent claim hash between off-chain (%s) and on-chain (%s)", + epoch.Index, epoch.OutputsMerkleRoot.String(), hexutil.Encode(event.OutputsMerkleRoot[:])) + } + + err = s.fetchTournamentData(ctx, app, epoch, RootLevel, nil, nil, *epoch.TournamentAddress, mostRecentBlock) + if err != nil { + s.Logger.Error("failed to fetch tournament data", "application", app.Name, + "epoch", epoch.Index, "tournament", epoch.TournamentAddress.String(), "error", err) + return err + } + + s.Logger.Info("Found finalized epoch. OutputsMerkleRoot matched. Setting claim as accepted", + "application", app.Name, + "epoch", epoch.Index, + "event_block_number", event.Raw.BlockNumber, + "claim_hash", fmt.Sprintf("%x", event.OutputsMerkleRoot), + "tx", epoch.ClaimTransactionHash, + ) + + epoch.Status = EpochStatus_ClaimAccepted + err = s.repository.UpdateEpochStatus(ctx, app.Name, epoch) + if err != nil { + s.Logger.Error("failed to update epoch status to claim accepted", "application", app.Name, "epoch", epoch.Index, "error", err) + return err + } + } + return nil +} + +func (s *Service) fetchTournamentData( + ctx context.Context, + app *Application, + epoch *Epoch, + level TournamentLevel, + parentMatchIDHash *common.Hash, + parentTournamentAddress *common.Address, + tournamentAddress common.Address, + mostRecentBlock uint64, +) error { + s.Logger.Debug("Fetching "+level.String()+" tournament data", "application", app.Name, "tournament", tournamentAddress.String()) + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return fmt.Errorf("client is not an *ethclient.Client, cannot create dave consensus bind") + } + + adapter, err := NewITournamentAdapter(tournamentAddress, ethClient, s.filter) + if err != nil { + s.Logger.Error("failed to create "+level.String()+" tournament adapter", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + t, err := s.repository.GetTournament(ctx, app.IApplicationAddress.Hex(), tournamentAddress.Hex()) + if err != nil { + s.Logger.Error("failed to load "+level.String()+" tournament from database", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + if t == nil { + t, err = s.createTournament(ctx, app, epoch, level, + parentMatchIDHash, parentTournamentAddress, tournamentAddress) + if err != nil { + s.Logger.Error("failed to create new "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + } else if t.FinishedAtBlock == 0 { + err = s.updateTournamentIfFinished(ctx, app, epoch, level, adapter, t, mostRecentBlock) + if err != nil { + s.Logger.Error("failed to check if "+level.String()+" tournament was finished", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + } + if t.FinishedAtBlock != 0 { + s.Logger.Info("Found finished "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", t.Address.String()) + } + } + + nextSearchBlock := max(epoch.LastBlock, app.LastTournamentCheckBlock+1) + var endBlock uint64 + if t.FinishedAtBlock != 0 { + if nextSearchBlock > t.FinishedAtBlock { + s.Logger.Debug("No new blocks to search for "+level.String()+" tournament events", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), + "finished_at_block", t.FinishedAtBlock, "next_search_block", nextSearchBlock) + return nil + } + endBlock = t.FinishedAtBlock + } else { + endBlock = mostRecentBlock + } + + s.Logger.Debug("Searching for "+level.String()+" tournament events", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), + "next_search_block", nextSearchBlock, "end_block", endBlock) + opts := &bind.FilterOpts{ + Context: ctx, + Start: nextSearchBlock, + End: &endBlock, + } + + events, err := adapter.RetrieveAllEvents(opts) + if err != nil { + s.Logger.Error("failed to retrieve all events from "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + s.Logger.Debug("Retrieved events for "+level.String()+" tournament", "address", t.Address.String(), + "epoch", epoch.Index, + "commitmentJoined", len(events.CommitmentJoined), + "matchCreated", len(events.MatchCreated), + "matchAdvanced", len(events.MatchAdvanced), + "matchDeleted", len(events.MatchDeleted), + "newInnerTournament", len(events.NewInnerTournament)) + + err = s.saveTournamentEvents(ctx, app, epoch, tournamentAddress, events, endBlock) + if err != nil { + s.Logger.Error("failed to save events for "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", t.Address.String(), "error", err) + return err + } + + if level == BottomLevel { + return nil // no inner tournaments + } + + nextLevel := level + 1 + innerTournaments, _, err := getAllSubTournaments(ctx, s.repository, app.Name, epoch.Index, &tournamentAddress, level+1) + if err != nil { + s.Logger.Error("failed to list inner tournaments from "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + for _, i := range innerTournaments { + s.Logger.Debug("Fetching data for previous open "+nextLevel.String()+" tournament", + "parent_match_id_hash", i.ParentMatchIDHash.String(), + "parent_tournament_address", i.ParentTournamentAddress.String(), + "address", i.Address.String()) + + if i.FinishedAtBlock != 0 { + s.Logger.Debug("Skipping finished inner tournament", "address", i.Address.String()) + continue // already finished + } + + err = s.fetchTournamentData(ctx, app, epoch, nextLevel, i.ParentMatchIDHash, &tournamentAddress, i.Address, mostRecentBlock) + if err != nil { + s.Logger.Error("failed to fetch "+nextLevel.String()+" tournament data", "application", app.Name, + "tournament", i.Address.String(), "error", err) + return err + } + } + + for _, newInner := range events.NewInnerTournament { + hashID := (common.Hash)(newInner.MatchIdHash) + childAddress := newInner.ChildTournament + + s.Logger.Info("NewInnerTournament event", "id_hash", hashID.String(), "tournament_address", childAddress.String()) + + err = s.fetchTournamentData(ctx, app, epoch, nextLevel, &hashID, &tournamentAddress, childAddress, mostRecentBlock) + if err != nil { + s.Logger.Error("failed to fetch "+nextLevel.String()+" tournament data", "application", app.Name, + "tournament", childAddress.String(), "error", err) + return err + } + } + + return nil +} + +func (s *Service) trySettle(ctx context.Context, app *Application, mostRecentBlock uint64) error { + if tx, joinTxIsInFlight := s.joinInFlight[app.ID]; joinTxIsInFlight { + s.Logger.Debug("Waiting for join tournament transaction to be mined", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx) + return nil // wait for settle to be mined + } + + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return fmt.Errorf("client is not an *ethclient.Client, cannot create dave consensus bind") + } + + if tx, settleTxIsInFlight := s.settleInFlight[app.ID]; settleTxIsInFlight { + _, isPending, err := ethClient.TransactionByHash(ctx, *tx) + if err != nil { + s.Logger.Error("failed to fetch last settle transaction status", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx, "error", err) + return err + } + if isPending { + s.Logger.Debug("Previous settle transaction is still pending", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx) + return nil + } + s.Logger.Debug("Previous settle transaction has been mined", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx) + delete(s.settleInFlight, app.ID) + } + + consensus, err := idaveconsensus.NewIDaveConsensus(app.IConsensusAddress, ethClient) + if err != nil { + s.Logger.Error("failed to bind dave consensus contract", "application", app.Name, + "consensus_address", app.IConsensusAddress.String(), "error", err) + return err + } + + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(mostRecentBlock), + } + + result, err := consensus.CanSettle(callOpts) + if err != nil { + s.Logger.Error("failed to call CanSettle on DaveConsensus", "application", app.Name, + "consensus", app.IConsensusAddress.String(), "error", err) + return err + } + + s.currentEpochIndex = result.EpochNumber.Uint64() + + if !result.IsFinished { + s.Logger.Debug("Epoch root tournament has not finished yet. Skipping Settle", + "application", app.Name, "epoch_index", s.currentEpochIndex) + return nil // nothing to do + } + + epoch, err := s.repository.GetEpoch(ctx, app.IApplicationAddress.Hex(), s.currentEpochIndex) + if err != nil { + s.Logger.Error("failed to list epochs", "application", app.Name, "error", err) + return err + } + if epoch == nil || epoch.Status != EpochStatus_ClaimComputed { + s.Logger.Info("Application sync has not finished. Skipping Settle", "application", app.Name, + "epoch_index", s.currentEpochIndex) + return nil // nothing to do + } + + s.Logger.Info("Sending Settle transaction", "application", app.Name, "epoch_index", epoch.Index, + "outputs_merkle_root", epoch.OutputsMerkleRoot.String()) + + tx, err := consensus.Settle(s.txOpts, result.EpochNumber, + *epoch.OutputsMerkleRoot, hashSliceTobyteSlice(epoch.OutputsMerkleProof)) + if err != nil { + s.Logger.Error("failed to send Settle transaction", "application", app.Name, + "epoch_index", result.EpochNumber.Uint64(), "error", err) + return err + } + settleTx := tx.Hash() + s.settleInFlight[app.ID] = &settleTx + + return nil +} + +func (s *Service) reactToTournament(ctx context.Context, app *Application, mostRecentBlock uint64) error { + if tx, settleTxIsInFlight := s.settleInFlight[app.ID]; settleTxIsInFlight { + s.Logger.Debug("Waiting for settle transaction to be mined", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx) + return nil // wait for settle to be mined + } + + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return fmt.Errorf("client is not an *ethclient.Client, cannot create dave consensus bind") + } + if tx, joinTxIsInFlight := s.joinInFlight[app.ID]; joinTxIsInFlight { + _, isPending, err := ethClient.TransactionByHash(ctx, *tx) + if err != nil { + s.Logger.Error("failed to fetch last join tournament transaction status", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx, "error", err) + return err + } + if isPending { + s.Logger.Debug("Previous join tournament transaction is still pending", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx) + return nil + } + s.Logger.Debug("Previous join tournament transaction has been mined", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tx", tx) + delete(s.joinInFlight, app.ID) + } + + epoch, err := s.repository.GetEpoch(ctx, app.IApplicationAddress.Hex(), s.currentEpochIndex) + if err != nil { + s.Logger.Error("failed to list epochs", "application", app.Name, "error", err) + return err + } + if epoch == nil || epoch.Status != EpochStatus_ClaimComputed { + s.Logger.Debug("Application sync has not finished. Skipping join tournament", "application", app.Name, + "epoch_index", s.currentEpochIndex) + return nil // nothing to do + } + + commitment, err := s.repository.GetCommitment(ctx, app.IApplicationAddress.Hex(), epoch.Index, + epoch.TournamentAddress.Hex(), epoch.Commitment.String()) + if err != nil { + s.Logger.Error("failed to get commitment from repository", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tournament", epoch.TournamentAddress.Hex(), + "commitment", epoch.Commitment.Hex(), "error", err) + return err + } + if commitment != nil { + s.Logger.Debug("Commitment already joined. Skipping JoinTournament", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tournament", epoch.TournamentAddress.Hex(), "commitment", epoch.Commitment.Hex()) + return nil + } + + tournament, err := itournament.NewITournament(*epoch.TournamentAddress, ethClient) + if err != nil { + s.Logger.Error("failed to bind tournament contract", "application", app.Name, + "tournament", epoch.TournamentAddress.String(), "error", err) + return err + } + + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: new(big.Int).SetUint64(mostRecentBlock), + } + bondValue, err := tournament.BondValue(callOpts) + if err != nil { + s.Logger.Error("failed to fetch tournament bond value", "application", app.Name, + "epoch_index", s.currentEpochIndex, "tournament", epoch.TournamentAddress.Hex(), + "error", err) + return err + } + + txOptsWithValue := *s.txOpts + txOptsWithValue.Value = bondValue + + // FIXME move this to constants + idx := uint64(1<<48) - 1 //nolint: mnd + leftNode, rightNode, err := merkle.RootChildrenFromProof(*epoch.MachineHash, epoch.CommitmentProof, idx) + if err != nil { + s.Logger.Error("failed to compute left and right nodes from commitment proof", "application", app.Name, "epoch_index", s.currentEpochIndex, "error", err) + return err + } + + s.Logger.Info("Joining tournament", "application", app.Name, "epoch_index", epoch.Index, "commitment", epoch.Commitment, "left_node", leftNode.String(), "right_node", rightNode.String()) + + tx, err := tournament.JoinTournament(&txOptsWithValue, *epoch.MachineHash, + asBytes32Slice(epoch.CommitmentProof), leftNode, rightNode) + if err != nil { + s.Logger.Error("failed to send join tournament transaction", "application", app.Name, + "epoch_index", s.currentEpochIndex, "error", err) + return err + } + joinTx := tx.Hash() + s.joinInFlight[app.ID] = &joinTx + return nil } + +func (s *Service) validateApplication(ctx context.Context, app *Application) error { + s.Logger.Debug("Syncing PTR tournaments", "application", app.Name) + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return fmt.Errorf("client is not an *ethclient.Client ") + } + mostRecentBlock, err := ethClient.BlockNumber(ctx) + if err != nil { + s.Logger.Error("failed to fetch latest block number", "application", app.Name, "error", err) + return err + } + err = s.checkEpochs(ctx, app, mostRecentBlock) + if err != nil { + return err + } + if s.submissionEnabled { + err = s.trySettle(ctx, app, mostRecentBlock) + if err != nil { + return err + } + err = s.reactToTournament(ctx, app, mostRecentBlock) + if err != nil { + return err + } + } + return nil +} + +// hashSliceToByteSlice converts []common.Hash to [][32]byte without copying. +// This is safe because common.Hash is defined as [32]byte, so the memory layout is identical. +func hashSliceTobyteSlice(b []common.Hash) [][32]byte { + return *(*[][32]byte)(unsafe.Pointer(&b)) +} diff --git a/internal/prt/service.go b/internal/prt/service.go index ca9f9682a..81ec8d532 100644 --- a/internal/prt/service.go +++ b/internal/prt/service.go @@ -5,30 +5,45 @@ package prt import ( "context" + "errors" "fmt" + "math/big" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/merkle" + "github.com/cartesi/rollups-node/internal/config/auth" + . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/pkg/ethutil" "github.com/cartesi/rollups-node/pkg/service" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ) +type CreateInfo struct { + service.CreateInfo + Config config.PrtConfig + Repository repository.Repository + EthClient EthClientInterface +} + type Service struct { service.Service - repository PrtRepository - - // cached constants - pristineRootHash common.Hash - pristinePostContext []common.Hash + repository prtRepository + client EthClientInterface + submissionEnabled bool + filter ethutil.Filter + txOpts *bind.TransactOpts + currentEpochIndex uint64 + settleInFlight map[int64]*common.Hash // application.ID -> txHash + joinInFlight map[int64]*common.Hash // application.ID -> txHash } -type CreateInfo struct { - service.CreateInfo - - Config config.ValidatorConfig +const PrtConfigKey = "prt" - Repository repository.Repository +type PersistentConfig struct { + DefaultBlock DefaultBlock + ClaimSubmissionEnabled bool + ChainID uint64 } func Create(ctx context.Context, c *CreateInfo) (*Service, error) { @@ -45,13 +60,49 @@ func Create(ctx context.Context, c *CreateInfo) (*Service, error) { return nil, err } + if c.EthClient == nil { + return nil, fmt.Errorf("EthClient on prt service Create is nil") + } + chainID, err := c.EthClient.ChainID(ctx) + if err != nil { + return nil, err + } + if chainID.Uint64() != c.Config.BlockchainId { + return nil, fmt.Errorf("EthClient chainId mismatch: network %d != provided %d", + chainID.Uint64(), c.Config.BlockchainId) + } + s.repository = c.Repository if s.repository == nil { - return nil, fmt.Errorf("repository on validator service Create is nil") + return nil, fmt.Errorf("repository on prt service Create is nil") } - s.pristinePostContext = merkle.CreatePostContext() - s.pristineRootHash = s.pristinePostContext[merkle.TREE_DEPTH] + nodeConfig, err := s.setupPersistentConfig(ctx, &c.Config) + if err != nil { + return nil, err + } + if chainID.Uint64() != nodeConfig.ChainID { + return nil, fmt.Errorf("NodeConfig chainId mismatch: network %d != config %d", + chainID.Uint64(), nodeConfig.ChainID) + } + + s.client = c.EthClient + s.submissionEnabled = nodeConfig.ClaimSubmissionEnabled + s.filter = ethutil.Filter{ + MinChunkSize: ethutil.DefaultMinChunkSize, + MaxChunkSize: new(big.Int).SetUint64(c.Config.BlockchainMaxBlockRange), + Logger: s.Logger, + } + + s.settleInFlight = map[int64]*common.Hash{} + s.joinInFlight = map[int64]*common.Hash{} + + if s.submissionEnabled { + s.txOpts, err = auth.GetTransactOpts(chainID) + if err != nil { + return nil, err + } + } return s, nil } @@ -77,10 +128,40 @@ func (s *Service) Tick() []error { } return errs } -func (s *Service) Stop(b bool) []error { + +func (s *Service) Stop(_ bool) []error { return nil } -func (v *Service) String() string { - return v.Name +func (s *Service) String() string { + return s.Name +} + +func (s *Service) setupPersistentConfig( + ctx context.Context, + c *config.PrtConfig, +) (*PersistentConfig, error) { + config, err := repository.LoadNodeConfig[PersistentConfig](ctx, s.repository, PrtConfigKey) + if config == nil && errors.Is(err, repository.ErrNotFound) { + nc := NodeConfig[PersistentConfig]{ + Key: PrtConfigKey, + Value: PersistentConfig{ + DefaultBlock: c.BlockchainDefaultBlock, + ClaimSubmissionEnabled: c.FeatureClaimSubmissionEnabled, + ChainID: c.BlockchainId, + }, + } + s.Logger.Info("Initializing PRT persistent config", "config", nc.Value) + err = repository.SaveNodeConfig(ctx, s.repository, &nc) + if err != nil { + return nil, err + } + return &nc.Value, nil + } else if err == nil { + s.Logger.Info("PRT service was already configured. Using previous persistent config", "config", config.Value) + return &config.Value, nil + } + + s.Logger.Error("Could not retrieve persistent config from Database. %w", "error", err) + return nil, err } diff --git a/internal/prt/types.go b/internal/prt/types.go new file mode 100644 index 000000000..ce9454b63 --- /dev/null +++ b/internal/prt/types.go @@ -0,0 +1,62 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package prt + +import ( + "github.com/ethereum/go-ethereum/accounts/abi/bind" + + "github.com/cartesi/rollups-node/pkg/contracts/itournament" +) + +type TournamentConstants struct { + MaxLevel uint64 + Level uint64 + Log2step uint64 + Height uint64 +} + +// Interface for Tournament reading +type TournamentAdapter interface { + RetrieveCommitmentJoinedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentCommitmentJoined, error) + RetrieveMatchAdvancedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentMatchAdvanced, error) + RetrieveMatchCreatedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentMatchCreated, error) + RetrieveMatchDeletedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentMatchDeleted, error) + RetrieveNewInnerTournamentEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentNewInnerTournament, error) + RetrieveAllEvents(opts *bind.FilterOpts) (*TournamentEvents, error) + Result(opts *bind.CallOpts) (bool, [32]byte, [32]byte, error) + Constants(opts *bind.CallOpts) (TournamentConstants, error) + TimeFinished(opts *bind.CallOpts) (bool, uint64, error) +} + +// Struct to hold all events retrieved at once +type TournamentEvents struct { + CommitmentJoined []*itournament.ITournamentCommitmentJoined + MatchAdvanced []*itournament.ITournamentMatchAdvanced + MatchCreated []*itournament.ITournamentMatchCreated + MatchDeleted []*itournament.ITournamentMatchDeleted + NewInnerTournament []*itournament.ITournamentNewInnerTournament +} + +type TournamentLevel uint64 + +const ( + RootLevel TournamentLevel = iota + MiddleLevel + BottomLevel +) + +func (l TournamentLevel) String() string { + switch l { + case RootLevel: + return "root" + case MiddleLevel: + return "middle" + case BottomLevel: + return "bottom" + default: + return "unknown" + } +} + +const TournamentFailedNoWinner string = "0xb3045ef8" diff --git a/internal/prt/util.go b/internal/prt/util.go new file mode 100644 index 000000000..a2d0c3719 --- /dev/null +++ b/internal/prt/util.go @@ -0,0 +1,49 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package prt + +import ( + "errors" + "unsafe" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" +) + +type JSONRPCInfo struct { + Code int + Message string + Data any + HasCode bool + HasData bool +} + +func ExtractJsonErrorInfo(err error) (JSONRPCInfo, bool) { + var out JSONRPCInfo + if err == nil { + return out, false + } + + var e rpc.Error + if errors.As(err, &e) { + out.Code = e.ErrorCode() + out.Message = e.Error() + out.HasCode = true + } + + var de rpc.DataError + if errors.As(err, &de) { + out.Data = de.ErrorData() + out.HasData = true + if !out.HasCode { + out.Message = de.Error() + } + } + + return out, out.HasCode || out.HasData +} + +func asBytes32Slice(proof []common.Hash) [][32]byte { + return *(*[][32]byte)(unsafe.Pointer(&proof)) +} diff --git a/internal/repository/postgres/application.go b/internal/repository/postgres/application.go index 9c531cf3b..a30ce142c 100644 --- a/internal/repository/postgres/application.go +++ b/internal/repository/postgres/application.go @@ -34,10 +34,13 @@ func (r *PostgresRepository) CreateApplication( table.Application.TemplateURI, table.Application.EpochLength, table.Application.DataAvailability, + table.Application.ConsensusType, table.Application.State, table.Application.IinputboxBlock, + table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, ). VALUES( @@ -48,11 +51,14 @@ func (r *PostgresRepository) CreateApplication( app.TemplateHash, app.TemplateURI, app.EpochLength, - app.DataAvailability[:], + app.DataAvailability, + app.ConsensusType, app.State, app.IInputBoxBlock, + app.LastEpochCheckBlock, app.LastInputCheckBlock, app.LastOutputCheckBlock, + app.LastTournamentCheckBlock, app.ProcessedInputs, ). RETURNING(table.Application.ID) @@ -147,11 +153,14 @@ func (r *PostgresRepository) GetApplication( table.Application.TemplateURI, table.Application.EpochLength, table.Application.DataAvailability, + table.Application.ConsensusType, table.Application.State, table.Application.Reason, table.Application.IinputboxBlock, + table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, table.Application.CreatedAt, table.Application.UpdatedAt, @@ -194,11 +203,14 @@ func (r *PostgresRepository) GetApplication( &app.TemplateURI, &app.EpochLength, &app.DataAvailability, + &app.ConsensusType, &app.State, &app.Reason, &app.IInputBoxBlock, + &app.LastEpochCheckBlock, &app.LastInputCheckBlock, &app.LastOutputCheckBlock, + &app.LastTournamentCheckBlock, &app.ProcessedInputs, &app.CreatedAt, &app.UpdatedAt, @@ -271,11 +283,14 @@ func (r *PostgresRepository) UpdateApplication( table.Application.TemplateURI, table.Application.EpochLength, table.Application.DataAvailability, + table.Application.ConsensusType, table.Application.State, table.Application.Reason, table.Application.IinputboxBlock, + table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, ). SET( @@ -286,12 +301,15 @@ func (r *PostgresRepository) UpdateApplication( app.TemplateHash, app.TemplateURI, app.EpochLength, - app.DataAvailability[:], + app.DataAvailability, + app.ConsensusType, app.State, app.Reason, app.IInputBoxBlock, + app.LastEpochCheckBlock, app.LastInputCheckBlock, app.LastOutputCheckBlock, + app.LastTournamentCheckBlock, app.ProcessedInputs, ). WHERE(table.Application.ID.EQ(postgres.Int(app.ID))) @@ -324,25 +342,69 @@ func (r *PostgresRepository) UpdateApplicationState( return err } -func (r *PostgresRepository) UpdateEventLastCheckBlock( - ctx context.Context, - appIDs []int64, - event model.MonitoredEvent, - blockNumber uint64, -) error { - var column postgres.ColumnFloat +func getColumnForEvent(event model.MonitoredEvent) (postgres.ColumnFloat, error) { switch event { + case model.MonitoredEvent_EpochSealed: + return table.Application.LastEpochCheckBlock, nil case model.MonitoredEvent_InputAdded: - column = table.Application.LastInputCheckBlock + return table.Application.LastInputCheckBlock, nil case model.MonitoredEvent_OutputExecuted: - column = table.Application.LastOutputCheckBlock + return table.Application.LastOutputCheckBlock, nil + case model.MonitoredEvent_CommitmentJoined: + fallthrough + case model.MonitoredEvent_MatchAdvanced: + fallthrough + case model.MonitoredEvent_MatchCreated: + fallthrough + case model.MonitoredEvent_MatchDeleted: + fallthrough + case model.MonitoredEvent_NewInnerTournament: + return table.Application.LastTournamentCheckBlock, nil case model.MonitoredEvent_ClaimSubmitted: fallthrough case model.MonitoredEvent_ClaimAccepted: fallthrough default: - return fmt.Errorf("invalid monitored event type: %v", event) + return nil, fmt.Errorf("invalid monitored event type: %v", event) + } +} + +func (r *PostgresRepository) GetEventLastCheckBlock( + ctx context.Context, + appID int64, + event model.MonitoredEvent, +) (uint64, error) { + column, err := getColumnForEvent(event) + if err != nil { + return 0, err + } + + stmt := table.Application.SELECT(column).WHERE( + table.Application.ID.EQ(postgres.Int(appID)), + ) + + sqlStr, args := stmt.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var eventBlock uint64 + err = row.Scan(&eventBlock) + if errors.Is(err, sql.ErrNoRows) { + return 0, repository.ErrNotFound } + return eventBlock, err +} + +func (r *PostgresRepository) UpdateEventLastCheckBlock( + ctx context.Context, + appIDs []int64, + event model.MonitoredEvent, + blockNumber uint64, +) error { + column, err := getColumnForEvent(event) + if err != nil { + return err + } + if len(appIDs) == 0 { return nil } @@ -362,7 +424,7 @@ func (r *PostgresRepository) UpdateEventLastCheckBlock( WHERE(table.Application.ID.IN(ids...)) sqlStr, args := updateStmt.Sql() - _, err := r.db.Exec(ctx, sqlStr, args...) + _, err = r.db.Exec(ctx, sqlStr, args...) return err } @@ -468,11 +530,14 @@ func (r *PostgresRepository) ListApplications( table.Application.TemplateURI, table.Application.EpochLength, table.Application.DataAvailability, + table.Application.ConsensusType, table.Application.State, table.Application.Reason, table.Application.IinputboxBlock, + table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, table.Application.CreatedAt, table.Application.UpdatedAt, @@ -511,6 +576,9 @@ func (r *PostgresRepository) ListApplications( table.Application.DataAvailability, postgres.Int(1), postgres.Int(4), // nolint: mnd ).EQ(postgres.Bytea(f.DataAvailability[:]))) } + if f.ConsensusType != nil { + conditions = append(conditions, table.Application.ConsensusType.EQ(postgres.NewEnumValue(f.ConsensusType.String()))) + } if len(conditions) > 0 { sel = sel.WHERE(postgres.AND(conditions...)) @@ -551,11 +619,14 @@ func (r *PostgresRepository) ListApplications( &app.TemplateURI, &app.EpochLength, &app.DataAvailability, + &app.ConsensusType, &app.State, &app.Reason, &app.IInputBoxBlock, + &app.LastEpochCheckBlock, &app.LastInputCheckBlock, &app.LastOutputCheckBlock, + &app.LastTournamentCheckBlock, &app.ProcessedInputs, &app.CreatedAt, &app.UpdatedAt, diff --git a/internal/repository/postgres/bulk.go b/internal/repository/postgres/bulk.go index 072f67d15..1402a153c 100644 --- a/internal/repository/postgres/bulk.go +++ b/internal/repository/postgres/bulk.go @@ -8,6 +8,7 @@ import ( "database/sql" "errors" "fmt" + "unsafe" "github.com/ethereum/go-ethereum/common" "github.com/go-jet/jet/v2/postgres" @@ -18,10 +19,16 @@ import ( "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" ) -func encodeSiblings(outputHashesSiblings []common.Hash) ([]byte, error) { +// byteSliceToHashSlice converts [][32]byte to []common.Hash without copying. +// This is safe because common.Hash is defined as [32]byte, so the memory layout is identical. +func byteSliceToHashSlice(b [][32]byte) []common.Hash { + return *(*[]common.Hash)(unsafe.Pointer(&b)) +} + +func encodeSiblings(siblings []common.Hash) ([]byte, error) { // 1) Make a slice of []byte - arr := make([][]byte, 0, len(outputHashesSiblings)) - for _, h := range outputHashesSiblings { + arr := make([][]byte, 0, len(siblings)) + for _, h := range siblings { // h is [32]byte // we must copy it into a slice of bytes copyH := make([]byte, len(h)) @@ -30,13 +37,13 @@ func encodeSiblings(outputHashesSiblings []common.Hash) ([]byte, error) { } // 2) Use pgtype.ByteaArray and call Set with [][]byte - var siblings pgtype.ByteaArray - if err := siblings.Set(arr); err != nil { + var pgSiblings pgtype.ByteaArray + if err := pgSiblings.Set(arr); err != nil { return nil, fmt.Errorf("failed to set ByteaArray: %w", err) } // 3) Encode it as text (the Postgres array string, e.g. '{\\x...,\\x..., ...}') - encoded, err := siblings.EncodeText(nil, nil) + encoded, err := pgSiblings.EncodeText(nil, nil) if err != nil { return nil, fmt.Errorf("failed to encode ByteaArray: %w", err) } @@ -67,8 +74,7 @@ func getOutputNextIndex( var currentIndex uint64 err := tx.QueryRow(ctx, queryStr, args...).Scan(¤tIndex) if err != nil { - err = fmt.Errorf("failed to get the next output index: %w", err) - return 0, errors.Join(err, tx.Rollback(ctx)) + return 0, fmt.Errorf("failed to get the next output index: %w", err) } return currentIndex, nil } @@ -95,8 +101,33 @@ func getReportNextIndex( var currentIndex uint64 err := tx.QueryRow(ctx, queryStr, args...).Scan(¤tIndex) if err != nil { - err = fmt.Errorf("failed to get the next report index: %w", err) - return 0, errors.Join(err, tx.Rollback(ctx)) + return 0, fmt.Errorf("failed to get the next report index: %w", err) + } + return currentIndex, nil +} + +func getStateHashNextIndex( + ctx context.Context, + tx pgx.Tx, + appID int64, + epochIndex uint64, +) (uint64, error) { + + query := table.StateHashes.SELECT( + postgres.COALESCE( + postgres.Float(1).ADD(postgres.MAXf(table.StateHashes.Index)), + postgres.Float(0), + ), + ).WHERE( + table.StateHashes.InputEpochApplicationID.EQ(postgres.Int64(appID)). + AND(table.StateHashes.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))), + ) + + queryStr, args := query.Sql() + var currentIndex uint64 + err := tx.QueryRow(ctx, queryStr, args...).Scan(¤tIndex) + if err != nil { + return 0, fmt.Errorf("failed to get the next state hash index: %w", err) } return currentIndex, nil } @@ -135,7 +166,7 @@ func insertOutputs( sqlStr, args := stmt.Sql() _, err = tx.Exec(ctx, sqlStr, args...) if err != nil { - return errors.Join(err, tx.Rollback(ctx)) + return err } return nil } @@ -174,7 +205,60 @@ func insertReports( sqlStr, args := stmt.Sql() _, err = tx.Exec(ctx, sqlStr, args...) if err != nil { - return errors.Join(err, tx.Rollback(ctx)) + return err + } + return nil +} + +func insertStateHashes( + ctx context.Context, + tx pgx.Tx, + appID int64, + epochIndex uint64, + inputIndex uint64, + hashes [][32]byte, + machineHash common.Hash, + remainingMetaCycles uint64, +) error { + + nextIndex, err := getStateHashNextIndex(ctx, tx, appID, epochIndex) + if err != nil { + return err + } + + stmt := table.StateHashes.INSERT( + table.StateHashes.InputEpochApplicationID, + table.StateHashes.EpochIndex, + table.StateHashes.InputIndex, + table.StateHashes.Index, + table.StateHashes.MachineHash, + table.StateHashes.Repetitions, + ) + + for i, h := range hashes { + stmt = stmt.VALUES( + appID, + epochIndex, + inputIndex, + nextIndex+uint64(i), + h[:], + 1, + ) + } + + stmt = stmt.VALUES( + appID, + epochIndex, + inputIndex, + nextIndex+uint64(len(hashes)), + machineHash[:], + remainingMetaCycles, + ) + + sqlStr, args := stmt.Sql() + _, err = tx.Exec(ctx, sqlStr, args...) + if err != nil { + return err } return nil } @@ -216,6 +300,47 @@ func updateInput( return nil } +func updateEpochOutputsMerkleProof( + ctx context.Context, + tx pgx.Tx, + appID int64, + epochIndex uint64, + outputsHash common.Hash, + outputsHashProof []common.Hash, + machineHash common.Hash, +) error { + + proof, err := encodeSiblings(outputsHashProof) + if err != nil { + return fmt.Errorf("failed to serialize epoch '%d' OutputsMerkleProof. %w", epochIndex, err) + } + updStmt := table.Epoch. + UPDATE( + table.Epoch.OutputsMerkleRoot, + table.Epoch.OutputsMerkleProof, + table.Epoch.MachineHash, + ). + SET( + outputsHash, + proof, + machineHash, + ). + WHERE( + table.Epoch.ApplicationID.EQ(postgres.Int64(appID)). + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + func updateApp( ctx context.Context, tx pgx.Tx, @@ -258,23 +383,36 @@ func (r *PostgresRepository) StoreAdvanceResult( if res.Status == model.InputCompletionStatus_Accepted { err = insertOutputs(ctx, tx, appID, res.InputIndex, res.Outputs) if err != nil { - return err + return errors.Join(err, tx.Rollback(ctx)) } err = insertReports(ctx, tx, appID, res.InputIndex, res.Reports) if err != nil { - return err + return errors.Join(err, tx.Rollback(ctx)) } } - err = updateInput(ctx, tx, appID, res.InputIndex, res.Status, res.OutputsHash, *res.MachineHash) + if res.IsDaveConsensus { + err = insertStateHashes(ctx, tx, appID, res.EpochIndex, res.InputIndex, res.Hashes, res.MachineHash, res.RemainingMetaCycles) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + } + + err = updateInput(ctx, tx, appID, res.InputIndex, res.Status, res.OutputsHash, res.MachineHash) if err != nil { - return err + return errors.Join(err, tx.Rollback(ctx)) + } + + err = updateEpochOutputsMerkleProof(ctx, tx, appID, res.EpochIndex, res.OutputsHash, + byteSliceToHashSlice(res.OutputsHashProof), res.MachineHash) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) } err = updateApp(ctx, tx, appID, res.InputIndex) if err != nil { - return err + return errors.Join(err, tx.Rollback(ctx)) } err = tx.Commit(ctx) @@ -291,13 +429,23 @@ func updateEpochClaim( e *model.Epoch, ) error { + commitmentProof, err := encodeSiblings(e.CommitmentProof) + if err != nil { + return errors.Join( + fmt.Errorf("failed to serialize epoch '%d' OutputsMerkleProof. %w", e.Index, err), + tx.Rollback(ctx), + ) + } + updStmt := table.Epoch. UPDATE( - table.Epoch.ClaimHash, + table.Epoch.Commitment, + table.Epoch.CommitmentProof, table.Epoch.Status, ). SET( - e.ClaimHash, + e.Commitment, + commitmentProof, postgres.NewEnumValue(model.EpochStatus_ClaimComputed.String()), ). WHERE( @@ -398,26 +546,223 @@ func (r *PostgresRepository) StoreClaimAndProofs(ctx context.Context, epoch *mod return nil } -func (r *PostgresRepository) UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error { - updStmt := table.Input. +func insertCommitments(ctx context.Context, tx pgx.Tx, appID int64, commitments []*model.Commitment) error { + if len(commitments) < 1 { + return nil + } + + stmt := table.Commitments.INSERT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + ) + for _, c := range commitments { + stmt = stmt.VALUES( + appID, + c.EpochIndex, + c.TournamentAddress, + c.Commitment, + c.FinalStateHash, + c.SubmitterAddress, + c.BlockNumber, + c.TxHash, + ) + } + + sqlStr, args := stmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func insertMatches(ctx context.Context, tx pgx.Tx, appID int64, matches []*model.Match) error { + if len(matches) < 1 { + return nil + } + + stmt := table.Matches.INSERT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ) + for _, m := range matches { + stmt = stmt.VALUES( + appID, + m.EpochIndex, + m.TournamentAddress, + m.IDHash, + m.CommitmentOne, + m.CommitmentTwo, + m.LeftOfTwo, + m.BlockNumber, + m.TxHash, + m.Winner, + m.DeletionReason, + m.DeletionBlockNumber, + m.DeletionTxHash, + ) + } + + sqlStr, args := stmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func insertMatchAdvanced(ctx context.Context, tx pgx.Tx, appID int64, matchAdvanced []*model.MatchAdvanced) error { + if len(matchAdvanced) < 1 { + return nil + } + + stmt := table.MatchAdvances.INSERT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + ) + for _, ma := range matchAdvanced { + stmt = stmt.VALUES( + appID, + ma.EpochIndex, + ma.TournamentAddress, + ma.IDHash, + ma.OtherParent, + ma.LeftNode, + ma.BlockNumber, + ma.TxHash, + ) + } + + sqlStr, args := stmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func updateMatches(ctx context.Context, tx pgx.Tx, appID int64, matches []*model.Match) error { + for _, m := range matches { + updStmt := table.Matches.UPDATE( + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ).SET( + m.Winner, + m.DeletionReason, + m.DeletionBlockNumber, + m.DeletionTxHash, + ).WHERE( + table.Matches.ApplicationID.EQ(postgres.Int64(appID)). + AND(table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)))). + AND(table.Matches.TournamentAddress.EQ(postgres.Bytea(m.TournamentAddress.Bytes()))). + AND(table.Matches.IDHash.EQ(postgres.Bytea(m.IDHash.Bytes()))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + if cmd.RowsAffected() == 0 { + return errors.Join( + fmt.Errorf("no match found for update: app %d, epoch %d, tournament %s, idHash %s", m.ApplicationID, m.EpochIndex, m.TournamentAddress.Hex(), m.IDHash.Hex()), + tx.Rollback(ctx), + ) + } + } + return nil +} + +func updateLastProcessedBlock(ctx context.Context, tx pgx.Tx, appID int64, lastProcessedBlock uint64) error { + lastBlock := postgres.RawFloat(fmt.Sprintf("%d", lastProcessedBlock)) + appUpdateStmt := table.Application. UPDATE( - table.Input.SnapshotURI, + table.Application.LastTournamentCheckBlock, ). SET( - snapshotURI, + lastBlock, ). - WHERE( - table.Input.EpochApplicationID.EQ(postgres.Int64(appId)). - AND(table.Input.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", inputIndex)))), - ) + WHERE(postgres.AND( + table.Application.ID.EQ(postgres.Int64(appID)), + table.Application.LastTournamentCheckBlock.LT(lastBlock), + )) - sqlStr, args := updStmt.Sql() - cmd, err := r.db.Exec(ctx, sqlStr, args...) + sqlStr, args := appUpdateStmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func (r *PostgresRepository) StoreTournamentEvents( + ctx context.Context, + appID int64, + commitments []*model.Commitment, + matches []*model.Match, + matchAdvanced []*model.MatchAdvanced, + matchDeleted []*model.Match, + lastProcessedBlock uint64, +) error { + tx, err := r.db.Begin(ctx) if err != nil { return err } - if cmd.RowsAffected() == 0 { - return fmt.Errorf("no input found with appId %d and index %d", appId, inputIndex) + + err = insertCommitments(ctx, tx, appID, commitments) + if err != nil { + return err + } + + err = insertMatches(ctx, tx, appID, matches) + if err != nil { + return err + } + + err = insertMatchAdvanced(ctx, tx, appID, matchAdvanced) + if err != nil { + return err + } + + err = updateMatches(ctx, tx, appID, matchDeleted) + if err != nil { + return err + } + + err = updateLastProcessedBlock(ctx, tx, appID, lastProcessedBlock) + if err != nil { + return err } + + err = tx.Commit(ctx) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil } diff --git a/internal/repository/postgres/claimer.go b/internal/repository/postgres/claimer.go index 6bbe5aaa8..69c63855f 100644 --- a/internal/repository/postgres/claimer.go +++ b/internal/repository/postgres/claimer.go @@ -12,6 +12,7 @@ import ( "github.com/jackc/pgx/v5" "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/enum" "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" ) @@ -41,7 +42,7 @@ func (r *PostgresRepository) selectOldestClaimPerApp( table.Epoch.Index, table.Epoch.FirstBlock, table.Epoch.LastBlock, - table.Epoch.ClaimHash, + table.Epoch.OutputsMerkleRoot, table.Epoch.ClaimTransactionHash, table.Epoch.Status, table.Epoch.VirtualIndex, @@ -74,7 +75,11 @@ func (r *PostgresRepository) selectOldestClaimPerApp( table.Epoch.ApplicationID.EQ(table.Application.ID), ), ). - WHERE(table.Epoch.Status.EQ(postgres.NewEnumValue(epochStatus.String())).AND(table.Application.State.EQ(postgres.NewEnumValue(model.ApplicationState_Enabled.String())))). + WHERE( + table.Epoch.Status.EQ(postgres.NewEnumValue(epochStatus.String())). + AND(table.Application.State.EQ(enum.ApplicationState.Enabled)). + AND(table.Application.ConsensusType.NOT_EQ(enum.Consensus.Prt)), + ). ORDER_BY( table.Epoch.ApplicationID, table.Epoch.Index.ASC(), @@ -97,7 +102,7 @@ func (r *PostgresRepository) selectOldestClaimPerApp( &epoch.Index, &epoch.FirstBlock, &epoch.LastBlock, - &epoch.ClaimHash, + &epoch.OutputsMerkleRoot, &epoch.ClaimTransactionHash, &epoch.Status, &epoch.VirtualIndex, @@ -152,7 +157,7 @@ func (r *PostgresRepository) selectNewestAcceptedClaimPerApp( table.Epoch.Index, table.Epoch.FirstBlock, table.Epoch.LastBlock, - table.Epoch.ClaimHash, + table.Epoch.OutputsMerkleRoot, table.Epoch.ClaimTransactionHash, table.Epoch.Status, table.Epoch.VirtualIndex, @@ -167,7 +172,10 @@ func (r *PostgresRepository) selectNewestAcceptedClaimPerApp( table.Epoch.ApplicationID.EQ(table.Application.ID), ), ). - WHERE(expr.AND(table.Application.State.EQ(postgres.NewEnumValue(model.ApplicationState_Enabled.String())))). + WHERE( + expr.AND(table.Application.State.EQ(enum.ApplicationState.Enabled)). + AND(table.Application.ConsensusType.NOT_EQ(enum.Consensus.Prt)), + ). ORDER_BY( table.Epoch.ApplicationID, table.Epoch.Index.DESC(), @@ -189,7 +197,7 @@ func (r *PostgresRepository) selectNewestAcceptedClaimPerApp( &epoch.Index, &epoch.FirstBlock, &epoch.LastBlock, - &epoch.ClaimHash, + &epoch.OutputsMerkleRoot, &epoch.ClaimTransactionHash, &epoch.Status, &epoch.VirtualIndex, diff --git a/internal/repository/postgres/commitment.go b/internal/repository/postgres/commitment.go new file mode 100644 index 000000000..e5abfe75e --- /dev/null +++ b/internal/repository/postgres/commitment.go @@ -0,0 +1,222 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ CommitmentRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateCommitment( + ctx context.Context, + nameOrAddress string, + c *model.Commitment, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", c.EpochIndex)), + postgres.Bytea(c.TournamentAddress.Bytes()), + postgres.Bytea(c.Commitment.Bytes()), + postgres.Bytea(c.FinalStateHash.Bytes()), + postgres.Bytea(c.SubmitterAddress.Bytes()), + postgres.RawFloat(fmt.Sprintf("%d", c.BlockNumber)), + postgres.Bytea(c.TxHash.Bytes()), + ).WHERE( + whereClause, + ) + + insertStmt := table.Commitments.INSERT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + ).QUERY( + selectQuery, + ) + + sqlStr, args := insertStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + + return err +} + +func (r *PostgresRepository) GetCommitment( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + commitmentHex string, +) (*model.Commitment, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddr := common.HexToAddress(tournamentAddress) + commitment := common.HexToHash(commitmentHex) + + sel := table.Commitments. + SELECT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + table.Commitments.CreatedAt, + table.Commitments.UpdatedAt, + ). + FROM( + table.Commitments. + INNER_JOIN(table.Application, + table.Commitments.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Commitments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))). + AND(table.Commitments.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))). + AND(table.Commitments.Commitment.EQ(postgres.Bytea(commitment.Bytes()))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var c model.Commitment + err = row.Scan( + &c.ApplicationID, + &c.EpochIndex, + &c.TournamentAddress, + &c.Commitment, + &c.FinalStateHash, + &c.SubmitterAddress, + &c.BlockNumber, + &c.TxHash, + &c.CreatedAt, + &c.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &c, nil +} + +func (r *PostgresRepository) ListCommitments( + ctx context.Context, + nameOrAddress string, + f repository.CommitmentFilter, + p repository.Pagination, + descending bool, +) ([]*model.Commitment, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.Commitments. + SELECT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + table.Commitments.CreatedAt, + table.Commitments.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.Commitments. + INNER_JOIN(table.Application, + table.Commitments.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.Commitments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + if f.TournamentAddress != nil { + tournamentAddr := common.HexToAddress(*f.TournamentAddress) + conditions = append(conditions, table.Commitments.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.Commitments.EpochIndex.DESC()) + } else { + sel = sel.ORDER_BY(table.Commitments.EpochIndex.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var commitments []*model.Commitment + var total uint64 + for rows.Next() { + var c model.Commitment + err := rows.Scan( + &c.ApplicationID, + &c.EpochIndex, + &c.TournamentAddress, + &c.Commitment, + &c.FinalStateHash, + &c.SubmitterAddress, + &c.BlockNumber, + &c.TxHash, + &c.CreatedAt, + &c.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + commitments = append(commitments, &c) + } + + return commitments, total, nil +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/consensus.go b/internal/repository/postgres/db/rollupsdb/public/enum/consensus.go new file mode 100644 index 000000000..2e52efa8f --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/consensus.go @@ -0,0 +1,20 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var Consensus = &struct { + Authority postgres.StringExpression + Quorum postgres.StringExpression + Prt postgres.StringExpression +}{ + Authority: postgres.NewEnumValue("AUTHORITY"), + Quorum: postgres.NewEnumValue("QUORUM"), + Prt: postgres.NewEnumValue("PRT"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/matchdeletionreason.go b/internal/repository/postgres/db/rollupsdb/public/enum/matchdeletionreason.go new file mode 100644 index 000000000..45b52d52b --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/matchdeletionreason.go @@ -0,0 +1,22 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var MatchDeletionReason = &struct { + Step postgres.StringExpression + Timeout postgres.StringExpression + ChildTournament postgres.StringExpression + NotDeleted postgres.StringExpression +}{ + Step: postgres.NewEnumValue("STEP"), + Timeout: postgres.NewEnumValue("TIMEOUT"), + ChildTournament: postgres.NewEnumValue("CHILD_TOURNAMENT"), + NotDeleted: postgres.NewEnumValue("NOT_DELETED"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/winnercommitment.go b/internal/repository/postgres/db/rollupsdb/public/enum/winnercommitment.go new file mode 100644 index 000000000..29be1aa40 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/winnercommitment.go @@ -0,0 +1,20 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var WinnerCommitment = &struct { + None postgres.StringExpression + One postgres.StringExpression + Two postgres.StringExpression +}{ + None: postgres.NewEnumValue("NONE"), + One: postgres.NewEnumValue("ONE"), + Two: postgres.NewEnumValue("TWO"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/application.go b/internal/repository/postgres/db/rollupsdb/public/table/application.go index 0f79630a0..851f7cd6b 100644 --- a/internal/repository/postgres/db/rollupsdb/public/table/application.go +++ b/internal/repository/postgres/db/rollupsdb/public/table/application.go @@ -17,23 +17,26 @@ type applicationTable struct { postgres.Table // Columns - ID postgres.ColumnInteger - Name postgres.ColumnString - IapplicationAddress postgres.ColumnString - IconsensusAddress postgres.ColumnString - IinputboxAddress postgres.ColumnString - IinputboxBlock postgres.ColumnFloat - TemplateHash postgres.ColumnString - TemplateURI postgres.ColumnString - EpochLength postgres.ColumnFloat - DataAvailability postgres.ColumnString - State postgres.ColumnString - Reason postgres.ColumnString - LastInputCheckBlock postgres.ColumnFloat - LastOutputCheckBlock postgres.ColumnFloat - ProcessedInputs postgres.ColumnFloat - CreatedAt postgres.ColumnTimestampz - UpdatedAt postgres.ColumnTimestampz + ID postgres.ColumnInteger + Name postgres.ColumnString + IapplicationAddress postgres.ColumnString + IconsensusAddress postgres.ColumnString + IinputboxAddress postgres.ColumnString + IinputboxBlock postgres.ColumnFloat + TemplateHash postgres.ColumnString + TemplateURI postgres.ColumnString + EpochLength postgres.ColumnFloat + DataAvailability postgres.ColumnString + ConsensusType postgres.ColumnString + State postgres.ColumnString + Reason postgres.ColumnString + LastEpochCheckBlock postgres.ColumnFloat + LastInputCheckBlock postgres.ColumnFloat + LastOutputCheckBlock postgres.ColumnFloat + LastTournamentCheckBlock postgres.ColumnFloat + ProcessedInputs postgres.ColumnFloat + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz AllColumns postgres.ColumnList MutableColumns postgres.ColumnList @@ -74,48 +77,54 @@ func newApplicationTable(schemaName, tableName, alias string) *ApplicationTable func newApplicationTableImpl(schemaName, tableName, alias string) applicationTable { var ( - IDColumn = postgres.IntegerColumn("id") - NameColumn = postgres.StringColumn("name") - IapplicationAddressColumn = postgres.StringColumn("iapplication_address") - IconsensusAddressColumn = postgres.StringColumn("iconsensus_address") - IinputboxAddressColumn = postgres.StringColumn("iinputbox_address") - IinputboxBlockColumn = postgres.FloatColumn("iinputbox_block") - TemplateHashColumn = postgres.StringColumn("template_hash") - TemplateURIColumn = postgres.StringColumn("template_uri") - EpochLengthColumn = postgres.FloatColumn("epoch_length") - DataAvailabilityColumn = postgres.StringColumn("data_availability") - StateColumn = postgres.StringColumn("state") - ReasonColumn = postgres.StringColumn("reason") - LastInputCheckBlockColumn = postgres.FloatColumn("last_input_check_block") - LastOutputCheckBlockColumn = postgres.FloatColumn("last_output_check_block") - ProcessedInputsColumn = postgres.FloatColumn("processed_inputs") - CreatedAtColumn = postgres.TimestampzColumn("created_at") - UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - allColumns = postgres.ColumnList{IDColumn, NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, StateColumn, ReasonColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} - mutableColumns = postgres.ColumnList{NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, StateColumn, ReasonColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} + IDColumn = postgres.IntegerColumn("id") + NameColumn = postgres.StringColumn("name") + IapplicationAddressColumn = postgres.StringColumn("iapplication_address") + IconsensusAddressColumn = postgres.StringColumn("iconsensus_address") + IinputboxAddressColumn = postgres.StringColumn("iinputbox_address") + IinputboxBlockColumn = postgres.FloatColumn("iinputbox_block") + TemplateHashColumn = postgres.StringColumn("template_hash") + TemplateURIColumn = postgres.StringColumn("template_uri") + EpochLengthColumn = postgres.FloatColumn("epoch_length") + DataAvailabilityColumn = postgres.StringColumn("data_availability") + ConsensusTypeColumn = postgres.StringColumn("consensus_type") + StateColumn = postgres.StringColumn("state") + ReasonColumn = postgres.StringColumn("reason") + LastEpochCheckBlockColumn = postgres.FloatColumn("last_epoch_check_block") + LastInputCheckBlockColumn = postgres.FloatColumn("last_input_check_block") + LastOutputCheckBlockColumn = postgres.FloatColumn("last_output_check_block") + LastTournamentCheckBlockColumn = postgres.FloatColumn("last_tournament_check_block") + ProcessedInputsColumn = postgres.FloatColumn("processed_inputs") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{IDColumn, NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, ConsensusTypeColumn, StateColumn, ReasonColumn, LastEpochCheckBlockColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, LastTournamentCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, ConsensusTypeColumn, StateColumn, ReasonColumn, LastEpochCheckBlockColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, LastTournamentCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} ) return applicationTable{ Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), //Columns - ID: IDColumn, - Name: NameColumn, - IapplicationAddress: IapplicationAddressColumn, - IconsensusAddress: IconsensusAddressColumn, - IinputboxAddress: IinputboxAddressColumn, - IinputboxBlock: IinputboxBlockColumn, - TemplateHash: TemplateHashColumn, - TemplateURI: TemplateURIColumn, - EpochLength: EpochLengthColumn, - DataAvailability: DataAvailabilityColumn, - State: StateColumn, - Reason: ReasonColumn, - LastInputCheckBlock: LastInputCheckBlockColumn, - LastOutputCheckBlock: LastOutputCheckBlockColumn, - ProcessedInputs: ProcessedInputsColumn, - CreatedAt: CreatedAtColumn, - UpdatedAt: UpdatedAtColumn, + ID: IDColumn, + Name: NameColumn, + IapplicationAddress: IapplicationAddressColumn, + IconsensusAddress: IconsensusAddressColumn, + IinputboxAddress: IinputboxAddressColumn, + IinputboxBlock: IinputboxBlockColumn, + TemplateHash: TemplateHashColumn, + TemplateURI: TemplateURIColumn, + EpochLength: EpochLengthColumn, + DataAvailability: DataAvailabilityColumn, + ConsensusType: ConsensusTypeColumn, + State: StateColumn, + Reason: ReasonColumn, + LastEpochCheckBlock: LastEpochCheckBlockColumn, + LastInputCheckBlock: LastInputCheckBlockColumn, + LastOutputCheckBlock: LastOutputCheckBlockColumn, + LastTournamentCheckBlock: LastTournamentCheckBlockColumn, + ProcessedInputs: ProcessedInputsColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, AllColumns: allColumns, MutableColumns: mutableColumns, diff --git a/internal/repository/postgres/db/rollupsdb/public/table/commitments.go b/internal/repository/postgres/db/rollupsdb/public/table/commitments.go new file mode 100644 index 000000000..b67772afa --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/commitments.go @@ -0,0 +1,102 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Commitments = newCommitmentsTable("public", "commitments", "") + +type commitmentsTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + TournamentAddress postgres.ColumnString + Commitment postgres.ColumnString + FinalStateHash postgres.ColumnString + SubmitterAddress postgres.ColumnString + BlockNumber postgres.ColumnFloat + TxHash postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type CommitmentsTable struct { + commitmentsTable + + EXCLUDED commitmentsTable +} + +// AS creates new CommitmentsTable with assigned alias +func (a CommitmentsTable) AS(alias string) *CommitmentsTable { + return newCommitmentsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new CommitmentsTable with assigned schema name +func (a CommitmentsTable) FromSchema(schemaName string) *CommitmentsTable { + return newCommitmentsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new CommitmentsTable with assigned table prefix +func (a CommitmentsTable) WithPrefix(prefix string) *CommitmentsTable { + return newCommitmentsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new CommitmentsTable with assigned table suffix +func (a CommitmentsTable) WithSuffix(suffix string) *CommitmentsTable { + return newCommitmentsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newCommitmentsTable(schemaName, tableName, alias string) *CommitmentsTable { + return &CommitmentsTable{ + commitmentsTable: newCommitmentsTableImpl(schemaName, tableName, alias), + EXCLUDED: newCommitmentsTableImpl("", "excluded", ""), + } +} + +func newCommitmentsTableImpl(schemaName, tableName, alias string) commitmentsTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + TournamentAddressColumn = postgres.StringColumn("tournament_address") + CommitmentColumn = postgres.StringColumn("commitment") + FinalStateHashColumn = postgres.StringColumn("final_state_hash") + SubmitterAddressColumn = postgres.StringColumn("submitter_address") + BlockNumberColumn = postgres.FloatColumn("block_number") + TxHashColumn = postgres.StringColumn("tx_hash") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, TournamentAddressColumn, CommitmentColumn, FinalStateHashColumn, SubmitterAddressColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{FinalStateHashColumn, SubmitterAddressColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return commitmentsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + TournamentAddress: TournamentAddressColumn, + Commitment: CommitmentColumn, + FinalStateHash: FinalStateHashColumn, + SubmitterAddress: SubmitterAddressColumn, + BlockNumber: BlockNumberColumn, + TxHash: TxHashColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/epoch.go b/internal/repository/postgres/db/rollupsdb/public/table/epoch.go index 8fd8cc12c..5e7d95d7d 100644 --- a/internal/repository/postgres/db/rollupsdb/public/table/epoch.go +++ b/internal/repository/postgres/db/rollupsdb/public/table/epoch.go @@ -21,7 +21,14 @@ type epochTable struct { Index postgres.ColumnFloat FirstBlock postgres.ColumnFloat LastBlock postgres.ColumnFloat - ClaimHash postgres.ColumnString + InputIndexLowerBound postgres.ColumnFloat + InputIndexUpperBound postgres.ColumnFloat + MachineHash postgres.ColumnString + OutputsMerkleRoot postgres.ColumnString + OutputsMerkleProof postgres.ColumnString + Commitment postgres.ColumnString + CommitmentProof postgres.ColumnString + TournamentAddress postgres.ColumnString ClaimTransactionHash postgres.ColumnString Status postgres.ColumnString VirtualIndex postgres.ColumnFloat @@ -71,14 +78,21 @@ func newEpochTableImpl(schemaName, tableName, alias string) epochTable { IndexColumn = postgres.FloatColumn("index") FirstBlockColumn = postgres.FloatColumn("first_block") LastBlockColumn = postgres.FloatColumn("last_block") - ClaimHashColumn = postgres.StringColumn("claim_hash") + InputIndexLowerBoundColumn = postgres.FloatColumn("input_index_lower_bound") + InputIndexUpperBoundColumn = postgres.FloatColumn("input_index_upper_bound") + MachineHashColumn = postgres.StringColumn("machine_hash") + OutputsMerkleRootColumn = postgres.StringColumn("outputs_merkle_root") + OutputsMerkleProofColumn = postgres.StringColumn("outputs_merkle_proof") + CommitmentColumn = postgres.StringColumn("commitment") + CommitmentProofColumn = postgres.StringColumn("commitment_proof") + TournamentAddressColumn = postgres.StringColumn("tournament_address") ClaimTransactionHashColumn = postgres.StringColumn("claim_transaction_hash") StatusColumn = postgres.StringColumn("status") VirtualIndexColumn = postgres.FloatColumn("virtual_index") CreatedAtColumn = postgres.TimestampzColumn("created_at") UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - allColumns = postgres.ColumnList{ApplicationIDColumn, IndexColumn, FirstBlockColumn, LastBlockColumn, ClaimHashColumn, ClaimTransactionHashColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} - mutableColumns = postgres.ColumnList{FirstBlockColumn, LastBlockColumn, ClaimHashColumn, ClaimTransactionHashColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} + allColumns = postgres.ColumnList{ApplicationIDColumn, IndexColumn, FirstBlockColumn, LastBlockColumn, InputIndexLowerBoundColumn, InputIndexUpperBoundColumn, MachineHashColumn, OutputsMerkleRootColumn, OutputsMerkleProofColumn, CommitmentColumn, CommitmentProofColumn, TournamentAddressColumn, ClaimTransactionHashColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{FirstBlockColumn, LastBlockColumn, InputIndexLowerBoundColumn, InputIndexUpperBoundColumn, MachineHashColumn, OutputsMerkleRootColumn, OutputsMerkleProofColumn, CommitmentColumn, CommitmentProofColumn, TournamentAddressColumn, ClaimTransactionHashColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} ) return epochTable{ @@ -89,7 +103,14 @@ func newEpochTableImpl(schemaName, tableName, alias string) epochTable { Index: IndexColumn, FirstBlock: FirstBlockColumn, LastBlock: LastBlockColumn, - ClaimHash: ClaimHashColumn, + InputIndexLowerBound: InputIndexLowerBoundColumn, + InputIndexUpperBound: InputIndexUpperBoundColumn, + MachineHash: MachineHashColumn, + OutputsMerkleRoot: OutputsMerkleRootColumn, + OutputsMerkleProof: OutputsMerkleProofColumn, + Commitment: CommitmentColumn, + CommitmentProof: CommitmentProofColumn, + TournamentAddress: TournamentAddressColumn, ClaimTransactionHash: ClaimTransactionHashColumn, Status: StatusColumn, VirtualIndex: VirtualIndexColumn, diff --git a/internal/repository/postgres/db/rollupsdb/public/table/match_advances.go b/internal/repository/postgres/db/rollupsdb/public/table/match_advances.go new file mode 100644 index 000000000..4c1340e12 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/match_advances.go @@ -0,0 +1,102 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var MatchAdvances = newMatchAdvancesTable("public", "match_advances", "") + +type matchAdvancesTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + TournamentAddress postgres.ColumnString + IDHash postgres.ColumnString + OtherParent postgres.ColumnString + LeftNode postgres.ColumnString + BlockNumber postgres.ColumnFloat + TxHash postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type MatchAdvancesTable struct { + matchAdvancesTable + + EXCLUDED matchAdvancesTable +} + +// AS creates new MatchAdvancesTable with assigned alias +func (a MatchAdvancesTable) AS(alias string) *MatchAdvancesTable { + return newMatchAdvancesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MatchAdvancesTable with assigned schema name +func (a MatchAdvancesTable) FromSchema(schemaName string) *MatchAdvancesTable { + return newMatchAdvancesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MatchAdvancesTable with assigned table prefix +func (a MatchAdvancesTable) WithPrefix(prefix string) *MatchAdvancesTable { + return newMatchAdvancesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MatchAdvancesTable with assigned table suffix +func (a MatchAdvancesTable) WithSuffix(suffix string) *MatchAdvancesTable { + return newMatchAdvancesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMatchAdvancesTable(schemaName, tableName, alias string) *MatchAdvancesTable { + return &MatchAdvancesTable{ + matchAdvancesTable: newMatchAdvancesTableImpl(schemaName, tableName, alias), + EXCLUDED: newMatchAdvancesTableImpl("", "excluded", ""), + } +} + +func newMatchAdvancesTableImpl(schemaName, tableName, alias string) matchAdvancesTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + TournamentAddressColumn = postgres.StringColumn("tournament_address") + IDHashColumn = postgres.StringColumn("id_hash") + OtherParentColumn = postgres.StringColumn("other_parent") + LeftNodeColumn = postgres.StringColumn("left_node") + BlockNumberColumn = postgres.FloatColumn("block_number") + TxHashColumn = postgres.StringColumn("tx_hash") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, TournamentAddressColumn, IDHashColumn, OtherParentColumn, LeftNodeColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{LeftNodeColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return matchAdvancesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + TournamentAddress: TournamentAddressColumn, + IDHash: IDHashColumn, + OtherParent: OtherParentColumn, + LeftNode: LeftNodeColumn, + BlockNumber: BlockNumberColumn, + TxHash: TxHashColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/matches.go b/internal/repository/postgres/db/rollupsdb/public/table/matches.go new file mode 100644 index 000000000..dfdaa2e22 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/matches.go @@ -0,0 +1,117 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Matches = newMatchesTable("public", "matches", "") + +type matchesTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + TournamentAddress postgres.ColumnString + IDHash postgres.ColumnString + CommitmentOne postgres.ColumnString + CommitmentTwo postgres.ColumnString + LeftOfTwo postgres.ColumnString + BlockNumber postgres.ColumnFloat + TxHash postgres.ColumnString + Winner postgres.ColumnString + DeletionReason postgres.ColumnString + DeletionBlockNumber postgres.ColumnFloat + DeletionTxHash postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type MatchesTable struct { + matchesTable + + EXCLUDED matchesTable +} + +// AS creates new MatchesTable with assigned alias +func (a MatchesTable) AS(alias string) *MatchesTable { + return newMatchesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MatchesTable with assigned schema name +func (a MatchesTable) FromSchema(schemaName string) *MatchesTable { + return newMatchesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MatchesTable with assigned table prefix +func (a MatchesTable) WithPrefix(prefix string) *MatchesTable { + return newMatchesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MatchesTable with assigned table suffix +func (a MatchesTable) WithSuffix(suffix string) *MatchesTable { + return newMatchesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMatchesTable(schemaName, tableName, alias string) *MatchesTable { + return &MatchesTable{ + matchesTable: newMatchesTableImpl(schemaName, tableName, alias), + EXCLUDED: newMatchesTableImpl("", "excluded", ""), + } +} + +func newMatchesTableImpl(schemaName, tableName, alias string) matchesTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + TournamentAddressColumn = postgres.StringColumn("tournament_address") + IDHashColumn = postgres.StringColumn("id_hash") + CommitmentOneColumn = postgres.StringColumn("commitment_one") + CommitmentTwoColumn = postgres.StringColumn("commitment_two") + LeftOfTwoColumn = postgres.StringColumn("left_of_two") + BlockNumberColumn = postgres.FloatColumn("block_number") + TxHashColumn = postgres.StringColumn("tx_hash") + WinnerColumn = postgres.StringColumn("winner") + DeletionReasonColumn = postgres.StringColumn("deletion_reason") + DeletionBlockNumberColumn = postgres.FloatColumn("deletion_block_number") + DeletionTxHashColumn = postgres.StringColumn("deletion_tx_hash") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, TournamentAddressColumn, IDHashColumn, CommitmentOneColumn, CommitmentTwoColumn, LeftOfTwoColumn, BlockNumberColumn, TxHashColumn, WinnerColumn, DeletionReasonColumn, DeletionBlockNumberColumn, DeletionTxHashColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{CommitmentOneColumn, CommitmentTwoColumn, LeftOfTwoColumn, BlockNumberColumn, TxHashColumn, WinnerColumn, DeletionReasonColumn, DeletionBlockNumberColumn, DeletionTxHashColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return matchesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + TournamentAddress: TournamentAddressColumn, + IDHash: IDHashColumn, + CommitmentOne: CommitmentOneColumn, + CommitmentTwo: CommitmentTwoColumn, + LeftOfTwo: LeftOfTwoColumn, + BlockNumber: BlockNumberColumn, + TxHash: TxHashColumn, + Winner: WinnerColumn, + DeletionReason: DeletionReasonColumn, + DeletionBlockNumber: DeletionBlockNumberColumn, + DeletionTxHash: DeletionTxHashColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/state_hashes.go b/internal/repository/postgres/db/rollupsdb/public/table/state_hashes.go new file mode 100644 index 000000000..3ad32cd01 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/state_hashes.go @@ -0,0 +1,96 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var StateHashes = newStateHashesTable("public", "state_hashes", "") + +type stateHashesTable struct { + postgres.Table + + // Columns + InputEpochApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + InputIndex postgres.ColumnFloat + Index postgres.ColumnFloat + MachineHash postgres.ColumnString + Repetitions postgres.ColumnInteger + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type StateHashesTable struct { + stateHashesTable + + EXCLUDED stateHashesTable +} + +// AS creates new StateHashesTable with assigned alias +func (a StateHashesTable) AS(alias string) *StateHashesTable { + return newStateHashesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new StateHashesTable with assigned schema name +func (a StateHashesTable) FromSchema(schemaName string) *StateHashesTable { + return newStateHashesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new StateHashesTable with assigned table prefix +func (a StateHashesTable) WithPrefix(prefix string) *StateHashesTable { + return newStateHashesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new StateHashesTable with assigned table suffix +func (a StateHashesTable) WithSuffix(suffix string) *StateHashesTable { + return newStateHashesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newStateHashesTable(schemaName, tableName, alias string) *StateHashesTable { + return &StateHashesTable{ + stateHashesTable: newStateHashesTableImpl(schemaName, tableName, alias), + EXCLUDED: newStateHashesTableImpl("", "excluded", ""), + } +} + +func newStateHashesTableImpl(schemaName, tableName, alias string) stateHashesTable { + var ( + InputEpochApplicationIDColumn = postgres.IntegerColumn("input_epoch_application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + InputIndexColumn = postgres.FloatColumn("input_index") + IndexColumn = postgres.FloatColumn("index") + MachineHashColumn = postgres.StringColumn("machine_hash") + RepetitionsColumn = postgres.IntegerColumn("repetitions") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{InputEpochApplicationIDColumn, EpochIndexColumn, InputIndexColumn, IndexColumn, MachineHashColumn, RepetitionsColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{InputIndexColumn, MachineHashColumn, RepetitionsColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return stateHashesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + InputEpochApplicationID: InputEpochApplicationIDColumn, + EpochIndex: EpochIndexColumn, + InputIndex: InputIndexColumn, + Index: IndexColumn, + MachineHash: MachineHashColumn, + Repetitions: RepetitionsColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go b/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go index 528eff35b..9865eb4cd 100644 --- a/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go +++ b/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go @@ -11,11 +11,16 @@ package table // this method only once at the beginning of the program. func UseSchema(schema string) { Application = Application.FromSchema(schema) + Commitments = Commitments.FromSchema(schema) Epoch = Epoch.FromSchema(schema) ExecutionParameters = ExecutionParameters.FromSchema(schema) Input = Input.FromSchema(schema) + MatchAdvances = MatchAdvances.FromSchema(schema) + Matches = Matches.FromSchema(schema) NodeConfig = NodeConfig.FromSchema(schema) Output = Output.FromSchema(schema) Report = Report.FromSchema(schema) SchemaMigrations = SchemaMigrations.FromSchema(schema) + StateHashes = StateHashes.FromSchema(schema) + Tournaments = Tournaments.FromSchema(schema) } diff --git a/internal/repository/postgres/db/rollupsdb/public/table/tournaments.go b/internal/repository/postgres/db/rollupsdb/public/table/tournaments.go new file mode 100644 index 000000000..11f815079 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/tournaments.go @@ -0,0 +1,114 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Tournaments = newTournamentsTable("public", "tournaments", "") + +type tournamentsTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + Address postgres.ColumnString + ParentTournamentAddress postgres.ColumnString + ParentMatchIDHash postgres.ColumnString + MaxLevel postgres.ColumnInteger + Level postgres.ColumnInteger + Log2step postgres.ColumnInteger + Height postgres.ColumnInteger + WinnerCommitment postgres.ColumnString + FinalStateHash postgres.ColumnString + FinishedAtBlock postgres.ColumnFloat + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type TournamentsTable struct { + tournamentsTable + + EXCLUDED tournamentsTable +} + +// AS creates new TournamentsTable with assigned alias +func (a TournamentsTable) AS(alias string) *TournamentsTable { + return newTournamentsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new TournamentsTable with assigned schema name +func (a TournamentsTable) FromSchema(schemaName string) *TournamentsTable { + return newTournamentsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new TournamentsTable with assigned table prefix +func (a TournamentsTable) WithPrefix(prefix string) *TournamentsTable { + return newTournamentsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new TournamentsTable with assigned table suffix +func (a TournamentsTable) WithSuffix(suffix string) *TournamentsTable { + return newTournamentsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newTournamentsTable(schemaName, tableName, alias string) *TournamentsTable { + return &TournamentsTable{ + tournamentsTable: newTournamentsTableImpl(schemaName, tableName, alias), + EXCLUDED: newTournamentsTableImpl("", "excluded", ""), + } +} + +func newTournamentsTableImpl(schemaName, tableName, alias string) tournamentsTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + AddressColumn = postgres.StringColumn("address") + ParentTournamentAddressColumn = postgres.StringColumn("parent_tournament_address") + ParentMatchIDHashColumn = postgres.StringColumn("parent_match_id_hash") + MaxLevelColumn = postgres.IntegerColumn("max_level") + LevelColumn = postgres.IntegerColumn("level") + Log2stepColumn = postgres.IntegerColumn("log2step") + HeightColumn = postgres.IntegerColumn("height") + WinnerCommitmentColumn = postgres.StringColumn("winner_commitment") + FinalStateHashColumn = postgres.StringColumn("final_state_hash") + FinishedAtBlockColumn = postgres.FloatColumn("finished_at_block") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, AddressColumn, ParentTournamentAddressColumn, ParentMatchIDHashColumn, MaxLevelColumn, LevelColumn, Log2stepColumn, HeightColumn, WinnerCommitmentColumn, FinalStateHashColumn, FinishedAtBlockColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{ParentTournamentAddressColumn, ParentMatchIDHashColumn, MaxLevelColumn, LevelColumn, Log2stepColumn, HeightColumn, WinnerCommitmentColumn, FinalStateHashColumn, FinishedAtBlockColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return tournamentsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + Address: AddressColumn, + ParentTournamentAddress: ParentTournamentAddressColumn, + ParentMatchIDHash: ParentMatchIDHashColumn, + MaxLevel: MaxLevelColumn, + Level: LevelColumn, + Log2step: Log2stepColumn, + Height: HeightColumn, + WinnerCommitment: WinnerCommitmentColumn, + FinalStateHash: FinalStateHashColumn, + FinishedAtBlock: FinishedAtBlockColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/epoch.go b/internal/repository/postgres/epoch.go index 00724c434..361eb61cd 100644 --- a/internal/repository/postgres/epoch.go +++ b/internal/repository/postgres/epoch.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/enum" "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" "github.com/go-jet/jet/v2/postgres" "github.com/jackc/pgx/v5" @@ -79,6 +80,9 @@ func (r *PostgresRepository) CreateEpochsAndInputs( table.Epoch.Index, table.Epoch.FirstBlock, table.Epoch.LastBlock, + table.Epoch.InputIndexLowerBound, + table.Epoch.InputIndexUpperBound, + table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, ) @@ -108,11 +112,18 @@ func (r *PostgresRepository) CreateEpochsAndInputs( return err } + tournamentAddress := postgres.RawString("NULL") + if epoch.TournamentAddress != nil { + tournamentAddress = postgres.Bytea(epoch.TournamentAddress.Bytes()) + } epochSelectQuery := table.Application.SELECT( table.Application.ID, postgres.RawFloat(fmt.Sprintf("%d", epoch.Index)), postgres.RawFloat(fmt.Sprintf("%d", epoch.FirstBlock)), postgres.RawFloat(fmt.Sprintf("%d", epoch.LastBlock)), + postgres.RawFloat(fmt.Sprintf("%d", epoch.InputIndexLowerBound)), + postgres.RawFloat(fmt.Sprintf("%d", epoch.InputIndexUpperBound)), + tournamentAddress, postgres.NewEnumValue(epoch.Status.String()), postgres.RawFloat(fmt.Sprintf("%d", nextVirtualIndex)), ).WHERE( @@ -123,6 +134,9 @@ func (r *PostgresRepository) CreateEpochsAndInputs( ON_CONFLICT(table.Epoch.ApplicationID, table.Epoch.Index). DO_UPDATE(postgres.SET( table.Epoch.Status.SET(postgres.NewEnumValue(epoch.Status.String())), + table.Epoch.LastBlock.SET(postgres.RawFloat(fmt.Sprintf("%d", epoch.LastBlock))), + table.Epoch.InputIndexUpperBound.SET(postgres.RawFloat(fmt.Sprintf("%d", epoch.InputIndexUpperBound))), + table.Epoch.TournamentAddress.SET(tournamentAddress), )).Sql() // FIXME on conflict _, err = tx.Exec(ctx, sqlStr, args...) @@ -193,8 +207,15 @@ func (r *PostgresRepository) GetEpoch( table.Epoch.Index, table.Epoch.FirstBlock, table.Epoch.LastBlock, - table.Epoch.ClaimHash, + table.Epoch.InputIndexLowerBound, + table.Epoch.InputIndexUpperBound, + table.Epoch.MachineHash, + table.Epoch.OutputsMerkleRoot, + table.Epoch.OutputsMerkleProof, + table.Epoch.Commitment, + table.Epoch.CommitmentProof, table.Epoch.ClaimTransactionHash, + table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, table.Epoch.CreatedAt, @@ -220,8 +241,15 @@ func (r *PostgresRepository) GetEpoch( &ep.Index, &ep.FirstBlock, &ep.LastBlock, - &ep.ClaimHash, + &ep.InputIndexLowerBound, + &ep.InputIndexUpperBound, + &ep.MachineHash, + &ep.OutputsMerkleRoot, + &ep.OutputsMerkleProof, + &ep.Commitment, + &ep.CommitmentProof, &ep.ClaimTransactionHash, + &ep.TournamentAddress, &ep.Status, &ep.VirtualIndex, &ep.CreatedAt, @@ -258,7 +286,7 @@ func (r *PostgresRepository) GetLastAcceptedEpochIndex( ). WHERE( whereClause. - AND(table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_ClaimAccepted.String()))), + AND(table.Epoch.Status.EQ(enum.EpochStatus.ClaimAccepted)), ). ORDER_BY(table.Epoch.Index.DESC()). LIMIT(1) @@ -279,6 +307,81 @@ func (r *PostgresRepository) GetLastAcceptedEpochIndex( return index, nil } +func (r *PostgresRepository) GetLastNonOpenEpoch( + ctx context.Context, + nameOrAddress string, +) (*model.Epoch, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + stmt := table.Epoch. + SELECT( + table.Epoch.ApplicationID, + table.Epoch.Index, + table.Epoch.FirstBlock, + table.Epoch.LastBlock, + table.Epoch.InputIndexLowerBound, + table.Epoch.InputIndexUpperBound, + table.Epoch.MachineHash, + table.Epoch.OutputsMerkleRoot, + table.Epoch.OutputsMerkleProof, + table.Epoch.Commitment, + table.Epoch.CommitmentProof, + table.Epoch.ClaimTransactionHash, + table.Epoch.TournamentAddress, + table.Epoch.Status, + table.Epoch.VirtualIndex, + table.Epoch.CreatedAt, + table.Epoch.UpdatedAt, + ). + FROM( + table.Epoch. + INNER_JOIN(table.Application, + table.Epoch.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Epoch.Status.NOT_EQ(enum.EpochStatus.Open)), + ). + ORDER_BY(table.Epoch.Index.DESC()). + LIMIT(1) + + sqlStr, args := stmt.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var ep model.Epoch + err = row.Scan( + &ep.ApplicationID, + &ep.Index, + &ep.FirstBlock, + &ep.LastBlock, + &ep.InputIndexLowerBound, + &ep.InputIndexUpperBound, + &ep.MachineHash, + &ep.OutputsMerkleRoot, + &ep.OutputsMerkleProof, + &ep.Commitment, + &ep.CommitmentProof, + &ep.ClaimTransactionHash, + &ep.TournamentAddress, + &ep.Status, + &ep.VirtualIndex, + &ep.CreatedAt, + &ep.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &ep, nil +} + func (r *PostgresRepository) GetEpochByVirtualIndex( ctx context.Context, nameOrAddress string, @@ -296,8 +399,15 @@ func (r *PostgresRepository) GetEpochByVirtualIndex( table.Epoch.Index, table.Epoch.FirstBlock, table.Epoch.LastBlock, - table.Epoch.ClaimHash, + table.Epoch.InputIndexLowerBound, + table.Epoch.InputIndexUpperBound, + table.Epoch.MachineHash, + table.Epoch.OutputsMerkleRoot, + table.Epoch.OutputsMerkleProof, + table.Epoch.Commitment, + table.Epoch.CommitmentProof, table.Epoch.ClaimTransactionHash, + table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, table.Epoch.CreatedAt, @@ -323,8 +433,15 @@ func (r *PostgresRepository) GetEpochByVirtualIndex( &ep.Index, &ep.FirstBlock, &ep.LastBlock, - &ep.ClaimHash, + &ep.InputIndexLowerBound, + &ep.InputIndexUpperBound, + &ep.MachineHash, + &ep.OutputsMerkleRoot, + &ep.OutputsMerkleProof, + &ep.Commitment, + &ep.CommitmentProof, &ep.ClaimTransactionHash, + &ep.TournamentAddress, &ep.Status, &ep.VirtualIndex, &ep.CreatedAt, @@ -339,7 +456,7 @@ func (r *PostgresRepository) GetEpochByVirtualIndex( return &ep, nil } -func (r *PostgresRepository) UpdateEpoch( +func (r *PostgresRepository) UpdateEpochClaimTransactionHash( ctx context.Context, nameOrAddress string, e *model.Epoch, @@ -352,14 +469,10 @@ func (r *PostgresRepository) UpdateEpoch( updStmt := table.Epoch. UPDATE( - table.Epoch.ClaimHash, table.Epoch.ClaimTransactionHash, - table.Epoch.Status, ). SET( - e.ClaimHash, e.ClaimTransactionHash, - e.Status, ). FROM( table.Application, @@ -381,43 +494,141 @@ func (r *PostgresRepository) UpdateEpoch( return nil } -func (r *PostgresRepository) UpdateEpochsInputsProcessed( +func (r *PostgresRepository) UpdateEpochOutputsProof(ctx context.Context, appID int64, epochIndex uint64, proof *model.OutputsProof) error { + tx, err := r.db.Begin(ctx) + if err != nil { + return err + } + + err = updateEpochOutputsMerkleProof(ctx, tx, appID, epochIndex, + proof.OutputsHash, byteSliceToHashSlice(proof.OutputsHashProof), proof.MachineHash) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + + err = tx.Commit(ctx) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func (r *PostgresRepository) UpdateEpochStatus( ctx context.Context, nameOrAddress string, -) (int64, error) { + e *model.Epoch, +) error { whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) if err != nil { - return 0, err + return err } - subSelect := table.Input.SELECT(postgres.Raw("1")). + updStmt := table.Epoch. + UPDATE( + table.Epoch.Status, + ). + SET( + e.Status, + ). + FROM( + table.Application, + ). WHERE( - table.Input.EpochApplicationID.EQ(table.Epoch.ApplicationID). - AND(table.Input.EpochIndex.EQ(table.Epoch.Index)). - AND(table.Input.Status.EQ(postgres.NewEnumValue(model.InputCompletionStatus_None.String()))), + whereClause. + AND(table.Epoch.ApplicationID.EQ(table.Application.ID)). + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", e.Index)))), ) - notExistsClause := postgres.NOT( - postgres.EXISTS(subSelect), + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *PostgresRepository) UpdateEpochInputsProcessed( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + // Subquery to check if the previous epoch is not open or closed + prevTable := table.Epoch.AS("prev") + prevSub := prevTable.SELECT( + prevTable.Status.NOT_IN(enum.EpochStatus.Open, enum.EpochStatus.Closed), + ).WHERE(postgres.AND( + prevTable.ApplicationID.EQ(table.Epoch.ApplicationID), + prevTable.Index.EQ(table.Epoch.Index.SUB(postgres.Int64(1))), + )) + + // Condition using COALESCE for the previous epoch (returns TRUE if no previous epoch exists) + prevCondition := postgres.BoolExp(postgres.COALESCE(prevSub, postgres.Bool(true))) + + // Condition for inputs: either no inputs expected or all inputs are present and processed + hasNoInputs := table.Epoch.InputIndexUpperBound.EQ(table.Epoch.InputIndexLowerBound) + + // Subquery to count total inputs for the epoch + totalInputsSub := postgres.FloatExp(table.Input.SELECT(postgres.COUNT(postgres.STAR)). + WHERE(postgres.AND( + table.Input.EpochApplicationID.EQ(table.Epoch.ApplicationID), + table.Input.EpochIndex.EQ(table.Epoch.Index), + ))) + + // Subquery to count pending inputs (status = 'None') + pendingInputsSub := postgres.IntExp(table.Input.SELECT(postgres.COUNT(postgres.STAR)). + WHERE(postgres.AND( + table.Input.EpochApplicationID.EQ(table.Epoch.ApplicationID), + table.Input.EpochIndex.EQ(table.Epoch.Index), + table.Input.Status.EQ(enum.InputCompletionStatus.None), + ))) + + allInputsPresentAndProcessed := postgres.AND( + totalInputsSub.EQ(table.Epoch.InputIndexUpperBound.SUB(table.Epoch.InputIndexLowerBound)), + pendingInputsSub.EQ(postgres.Int64(0)), ) + inputsCondition := hasNoInputs.OR(allInputsPresentAndProcessed) + + // Update statement to set epoch status to InputsProcessed updateStmt := table.Epoch.UPDATE(table.Epoch.Status). - SET(postgres.NewEnumValue(model.EpochStatus_InputsProcessed.String())). + SET(enum.EpochStatus.InputsProcessed). FROM(table.Application). - WHERE( - table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_Closed.String())). - AND(table.Epoch.ApplicationID.EQ(table.Application.ID)). - AND(whereClause). - AND(notExistsClause), - ) + WHERE(postgres.AND( + table.Epoch.Status.EQ(postgres.NewEnumValue(model.EpochStatus_Closed.String())), + table.Epoch.ApplicationID.EQ(table.Application.ID), + table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex))), + whereClause, + prevCondition, + inputsCondition, + )). + RETURNING(table.Epoch.Index) + // Execute the update and capture the returned indexes sqlStr, args := updateStmt.Sql() - cmd, err := r.db.Exec(ctx, sqlStr, args...) + + var index uint64 + err = r.db.QueryRow(ctx, sqlStr, args...).Scan(&index) if err != nil { - return 0, err + if err == pgx.ErrNoRows { + return nil + } + return err } - return cmd.RowsAffected(), nil + if index != epochIndex { + // should not happen + return fmt.Errorf("updated epoch index mismatch: expected %d, got %d", epochIndex, index) + } + return nil } func (r *PostgresRepository) ListEpochs( @@ -439,8 +650,13 @@ func (r *PostgresRepository) ListEpochs( table.Epoch.Index, table.Epoch.FirstBlock, table.Epoch.LastBlock, - table.Epoch.ClaimHash, + table.Epoch.InputIndexLowerBound, + table.Epoch.InputIndexUpperBound, + table.Epoch.MachineHash, + table.Epoch.OutputsMerkleRoot, + table.Epoch.Commitment, table.Epoch.ClaimTransactionHash, + table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, table.Epoch.CreatedAt, @@ -455,8 +671,12 @@ func (r *PostgresRepository) ListEpochs( ) conditions := []postgres.BoolExpression{whereClause} - if f.Status != nil { - conditions = append(conditions, table.Epoch.Status.EQ(postgres.NewEnumValue(f.Status.String()))) + if len(f.Status) > 0 { + statuses := make([]postgres.Expression, 0, len(f.Status)) + for _, status := range f.Status { + statuses = append(statuses, postgres.NewEnumValue(status.String())) + } + conditions = append(conditions, table.Epoch.Status.IN(statuses...)) } if f.BeforeBlock != nil { @@ -495,8 +715,13 @@ func (r *PostgresRepository) ListEpochs( &ep.Index, &ep.FirstBlock, &ep.LastBlock, - &ep.ClaimHash, + &ep.InputIndexLowerBound, + &ep.InputIndexUpperBound, + &ep.MachineHash, + &ep.OutputsMerkleRoot, + &ep.Commitment, &ep.ClaimTransactionHash, + &ep.TournamentAddress, &ep.Status, &ep.VirtualIndex, &ep.CreatedAt, @@ -510,3 +735,45 @@ func (r *PostgresRepository) ListEpochs( } return epochs, total, nil } + +func (r *PostgresRepository) RepeatPreviousEpochOutputsProof( + ctx context.Context, + appID int64, + epochIndex uint64, +) error { + if epochIndex == 0 { + return fmt.Errorf("cannot repeat outputs proof for epoch 0") + } + + e1 := table.Epoch.AS("e1") + e2 := table.Epoch.AS("e2") + updStmt := e1. + UPDATE( + e1.OutputsMerkleRoot, + e1.OutputsMerkleProof, + e1.MachineHash, + ). + SET( + e2.OutputsMerkleRoot, + e2.OutputsMerkleProof, + e2.MachineHash, + ). + FROM(e2). + WHERE(postgres.AND( + e1.ApplicationID.EQ(postgres.Int64(appID)), + e1.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex))), + e2.ApplicationID.EQ(postgres.Int64(appID)), + e2.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex-1))), + )) + + sqlStr, args := updStmt.Sql() + + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} diff --git a/internal/repository/postgres/input.go b/internal/repository/postgres/input.go index 6946625dd..4048e3dc5 100644 --- a/internal/repository/postgres/input.go +++ b/internal/repository/postgres/input.go @@ -382,3 +382,58 @@ func (r *PostgresRepository) ListInputs( } return inputs, total, nil } + +func (r *PostgresRepository) GetNumberOfInputs( + ctx context.Context, + nameOrAddress string, +) (uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return 0, err + } + + sel := table.Input. + SELECT(postgres.COUNT(postgres.STAR)). + FROM( + table.Input. + INNER_JOIN(table.Application, + table.Input.EpochApplicationID.EQ(table.Application.ID), + ), + ). + WHERE(whereClause) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var count uint64 + err = row.Scan(&count) + if err != nil { + return 0, err + } + return count, nil +} + +func (r *PostgresRepository) UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error { + updStmt := table.Input. + UPDATE( + table.Input.SnapshotURI, + ). + SET( + snapshotURI, + ). + WHERE( + table.Input.EpochApplicationID.EQ(postgres.Int64(appId)). + AND(table.Input.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", inputIndex)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return fmt.Errorf("no input found with appId %d and index %d", appId, inputIndex) + } + return nil +} diff --git a/internal/repository/postgres/match.go b/internal/repository/postgres/match.go new file mode 100644 index 000000000..371f94dd9 --- /dev/null +++ b/internal/repository/postgres/match.go @@ -0,0 +1,298 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ MatchRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateMatch( + ctx context.Context, + nameOrAddress string, + m *model.Match, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)), + postgres.Bytea(m.TournamentAddress.Bytes()), + postgres.Bytea(m.IDHash.Bytes()), + postgres.Bytea(m.CommitmentOne.Bytes()), + postgres.Bytea(m.CommitmentTwo.Bytes()), + postgres.Bytea(m.LeftOfTwo.Bytes()), + postgres.RawFloat(fmt.Sprintf("%d", m.BlockNumber)), + postgres.Bytea(m.TxHash.Bytes()), + postgres.NewEnumValue(m.Winner.String()), + postgres.NewEnumValue(m.DeletionReason.String()), + postgres.RawFloat(fmt.Sprintf("%d", m.DeletionBlockNumber)), + postgres.Bytea(m.DeletionTxHash.Bytes()), + ).WHERE( + whereClause, + ) + + insertStmt := table.Matches.INSERT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ).QUERY( + selectQuery, + ) + + sqlStr, args := insertStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + + return err +} + +func (r *PostgresRepository) UpdateMatch( + ctx context.Context, + nameOrAddress string, + m *model.Match, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + updateStmt := table.Matches. + UPDATE( + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ). + SET( + m.Winner, + m.DeletionReason, + m.DeletionBlockNumber, + postgres.Bytea(m.DeletionTxHash.Bytes()), + ). + FROM( + table.Application, + ). + WHERE( + whereClause. + AND(table.Matches.ApplicationID.EQ(postgres.Int(m.ApplicationID))). + AND(table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)))). + AND(table.Matches.TournamentAddress.EQ(postgres.Bytea(m.TournamentAddress.Bytes()))). + AND(table.Matches.IDHash.EQ(postgres.Bytea(m.IDHash.Bytes()))), + ) + + sqlStr, args := updateStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *PostgresRepository) GetMatch( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + idHashHex string, +) (*model.Match, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddr := common.HexToAddress(tournamentAddress) + idHash := common.HexToHash(idHashHex) + + sel := table.Matches. + SELECT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + table.Matches.CreatedAt, + table.Matches.UpdatedAt, + ). + FROM( + table.Matches. + INNER_JOIN(table.Application, + table.Matches.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))). + AND(table.Matches.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))). + AND(table.Matches.IDHash.EQ(postgres.Bytea(idHash.Bytes()))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var m model.Match + err = row.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.CommitmentOne, + &m.CommitmentTwo, + &m.LeftOfTwo, + &m.BlockNumber, + &m.TxHash, + &m.Winner, + &m.DeletionReason, + &m.DeletionBlockNumber, + &m.DeletionTxHash, + &m.CreatedAt, + &m.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &m, nil +} + +func (r *PostgresRepository) ListMatches( + ctx context.Context, + nameOrAddress string, + f repository.MatchFilter, + p repository.Pagination, + descending bool, +) ([]*model.Match, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.Matches. + SELECT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + table.Matches.CreatedAt, + table.Matches.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.Matches. + INNER_JOIN(table.Application, + table.Matches.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + if f.TournamentAddress != nil { + tournamentAddr := common.HexToAddress(*f.TournamentAddress) + conditions = append(conditions, table.Matches.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.Matches.EpochIndex.DESC()) + } else { + sel = sel.ORDER_BY(table.Matches.EpochIndex.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var matches []*model.Match + var total uint64 + for rows.Next() { + var m model.Match + err := rows.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.CommitmentOne, + &m.CommitmentTwo, + &m.LeftOfTwo, + &m.BlockNumber, + &m.TxHash, + &m.Winner, + &m.DeletionReason, + &m.DeletionBlockNumber, + &m.DeletionTxHash, + &m.CreatedAt, + &m.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + matches = append(matches, &m) + } + + return matches, total, nil +} diff --git a/internal/repository/postgres/match_advanced.go b/internal/repository/postgres/match_advanced.go new file mode 100644 index 000000000..97db8a5ce --- /dev/null +++ b/internal/repository/postgres/match_advanced.go @@ -0,0 +1,231 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "encoding/hex" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ MatchAdvancedRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateMatchAdvanced( + ctx context.Context, + nameOrAddress string, + m *model.MatchAdvanced, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)), + postgres.Bytea(m.TournamentAddress.Bytes()), + postgres.Bytea(m.IDHash.Bytes()), + postgres.Bytea(m.OtherParent.Bytes()), + postgres.Bytea(m.LeftNode.Bytes()), + postgres.RawFloat(fmt.Sprintf("%d", m.BlockNumber)), + postgres.Bytea(m.TxHash.Bytes()), + ).WHERE( + whereClause, + ) + + insertStmt := table.MatchAdvances.INSERT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + ).QUERY( + selectQuery, + ) + + sqlStr, args := insertStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + + return err +} + +func (r *PostgresRepository) GetMatchAdvanced( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + idHashHex string, + parentHex string, +) (*model.MatchAdvanced, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddr := common.HexToAddress(tournamentAddress) + idHash := common.HexToHash(idHashHex) + parent, err := hex.DecodeString(parentHex) + if err != nil { + return nil, fmt.Errorf("invalid parent hex: %w", err) + } + + sel := table.MatchAdvances. + SELECT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + table.MatchAdvances.CreatedAt, + table.MatchAdvances.UpdatedAt, + ). + FROM( + table.MatchAdvances. + INNER_JOIN(table.Application, + table.MatchAdvances.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.MatchAdvances.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))). + AND(table.MatchAdvances.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))). + AND(table.MatchAdvances.IDHash.EQ(postgres.Bytea(idHash.Bytes()))). + AND(table.MatchAdvances.OtherParent.EQ(postgres.Bytea(parent))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var m model.MatchAdvanced + err = row.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.OtherParent, + &m.LeftNode, + &m.BlockNumber, + &m.TxHash, + &m.CreatedAt, + &m.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &m, nil +} + +func (r *PostgresRepository) ListMatchAdvances( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + idHashHex string, + p repository.Pagination, + descending bool, +) ([]*model.MatchAdvanced, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.MatchAdvances. + SELECT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + table.MatchAdvances.CreatedAt, + table.MatchAdvances.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.MatchAdvances. + INNER_JOIN(table.Application, + table.MatchAdvances.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + conditions = append(conditions, table.MatchAdvances.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))) + + tAddr := common.HexToAddress(tournamentAddress) + conditions = append(conditions, table.MatchAdvances.TournamentAddress.EQ(postgres.Bytea(tAddr.Bytes()))) + + idHash := common.HexToHash(idHashHex) + conditions = append(conditions, table.MatchAdvances.IDHash.EQ(postgres.Bytea(idHash.Bytes()))) + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.MatchAdvances.EpochIndex.DESC()) + } else { + sel = sel.ORDER_BY(table.MatchAdvances.EpochIndex.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var matchAdvances []*model.MatchAdvanced + var total uint64 + for rows.Next() { + var m model.MatchAdvanced + err := rows.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.OtherParent, + &m.LeftNode, + &m.BlockNumber, + &m.TxHash, + &m.CreatedAt, + &m.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + matchAdvances = append(matchAdvances, &m) + } + + return matchAdvances, total, nil +} diff --git a/internal/repository/postgres/output.go b/internal/repository/postgres/output.go index fcaf3b460..06dcea5dd 100644 --- a/internal/repository/postgres/output.go +++ b/internal/repository/postgres/output.go @@ -342,3 +342,34 @@ func (r *PostgresRepository) GetLastOutputBeforeBlock( } return &out, nil } + +func (r *PostgresRepository) GetNumberOfExecutedOutputs( + ctx context.Context, + nameOrAddress string, +) (uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return 0, err + } + + sel := table.Output. + SELECT(postgres.COUNT(postgres.STAR)). + FROM( + table.Output. + INNER_JOIN(table.Application, + table.Output.InputEpochApplicationID.EQ(table.Application.ID), + ), + ). + WHERE(whereClause.AND(table.Output.ExecutionTransactionHash.IS_NOT_NULL())) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var count uint64 + err = row.Scan(&count) + if err != nil { + return 0, err + } + return count, nil +} diff --git a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql index 6654cdcff..c13aaafcd 100644 --- a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql +++ b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql @@ -3,6 +3,31 @@ BEGIN; +DROP TRIGGER IF EXISTS "state_hashes_set_updated_at" ON "match_advances"; +DROP TABLE IF EXISTS "state_hashes"; + +DROP TRIGGER IF EXISTS "match_advances_set_updated_at" ON "match_advances"; +DROP INDEX IF EXISTS "match_advances_block_number_idx"; +DROP TABLE IF EXISTS "match_advances"; + +ALTER TABLE "tournaments" DROP CONSTRAINT "tournaments_parent_match_fkey"; + +DROP TRIGGER IF EXISTS "matches_set_updated_at" ON "matches"; +DROP INDEX IF EXISTS "matches_unique_pair_idx"; +DROP INDEX IF EXISTS "matches_app_epoch_tournament_idx"; +DROP TABLE IF EXISTS "matches"; + +DROP TRIGGER IF EXISTS "commitments_set_updated_at" ON "commitments"; +DROP INDEX IF EXISTS "commitments_final_state_idx"; +DROP INDEX IF EXISTS "commitments_app_epoch_tournament_idx"; +DROP TABLE IF EXISTS "commitments"; + +DROP TRIGGER IF EXISTS "tournaments_set_updated_at" ON "tournaments"; +DROP INDEX IF EXISTS "tournaments_parent_match_nonroot_idx"; +DROP INDEX IF EXISTS "unique_root_per_epoch_idx"; +DROP INDEX IF EXISTS "tournaments_epoch_idx"; +DROP TABLE IF EXISTS "tournaments"; + DROP TRIGGER IF EXISTS "node_config_set_updated_at" ON "node_config"; DROP TABLE IF EXISTS "node_config"; @@ -35,6 +60,9 @@ DROP TABLE IF EXISTS "application"; DROP FUNCTION IF EXISTS "update_updated_at_column"; DROP FUNCTION IF EXISTS "check_hash_siblings"; +DROP TYPE IF EXISTS "WinnerCommitment"; +DROP TYPE IF EXISTS "MatchDeletionReason"; +DROP TYPE IF EXISTS "Consensus"; DROP TYPE IF EXISTS "SnapshotPolicy"; DROP TYPE IF EXISTS "EpochStatus"; DROP TYPE IF EXISTS "DefaultBlock"; diff --git a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql index c191fad1e..293ca3995 100644 --- a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql +++ b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql @@ -34,6 +34,12 @@ CREATE TYPE "EpochStatus" AS ENUM ( CREATE TYPE "SnapshotPolicy" AS ENUM ('NONE', 'EVERY_INPUT', 'EVERY_EPOCH'); +CREATE TYPE "Consensus" AS ENUM ('AUTHORITY', 'QUORUM', 'PRT'); + +CREATE TYPE "MatchDeletionReason" AS ENUM ('STEP', 'TIMEOUT', 'CHILD_TOURNAMENT', 'NOT_DELETED'); + +CREATE TYPE "WinnerCommitment" AS ENUM ('NONE', 'ONE', 'TWO'); + CREATE FUNCTION "update_updated_at_column"() RETURNS TRIGGER AS $$ BEGIN @@ -74,10 +80,13 @@ CREATE TABLE "application" "template_uri" VARCHAR(4096) NOT NULL, "epoch_length" uint64 NOT NULL, "data_availability" data_availability NOT NULL, + "consensus_type" "Consensus" NOT NULL, "state" "ApplicationState" NOT NULL, "reason" VARCHAR(4096), + "last_epoch_check_block" uint64 NOT NULL, "last_input_check_block" uint64 NOT NULL, "last_output_check_block" uint64 NOT NULL, + "last_tournament_check_block" uint64 NOT NULL, "processed_inputs" uint64 NOT NULL, "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), @@ -119,7 +128,14 @@ CREATE TABLE "epoch" "index" uint64 NOT NULL, "first_block" uint64 NOT NULL, "last_block" uint64 NOT NULL, - "claim_hash" hash, + "input_index_lower_bound" uint64 NOT NULL, + "input_index_upper_bound" uint64 NOT NULL, + "machine_hash" hash, + "outputs_merkle_root" hash, + "outputs_merkle_proof" BYTEA[], + "commitment" hash, + "commitment_proof" BYTEA[], + "tournament_address" ethereum_address, "claim_transaction_hash" hash, "status" "EpochStatus" NOT NULL, "virtual_index" uint64 NOT NULL, @@ -151,6 +167,7 @@ CREATE TABLE "input" "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), CONSTRAINT "input_pkey" PRIMARY KEY ("epoch_application_id", "index"), + CONSTRAINT "input_epoch_index_unique" UNIQUE ("epoch_application_id", "epoch_index", "index"), CONSTRAINT "input_application_id_tx_reference_unique" UNIQUE ("epoch_application_id", "transaction_reference"), CONSTRAINT "input_epoch_id_fkey" FOREIGN KEY ("epoch_application_id", "epoch_index") REFERENCES "epoch"("application_id", "index") ON DELETE CASCADE ); @@ -181,7 +198,7 @@ CREATE TABLE "output" CREATE INDEX "output_raw_data_type_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 1 FOR 4)); -CREATE INDEX "output_raw_data_address_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 17 FOR 20) ) +CREATE INDEX "output_raw_data_address_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 17 FOR 20)) WHERE SUBSTRING("raw_data" FROM 1 FOR 4) IN ( E'\\x10321e8b', -- DelegateCallVoucher E'\\x237a816f' -- Voucher @@ -216,5 +233,176 @@ CREATE TABLE "node_config" CREATE TRIGGER "config_set_updated_at" BEFORE UPDATE ON "node_config" FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); +CREATE TABLE "tournaments" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "address" ethereum_address NOT NULL, + "parent_tournament_address" ethereum_address, + "parent_match_id_hash" hash, + "max_level" INT NOT NULL CHECK("max_level" >= 0), + "level" INT NOT NULL CHECK("level" >= 0), + "log2step" INT NOT NULL CHECK("log2step" >= 0), + "height" INT NOT NULL CHECK("height" >= 0), + "winner_commitment" hash, + "final_state_hash" hash, + "finished_at_block" uint64 DEFAULT 0, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "tournaments_pkey" PRIMARY KEY ("application_id","epoch_index","address"), + CONSTRAINT "tournaments_epoch_fkey" FOREIGN KEY ("application_id","epoch_index") + REFERENCES "epoch"("application_id","index") + ON DELETE CASCADE, + CONSTRAINT "chk_tournament_root_parent" + CHECK ( + ("level" = 0 AND "parent_tournament_address" IS NULL AND "parent_match_id_hash" IS NULL) + OR + ("level" > 0 AND "parent_tournament_address" IS NOT NULL AND "parent_match_id_hash" IS NOT NULL) + ) +); + +CREATE INDEX "tournaments_epoch_idx" + ON "tournaments"("application_id","epoch_index"); + +CREATE UNIQUE INDEX "unique_root_per_epoch_idx" + ON "tournaments"("application_id","epoch_index") + WHERE "level" = 0; + +CREATE INDEX "tournaments_parent_match_nonroot_idx" + ON "tournaments"("application_id","epoch_index","parent_tournament_address","parent_match_id_hash") + WHERE "level" > 0; + +CREATE TRIGGER "tournaments_set_updated_at" +BEFORE UPDATE ON "tournaments" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "commitments" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "tournament_address" ethereum_address NOT NULL, + "commitment" hash NOT NULL, + "final_state_hash" hash NOT NULL, + "submitter_address" ethereum_address NOT NULL, + "block_number" uint64 NOT NULL, + "tx_hash" hash NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "commitments_pkey" + PRIMARY KEY ("application_id","epoch_index","tournament_address","commitment"), + CONSTRAINT "commitments_tournament_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address") + REFERENCES "tournaments"("application_id","epoch_index","address") + ON DELETE CASCADE +); + +CREATE INDEX "commitments_app_epoch_tournament_idx" + ON "commitments"("application_id","epoch_index","tournament_address"); + +CREATE INDEX "commitments_final_state_idx" + ON "commitments"("final_state_hash"); + +CREATE TRIGGER "commitments_set_updated_at" +BEFORE UPDATE ON "commitments" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "matches" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "tournament_address" ethereum_address NOT NULL, + "id_hash" hash NOT NULL, + "commitment_one" hash NOT NULL, + "commitment_two" hash NOT NULL, + "left_of_two" hash NOT NULL, + "block_number" uint64 NOT NULL, + "tx_hash" hash NOT NULL, + "winner" "WinnerCommitment" NOT NULL, + "deletion_reason" "MatchDeletionReason" NOT NULL, + "deletion_block_number" uint64 DEFAULT 0, + "deletion_tx_hash" hash, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "matches_pkey" + PRIMARY KEY ("application_id","epoch_index","tournament_address","id_hash"), + + CONSTRAINT "matches_tournament_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address") + REFERENCES "tournaments"("application_id","epoch_index","address") + ON DELETE CASCADE, + + CONSTRAINT "matches_one_commitment_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address","commitment_one") + REFERENCES "commitments"("application_id","epoch_index","tournament_address","commitment") + ON DELETE RESTRICT, + + CONSTRAINT "matches_two_commitment_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address","commitment_two") + REFERENCES "commitments"("application_id","epoch_index","tournament_address","commitment") + ON DELETE RESTRICT +); + +CREATE INDEX "matches_app_epoch_tournament_idx" + ON "matches"("application_id","epoch_index","tournament_address"); + +CREATE UNIQUE INDEX "matches_unique_pair_idx" + ON "matches"("application_id","epoch_index","tournament_address","commitment_one","commitment_two"); + +CREATE TRIGGER "matches_set_updated_at" +BEFORE UPDATE ON "matches" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +-- Add foreign key from tournaments to matches (parent match) +ALTER TABLE "tournaments" + ADD CONSTRAINT "tournaments_parent_match_fkey" + FOREIGN KEY ("application_id","epoch_index","parent_tournament_address","parent_match_id_hash") + REFERENCES "matches"("application_id","epoch_index","tournament_address","id_hash") + ON DELETE CASCADE; + +CREATE TABLE "match_advances" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "tournament_address" ethereum_address NOT NULL, + "id_hash" hash NOT NULL, -- keccak256(abi.encode(one,two)) + "other_parent" hash NOT NULL, + "left_node" hash NOT NULL, + "block_number" uint64 NOT NULL, + "tx_hash" hash NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "match_advances_pkey" + PRIMARY KEY ("application_id","epoch_index","tournament_address","id_hash","other_parent"), + + CONSTRAINT "match_advances_matches_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address","id_hash") + REFERENCES "matches"("application_id","epoch_index","tournament_address","id_hash") + ON DELETE CASCADE +); + +CREATE INDEX "match_advances_block_number_idx" + ON "match_advances"("application_id","epoch_index","tournament_address","id_hash","block_number"); + +CREATE TRIGGER "match_advances_set_updated_at" +BEFORE UPDATE ON "match_advances" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "state_hashes" +( + "input_epoch_application_id" int4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "input_index" uint64 NOT NULL, + "index" uint64 NOT NULL, + "machine_hash" hash NOT NULL, + "repetitions" INT8 NOT NULL CHECK ("repetitions" > 0), + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "state_hashes_pkey" PRIMARY KEY ("input_epoch_application_id", "epoch_index", "index"), + CONSTRAINT "state_hashes_input_id_fkey" FOREIGN KEY ("input_epoch_application_id", "epoch_index", "input_index") REFERENCES "input"("epoch_application_id", "epoch_index", "index") ON DELETE CASCADE +); + +CREATE TRIGGER "state_hashes_set_updated_at" BEFORE UPDATE ON "state_hashes" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + COMMIT; diff --git a/internal/repository/postgres/state_hash.go b/internal/repository/postgres/state_hash.go new file mode 100644 index 000000000..e41a9dbf2 --- /dev/null +++ b/internal/repository/postgres/state_hash.go @@ -0,0 +1,97 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "fmt" + + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +func (r *PostgresRepository) ListStateHashes( + ctx context.Context, + nameOrAddress string, + f repository.StateHashFilter, + p repository.Pagination, + descending bool, +) ([]*model.StateHash, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.StateHashes. + SELECT( + table.StateHashes.InputEpochApplicationID, + table.StateHashes.EpochIndex, + table.StateHashes.InputIndex, + table.StateHashes.Index, + table.StateHashes.MachineHash, + table.StateHashes.Repetitions, + table.StateHashes.CreatedAt, + table.StateHashes.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.StateHashes.INNER_JOIN( + table.Application, + table.StateHashes.InputEpochApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.StateHashes.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.StateHashes.Index.DESC()) + } else { + sel = sel.ORDER_BY(table.StateHashes.Index.ASC()) + } + + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var stateHashes []*model.StateHash + var total uint64 + for rows.Next() { + var sh model.StateHash + err := rows.Scan( + &sh.InputEpochApplicationID, + &sh.EpochIndex, + &sh.InputIndex, + &sh.Index, + &sh.MachineHash, + &sh.Repetitions, + &sh.CreatedAt, + &sh.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + stateHashes = append(stateHashes, &sh) + } + return stateHashes, total, nil +} diff --git a/internal/repository/postgres/test_only.go b/internal/repository/postgres/test_only.go index eb521ad0b..1cbbc128e 100644 --- a/internal/repository/postgres/test_only.go +++ b/internal/repository/postgres/test_only.go @@ -19,8 +19,15 @@ func (r *PostgresRepository) CreateEpoch( table.Epoch.Index, table.Epoch.FirstBlock, table.Epoch.LastBlock, - table.Epoch.ClaimHash, + table.Epoch.InputIndexLowerBound, + table.Epoch.InputIndexUpperBound, + table.Epoch.MachineHash, + table.Epoch.OutputsMerkleRoot, + table.Epoch.OutputsMerkleProof, + table.Epoch.Commitment, + table.Epoch.CommitmentProof, table.Epoch.ClaimTransactionHash, + table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, ).VALUES( @@ -28,8 +35,15 @@ func (r *PostgresRepository) CreateEpoch( e.Index, e.FirstBlock, e.LastBlock, - e.ClaimHash, + e.InputIndexLowerBound, + e.InputIndexUpperBound, + e.MachineHash, + e.OutputsMerkleRoot, + e.OutputsMerkleProof, + e.Commitment, + e.CommitmentProof, e.ClaimTransactionHash, + e.TournamentAddress, e.Status, e.VirtualIndex, ) @@ -119,4 +133,3 @@ func (r *PostgresRepository) CreateReport( _, err := r.db.Exec(ctx, sqlStr, args...) return err } - diff --git a/internal/repository/postgres/tournament.go b/internal/repository/postgres/tournament.go new file mode 100644 index 000000000..0d70b610d --- /dev/null +++ b/internal/repository/postgres/tournament.go @@ -0,0 +1,313 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ TournamentRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateTournament( + ctx context.Context, + nameOrAddress string, + t *model.Tournament, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + insertStmt := table.Tournaments. + INSERT( + table.Tournaments.ApplicationID, + table.Tournaments.EpochIndex, + table.Tournaments.Address, + table.Tournaments.ParentTournamentAddress, + table.Tournaments.ParentMatchIDHash, + table.Tournaments.MaxLevel, + table.Tournaments.Level, + table.Tournaments.Log2step, + table.Tournaments.Height, + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + ) + + parentAddress := postgres.NULL + if t.ParentTournamentAddress != nil { + parentAddress = postgres.Bytea(t.ParentTournamentAddress.Bytes()) + } + parentMatch := postgres.NULL + if t.ParentMatchIDHash != nil { + parentMatch = postgres.Bytea(t.ParentMatchIDHash.Bytes()) + } + winnerCommitment := postgres.NULL + if t.WinnerCommitment != nil { + winnerCommitment = postgres.Bytea(t.WinnerCommitment.Bytes()) + } + finalState := postgres.NULL + if t.FinalStateHash != nil { + finalState = postgres.Bytea(t.FinalStateHash.Bytes()) + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", t.EpochIndex)), + postgres.Bytea(t.Address.Bytes()), + parentAddress, + parentMatch, + postgres.RawFloat(fmt.Sprintf("%d", t.MaxLevel)), + postgres.RawFloat(fmt.Sprintf("%d", t.Level)), + postgres.RawFloat(fmt.Sprintf("%d", t.Log2Step)), + postgres.RawFloat(fmt.Sprintf("%d", t.Height)), + winnerCommitment, + finalState, + postgres.RawFloat(fmt.Sprintf("%d", t.FinishedAtBlock)), + ).WHERE( + whereClause, + ) + + sqlStr, args := insertStmt.QUERY(selectQuery).Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + + return err +} + +func (r *PostgresRepository) UpdateTournament( + ctx context.Context, + nameOrAddress string, + t *model.Tournament, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + winnerCommitment := postgres.NULL + if t.WinnerCommitment != nil { + winnerCommitment = postgres.Bytea(t.WinnerCommitment.Bytes()) + } + finalState := postgres.NULL + if t.FinalStateHash != nil { + finalState = postgres.Bytea(t.FinalStateHash.Bytes()) + } + + updateStmt := table.Tournaments. + UPDATE( + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + ). + SET( + winnerCommitment, + finalState, + t.FinishedAtBlock, + ). + FROM( + table.Application, + ). + WHERE(postgres.AND( + whereClause, + table.Tournaments.ApplicationID.EQ(postgres.Int(t.ApplicationID)), + table.Tournaments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", t.EpochIndex))), + table.Tournaments.Address.EQ(postgres.Bytea(t.Address.Bytes())), + )) + + sqlStr, args := updateStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *PostgresRepository) GetTournament( + ctx context.Context, + nameOrAddress string, + address string, +) (*model.Tournament, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddress := common.HexToAddress(address) + sel := table.Tournaments. + SELECT( + table.Tournaments.ApplicationID, + table.Tournaments.EpochIndex, + table.Tournaments.Address, + table.Tournaments.ParentTournamentAddress, + table.Tournaments.ParentMatchIDHash, + table.Tournaments.MaxLevel, + table.Tournaments.Level, + table.Tournaments.Log2step, + table.Tournaments.Height, + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + table.Tournaments.CreatedAt, + table.Tournaments.UpdatedAt, + ). + FROM( + table.Tournaments. + INNER_JOIN(table.Application, + table.Tournaments.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Tournaments.Address.EQ(postgres.Bytea(tournamentAddress.Bytes()))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var t model.Tournament + err = row.Scan( + &t.ApplicationID, + &t.EpochIndex, + &t.Address, + &t.ParentTournamentAddress, + &t.ParentMatchIDHash, + &t.MaxLevel, + &t.Level, + &t.Log2Step, + &t.Height, + &t.WinnerCommitment, + &t.FinalStateHash, + &t.FinishedAtBlock, + &t.CreatedAt, + &t.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &t, nil +} + +func (r *PostgresRepository) ListTournaments( + ctx context.Context, + nameOrAddress string, + f repository.TournamentFilter, + p repository.Pagination, + descending bool, +) ([]*model.Tournament, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.Tournaments. + SELECT( + table.Tournaments.ApplicationID, + table.Tournaments.EpochIndex, + table.Tournaments.Address, + table.Tournaments.ParentTournamentAddress, + table.Tournaments.ParentMatchIDHash, + table.Tournaments.MaxLevel, + table.Tournaments.Level, + table.Tournaments.Log2step, + table.Tournaments.Height, + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + table.Tournaments.CreatedAt, + table.Tournaments.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.Tournaments. + INNER_JOIN(table.Application, + table.Tournaments.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.Tournaments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + if f.Level != nil { + conditions = append(conditions, table.Tournaments.Level.EQ(postgres.RawInt(fmt.Sprintf("%d", *f.Level)))) + } + if f.ParentTournamentAddress != nil { + conditions = append(conditions, table.Tournaments.ParentTournamentAddress.EQ(postgres.Bytea(f.ParentTournamentAddress.Bytes()))) + } + if f.ParentMatchIDHash != nil { + conditions = append(conditions, table.Tournaments.ParentMatchIDHash.EQ(postgres.Bytea(f.ParentMatchIDHash.Bytes()))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.Tournaments.EpochIndex.DESC(), table.Tournaments.Level.DESC()) + } else { + sel = sel.ORDER_BY(table.Tournaments.EpochIndex.ASC(), table.Tournaments.Level.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var tournaments []*model.Tournament + var total uint64 + for rows.Next() { + var t model.Tournament + err := rows.Scan( + &t.ApplicationID, + &t.EpochIndex, + &t.Address, + &t.ParentTournamentAddress, + &t.ParentMatchIDHash, + &t.MaxLevel, + &t.Level, + &t.Log2Step, + &t.Height, + &t.WinnerCommitment, + &t.FinalStateHash, + &t.FinishedAtBlock, + &t.CreatedAt, + &t.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + tournaments = append(tournaments, &t) + } + + return tournaments, total, nil +} diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 5a1990a0f..a7314d17b 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -25,10 +25,11 @@ type Pagination struct { type ApplicationFilter struct { State *ApplicationState DataAvailability *DataAvailabilitySelector + ConsensusType *Consensus } type EpochFilter struct { - Status *EpochStatus + Status []EpochStatus BeforeBlock *uint64 } @@ -57,6 +58,27 @@ type ReportFilter struct { InputIndex *uint64 } +type StateHashFilter struct { + EpochIndex *uint64 +} + +type TournamentFilter struct { + EpochIndex *uint64 + Level *uint64 + ParentTournamentAddress *common.Address + ParentMatchIDHash *common.Hash +} + +type CommitmentFilter struct { + EpochIndex *uint64 + TournamentAddress *string +} + +type MatchFilter struct { + EpochIndex *uint64 + TournamentAddress *string +} + type ApplicationRepository interface { CreateApplication(ctx context.Context, app *Application, withExecutionParameters bool) (int64, error) GetApplication(ctx context.Context, nameOrAddress string) (*Application, error) @@ -69,6 +91,7 @@ type ApplicationRepository interface { GetExecutionParameters(ctx context.Context, applicationID int64) (*ExecutionParameters, error) UpdateExecutionParameters(ctx context.Context, ep *ExecutionParameters) error + GetEventLastCheckBlock(ctx context.Context, appID int64, event MonitoredEvent) (uint64, error) UpdateEventLastCheckBlock(ctx context.Context, appIDs []int64, event MonitoredEvent, blockNumber uint64) error GetLastSnapshot(ctx context.Context, nameOrAddress string) (*Input, error) @@ -80,10 +103,14 @@ type EpochRepository interface { GetEpoch(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) GetLastAcceptedEpochIndex(ctx context.Context, nameOrAddress string) (uint64, error) + GetLastNonOpenEpoch(ctx context.Context, nameOrAddress string) (*Epoch, error) GetEpochByVirtualIndex(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) - UpdateEpoch(ctx context.Context, nameOrAddress string, e *Epoch) error - UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) (int64, error) + UpdateEpochClaimTransactionHash(ctx context.Context, nameOrAddress string, e *Epoch) error + UpdateEpochStatus(ctx context.Context, nameOrAddress string, e *Epoch) error + UpdateEpochInputsProcessed(ctx context.Context, nameOrAddress string, epochIndex uint64) error + UpdateEpochOutputsProof(ctx context.Context, appID int64, epochIndex uint64, proof *OutputsProof) error + RepeatPreviousEpochOutputsProof(ctx context.Context, appID int64, epochIndex uint64) error ListEpochs(ctx context.Context, nameOrAddress string, f EpochFilter, p Pagination, descending bool) ([]*Epoch, uint64, error) } @@ -94,6 +121,8 @@ type InputRepository interface { GetLastInput(ctx context.Context, appAddress string, epochIndex uint64) (*Input, error) GetLastProcessedInput(ctx context.Context, appAddress string) (*Input, error) ListInputs(ctx context.Context, nameOrAddress string, f InputFilter, p Pagination, descending bool) ([]*Input, uint64, error) + GetNumberOfInputs(ctx context.Context, nameOrAddress string) (uint64, error) + UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error } type OutputRepository interface { @@ -101,6 +130,7 @@ type OutputRepository interface { UpdateOutputsExecution(ctx context.Context, nameOrAddress string, executedOutputs []*Output, blockNumber uint64) error ListOutputs(ctx context.Context, nameOrAddress string, f OutputFilter, p Pagination, descending bool) ([]*Output, uint64, error) GetLastOutputBeforeBlock(ctx context.Context, nameOrAddress string, block uint64) (*Output, error) + GetNumberOfExecutedOutputs(ctx context.Context, nameOrAddress string) (uint64, error) } type ReportRepository interface { @@ -108,10 +138,42 @@ type ReportRepository interface { ListReports(ctx context.Context, nameOrAddress string, f ReportFilter, p Pagination, descending bool) ([]*Report, uint64, error) } +type StateHashRepository interface { + ListStateHashes(ctx context.Context, nameOrAddress string, f StateHashFilter, p Pagination, descending bool) ([]*StateHash, uint64, error) +} + +type TournamentRepository interface { + CreateTournament(ctx context.Context, nameOrAddress string, t *Tournament) error + UpdateTournament(ctx context.Context, nameOrAddress string, t *Tournament) error + GetTournament(ctx context.Context, nameOrAddress string, address string) (*Tournament, error) + ListTournaments(ctx context.Context, nameOrAddress string, f TournamentFilter, + p Pagination, descending bool) ([]*Tournament, uint64, error) +} + +type CommitmentRepository interface { + CreateCommitment(ctx context.Context, nameOrAddress string, c *Commitment) error + GetCommitment(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, commitmentHex string) (*Commitment, error) + ListCommitments(ctx context.Context, nameOrAddress string, f CommitmentFilter, p Pagination, descending bool) ([]*Commitment, uint64, error) +} + +type MatchRepository interface { + CreateMatch(ctx context.Context, nameOrAddress string, m *Match) error + UpdateMatch(ctx context.Context, nameOrAddress string, m *Match) error + GetMatch(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, idHashHex string) (*Match, error) + ListMatches(ctx context.Context, nameOrAddress string, f MatchFilter, p Pagination, descending bool) ([]*Match, uint64, error) +} + +type MatchAdvancedRepository interface { + CreateMatchAdvanced(ctx context.Context, nameOrAddress string, m *MatchAdvanced) error + GetMatchAdvanced(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, idHashHex string, parentHex string) (*MatchAdvanced, error) + ListMatchAdvances(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, idHashHex string, p Pagination, descending bool) ([]*MatchAdvanced, uint64, error) +} + type BulkOperationsRepository interface { - StoreAdvanceResult(ctx context.Context, appId int64, ar *AdvanceResult) error + StoreAdvanceResult(ctx context.Context, appID int64, result *AdvanceResult) error StoreClaimAndProofs(ctx context.Context, epoch *Epoch, outputs []*Output) error - UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error + StoreTournamentEvents(ctx context.Context, appID int64, commitments []*Commitment, matches []*Match, + matchAdvanced []*MatchAdvanced, matchDeleted []*Match, lastBlock uint64) error } type NodeConfigRepository interface { @@ -159,6 +221,11 @@ type Repository interface { InputRepository OutputRepository ReportRepository + StateHashRepository + TournamentRepository + CommitmentRepository + MatchRepository + MatchAdvancedRepository BulkOperationsRepository NodeConfigRepository ClaimerRepository diff --git a/internal/validator/validator.go b/internal/validator/validator.go index 4641d90d9..2254b0f68 100644 --- a/internal/validator/validator.go +++ b/internal/validator/validator.go @@ -10,13 +10,15 @@ import ( "errors" "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/merkle" . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + pkgm "github.com/cartesi/rollups-node/pkg/machine" "github.com/cartesi/rollups-node/pkg/service" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" ) type Service struct { @@ -82,17 +84,17 @@ func (s *Service) Tick() []error { } return errs } -func (s *Service) Stop(b bool) []error { +func (s *Service) Stop(_ bool) []error { return nil } -func (v *Service) String() string { - return v.Name +func (s *Service) String() string { + return s.Name } // The maximum height for the Merkle tree of all outputs produced // by an application -const MAX_OUTPUT_TREE_HEIGHT = merkle.TREE_DEPTH +const MAX_OUTPUT_TREE_HEIGHT = merkle.TREE_DEPTH //nolint: revive type ValidatorRepository interface { ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination, descending bool) ([]*Application, uint64, error) @@ -103,6 +105,7 @@ type ValidatorRepository interface { GetLastInput(ctx context.Context, appAddress string, epochIndex uint64) (*Input, error) // FIXME migrate to list GetEpochByVirtualIndex(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) StoreClaimAndProofs(ctx context.Context, epoch *Epoch, outputs []*Output) error + ListStateHashes(ctx context.Context, nameOrAddress string, f repository.StateHashFilter, p repository.Pagination, descending bool) ([]*StateHash, uint64, error) } func getAllRunningApplications(ctx context.Context, er ValidatorRepository) ([]*Application, uint64, error) { @@ -111,23 +114,23 @@ func getAllRunningApplications(ctx context.Context, er ValidatorRepository) ([]* } func getProcessedEpochs(ctx context.Context, er ValidatorRepository, address string) ([]*Epoch, uint64, error) { - f := repository.EpochFilter{Status: Pointer(EpochStatus_InputsProcessed)} + f := repository.EpochFilter{Status: []EpochStatus{EpochStatus_InputsProcessed}} return er.ListEpochs(ctx, address, f, repository.Pagination{}, false) } // setApplicationInoperable marks an application as inoperable with the given reason, // logs any error that occurs during the update, and returns an error with the reason. -func (v *Service) setApplicationInoperable(ctx context.Context, app *Application, reasonFmt string, args ...interface{}) error { +func (s *Service) setApplicationInoperable(ctx context.Context, app *Application, reasonFmt string, args ...any) error { reason := fmt.Sprintf(reasonFmt, args...) appAddress := app.IApplicationAddress.String() // Log the reason first - v.Logger.Error(reason, "application", appAddress) + s.Logger.Error(reason, "application", appAddress) // Update application state - err := v.repository.UpdateApplicationState(ctx, app.ID, ApplicationState_Inoperable, &reason) + err := s.repository.UpdateApplicationState(ctx, app.ID, ApplicationState_Inoperable, &reason) if err != nil { - v.Logger.Error("failed to update application state to inoperable", "app", appAddress, "err", err) + s.Logger.Error("failed to update application state to inoperable", "app", appAddress, "err", err) } // Return the error with the reason @@ -136,10 +139,10 @@ func (v *Service) setApplicationInoperable(ctx context.Context, app *Application // validateApplication calculates, validates and stores the claim and/or proofs // for each processed epoch of the application. -func (v *Service) validateApplication(ctx context.Context, app *Application) error { - v.Logger.Debug("Starting validation", "application", app.Name) +func (s *Service) validateApplication(ctx context.Context, app *Application) error { + s.Logger.Debug("Starting validation", "application", app.Name) appAddress := app.IApplicationAddress.String() - processedEpochs, _, err := getProcessedEpochs(ctx, v.repository, appAddress) + processedEpochs, _, err := getProcessedEpochs(ctx, s.repository, appAddress) if err != nil { return fmt.Errorf( "failed to get processed epochs of application %v. %w", @@ -148,33 +151,34 @@ func (v *Service) validateApplication(ctx context.Context, app *Application) err } for _, epoch := range processedEpochs { - v.Logger.Debug("Started calculating claim", + s.Logger.Debug("Started calculating outputs merkle root", "application", appAddress, "epoch_index", epoch.Index, "last_block", epoch.LastBlock, ) - claim, outputs, err := v.createClaimAndProofs(ctx, app, epoch) + merkleRoot, outputs, err := s.computeMerkleTreeAndProofs(ctx, app, epoch) if err != nil { - v.Logger.Error("failed to create claim and proofs.", "error", err) + s.Logger.Error("failed to create claim and proofs.", "error", err) return err } - v.Logger.Info("Claim Computed", + s.Logger.Info("OutputsMerkleRoot Computed", "application", appAddress, "epoch_index", epoch.Index, - "claimHash", *claim, + "outputs_merkle_root", *merkleRoot, ) // The Cartesi Machine calculates the root hash of the outputs Merkle // tree after each input. Therefore, the root hash calculated after the - // last input in the epoch must match the claim hash calculated by the - // Validator. We first retrieve the hash calculated by the - // Cartesi Machine... - input, err := v.repository.GetLastInput( - ctx, - appAddress, - epoch.Index, - ) + // last input in the epoch must match the one calculated by the Validator + // So we need to validate the application state. + if *epoch.OutputsMerkleRoot != *merkleRoot { + return s.setApplicationInoperable(ctx, app, + "epoch %v outputs merkle root does not match computed one. Expected: %v, Got %v", + epoch.Index, *epoch.OutputsMerkleRoot, *merkleRoot) + } + + input, err := s.repository.GetLastInput(ctx, appAddress, epoch.Index) if err != nil { return fmt.Errorf( "failed to get the last Input for epoch %v of application %v. %w", @@ -182,25 +186,74 @@ func (v *Service) validateApplication(ctx context.Context, app *Application) err ) } - if input.OutputsHash == nil { - return v.setApplicationInoperable(ctx, app, - "inconsistent state: machine claim for epoch %v of application %v was not found", - epoch.Index, appAddress) + // DaveConsensus can have empty epochs. Authority and Quorum don't. + if !app.IsDaveConsensus() || input != nil { + if input.OutputsHash == nil { + return s.setApplicationInoperable(ctx, app, + "inconsistent state: epoch %v last input (%v) outputs merkle root is not defined", + epoch.Index, input.Index) + } + + // ...and compare it to the hash calculated by the Validator + if *epoch.OutputsMerkleRoot != *input.OutputsHash { + return s.setApplicationInoperable(ctx, app, + "computed outputs merkle root does not match epoch %v last input %v merkle root. Expected: %v, Got %v", + epoch.Index, input.Index, *input.OutputsHash, *epoch.OutputsMerkleRoot) + } + + if *epoch.MachineHash != *input.MachineHash { + return s.setApplicationInoperable(ctx, app, + "epoch %v machine hash does not match epoch last input (%v) machine hash. Expected: %v, Got %v", + epoch.Index, input.Index, *input.MachineHash, *epoch.MachineHash) + } + } else { // empty epochs + if epoch.VirtualIndex > 0 { + previousEpoch, err := s.repository.GetEpochByVirtualIndex(ctx, appAddress, epoch.VirtualIndex-1) + if err != nil { + return fmt.Errorf( + "failed to get previous epoch for epoch %v (%v) of application %v. %w", + epoch.Index, epoch.VirtualIndex, appAddress, err, + ) + } + if *epoch.MachineHash != *previousEpoch.MachineHash { + return s.setApplicationInoperable(ctx, app, + "epoch %v machine hash does not match previous epoch %v machine hash. Expected: %v, Got %v", + epoch.Index, previousEpoch.Index, *previousEpoch.MachineHash, *epoch.MachineHash) + } + if *epoch.OutputsMerkleRoot != *previousEpoch.OutputsMerkleRoot { + return s.setApplicationInoperable(ctx, app, + "epoch %v outputs merkle root does not match previous epoch %v one. Expected: %v, Got %v", + epoch.Index, previousEpoch.Index, *previousEpoch.OutputsMerkleRoot, *epoch.OutputsMerkleRoot) + } + } else { // first epoch + if *epoch.MachineHash != app.TemplateHash { + return s.setApplicationInoperable(ctx, app, + "epoch %v machine hash does not match for application template hash. Expected: %v, Got %v", + epoch.Index, app.TemplateHash, *epoch.MachineHash) + } + if *epoch.OutputsMerkleRoot != s.pristineRootHash { + return s.setApplicationInoperable(ctx, app, + "epoch %v outputs merkle root does not match pristine root hash. Expected: %v, Got %v", + epoch.Index, s.pristineRootHash, *epoch.OutputsMerkleRoot) + } + } } - // ...and compare it to the hash calculated by the Validator - if *input.OutputsHash != *claim { - return v.setApplicationInoperable(ctx, app, - "validator claim does not match machine claim for epoch %v of application %v. Expected: %v, Got %v", - epoch.Index, appAddress, *input.OutputsHash, *claim) + if app.IsDaveConsensus() { + commitment, proof, err := s.buildCommitment(ctx, app, epoch) + if err != nil { + return fmt.Errorf("failed to compute commitment for epoch %v (%v) of application %v. %w", + epoch.Index, epoch.VirtualIndex, appAddress, err, + ) + } + epoch.Commitment = commitment + epoch.CommitmentProof = proof.Siblings } - // update the epoch status and its claim epoch.Status = EpochStatus_ClaimComputed - epoch.ClaimHash = claim // store the epoch and proofs in the database - err = v.repository.StoreClaimAndProofs(ctx, epoch, outputs) + err = s.repository.StoreClaimAndProofs(ctx, epoch, outputs) if err != nil { return fmt.Errorf( "failed to store claim and proofs for epoch %v of application %v. %w", @@ -210,7 +263,7 @@ func (v *Service) validateApplication(ctx context.Context, app *Application) err } if len(processedEpochs) == 0 { - v.Logger.Debug("no processed epochs to validate", + s.Logger.Debug("no processed epochs to validate", "app", app.IApplicationAddress, ) } @@ -218,17 +271,66 @@ func (v *Service) validateApplication(ctx context.Context, app *Application) err return nil } -// createClaimAndProofs calculates the claim and proofs for an epoch. It returns +func (s *Service) buildCommitment(ctx context.Context, app *Application, epoch *Epoch) (*common.Hash, *merkle.Proof, error) { + if app == nil || epoch == nil { + return nil, nil, fmt.Errorf("application or epoch is nil") + } + if !app.IsDaveConsensus() { + return nil, nil, nil + } + s.Logger.Debug("DaveConsensus: Building commitment for epoch", + "application", app.Name, + "epoch", epoch.Index) + + builder := merkle.Builder{} + inputCount := epoch.InputIndexUpperBound - epoch.InputIndexLowerBound + if inputCount > 0 { + statesHashes, total, err := s.repository.ListStateHashes(ctx, app.IApplicationAddress.String(), + repository.StateHashFilter{EpochIndex: &epoch.Index}, repository.Pagination{}, false) + if err != nil { + return nil, nil, fmt.Errorf("failed to list state hashes for epoch %d of application %s: %w", + epoch.Index, app.Name, err) + } + if total < inputCount { + return nil, nil, fmt.Errorf("not enough state hashes for epoch %d of application %s: expected at least %d, got %d", + epoch.Index, app.Name, inputCount, total) + } + if uint64(len(statesHashes)) != total { + return nil, nil, fmt.Errorf("inconsistent number of state hashes for epoch %d of application %s: expected %d, got %d", epoch.Index, app.Name, total, len(statesHashes)) + } + for _, stateHash := range statesHashes { + builder.AppendRepeatedUint64(merkle.TreeLeaf(stateHash.MachineHash), stateHash.Repetitions) + } + } + + remainingInputs := pkgm.InputsPerEpoch - inputCount + remainingStrides := remainingInputs << pkgm.Log2StridesPerInput + if remainingStrides > 0 { + builder.AppendRepeatedUint64(merkle.TreeLeaf(*epoch.MachineHash), remainingStrides) + } + + epochCommitmentTree := builder.Build() + commitment := epochCommitmentTree.GetRootHash() + proof := epochCommitmentTree.ProveLast() + s.Logger.Info("DaveConsensus epoch commitment built", + "application", app.Name, + "epoch", epoch.Index, + "commitment", commitment.String()) + return &commitment, proof, nil + +} + +// computeMerkleTreeAndProofs calculates the claim and proofs for an epoch. It returns // the claim and the epoch outputs updated with their hash and proofs. In case // the epoch has no outputs, there are no proofs and it returns the pristine // claim for the first epoch or the previous epoch claim otherwise. -func (v *Service) createClaimAndProofs( +func (s *Service) computeMerkleTreeAndProofs( ctx context.Context, app *Application, epoch *Epoch, ) (*common.Hash, []*Output, error) { appAddress := app.IApplicationAddress.String() - epochOutputs, _, err := v.repository.ListOutputs(ctx, appAddress, repository.OutputFilter{ + epochOutputs, _, err := s.repository.ListOutputs(ctx, appAddress, repository.OutputFilter{ BlockRange: &repository.Range{ Start: epoch.FirstBlock, End: epoch.LastBlock, @@ -245,7 +347,7 @@ func (v *Service) createClaimAndProofs( var previousEpoch *Epoch if epoch.VirtualIndex > 0 { - previousEpoch, err = v.repository.GetEpochByVirtualIndex(ctx, appAddress, epoch.VirtualIndex-1) + previousEpoch, err = s.repository.GetEpochByVirtualIndex(ctx, appAddress, epoch.VirtualIndex-1) if err != nil { return nil, nil, fmt.Errorf( "failed to get previous epoch for epoch %v (%v) of application %v. %w", @@ -259,15 +361,15 @@ func (v *Service) createClaimAndProofs( // and there is no previous epoch if previousEpoch == nil { // this is the first epoch, return the pristine claim - return &v.pristineRootHash, nil, nil + return &s.pristineRootHash, nil, nil } // if there are no outputs and there is a previous epoch, return its claim - if previousEpoch.ClaimHash == nil { - return nil, nil, v.setApplicationInoperable(ctx, app, + if previousEpoch.OutputsMerkleRoot == nil { + return nil, nil, s.setApplicationInoperable(ctx, app, "invalid application state for epoch %v (%v) of application %v. Previous epoch has no claim.", epoch.Index, epoch.VirtualIndex, appAddress) } - return previousEpoch.ClaimHash, nil, nil + return previousEpoch.OutputsMerkleRoot, nil, nil } var pre []common.Hash @@ -275,11 +377,11 @@ func (v *Service) createClaimAndProofs( // it there is no previous epoch if previousEpoch == nil { // there are only new outputs, use a dummy pre context - pre = v.pristinePostContext + pre = s.pristinePostContext index = 0 } else { // retrieve the previous output, one not existing is ok... handled below - lastOutput, err := v.repository.GetLastOutputBeforeBlock(ctx, appAddress, epoch.FirstBlock) + lastOutput, err := s.repository.GetLastOutputBeforeBlock(ctx, appAddress, epoch.FirstBlock) if err != nil { return nil, nil, fmt.Errorf( "failed to get previous output for epoch %v (%v) of application %v. %w", @@ -288,12 +390,12 @@ func (v *Service) createClaimAndProofs( } if lastOutput == nil { // there are only new outputs, use a dummy pre context - pre = v.pristinePostContext + pre = s.pristinePostContext index = 0 } else { // there are previous outputs, create a pre context from the last output. if lastOutput.Hash == nil || len(lastOutput.OutputHashesSiblings) != merkle.TREE_DEPTH { - return nil, nil, v.setApplicationInoperable(ctx, app, + return nil, nil, s.setApplicationInoperable(ctx, app, "Inconsistent application state (%v). Last output (%d) before epoch %d has no hash or invalid hash siblings.", app.Name, lastOutput.Index, epoch.Index) } @@ -302,7 +404,7 @@ func (v *Service) createClaimAndProofs( // make sure no output got skipped if index != epochOutputs[0].Index { - return nil, nil, v.setApplicationInoperable(ctx, app, + return nil, nil, s.setApplicationInoperable(ctx, app, "Inconsistent application state (%v). Output index mismatch. "+ "Last output (%d) before epoch %d and first output (%d) are not sequential.", app.Name, lastOutput.Index, epoch.Index, epochOutputs[0].Index) @@ -313,7 +415,7 @@ func (v *Service) createClaimAndProofs( // we have outputs to compute, gather the values to call ComputeSiblingsMatrix outputHashes := make([]common.Hash, 0, len(epochOutputs)) for _, output := range epochOutputs { - hash := crypto.Keccak256Hash(output.RawData[:]) + hash := crypto.Keccak256Hash(output.RawData) // update outputs with their hash output.Hash = &hash // add them to the leaves slice @@ -321,7 +423,7 @@ func (v *Service) createClaimAndProofs( } // compute and store siblings - siblings, err := merkle.ComputeSiblingsMatrix(pre, outputHashes, v.pristinePostContext, index) + siblings, err := merkle.ComputeSiblingsMatrix(pre, outputHashes, s.pristinePostContext, index) if err != nil { return nil, nil, err } diff --git a/internal/validator/validator_test.go b/internal/validator/validator_test.go index 731790518..55cb9a950 100644 --- a/internal/validator/validator_test.go +++ b/internal/validator/validator_test.go @@ -43,9 +43,9 @@ func (s *ValidatorSuite) SetupSubTest() { serviceArgs := &service.CreateInfo{Name: "validator", Impl: validator} err := service.Create(context.Background(), serviceArgs, &validator.Service) s.Require().Nil(err) - dummyClaimHash := common.HexToHash("0x4128b6c65e6131a6823bab8deee051078080bb82d505015976efe2fb3b4c91c0") + dummyOutputsMerkleRoot := common.HexToHash("0x0a162946e56158bac0673e6dd3bdfdc1e4a0e7744a120fdb640050c8d7abe1c6") dummyEpochs = []Epoch{ - {Index: 0, VirtualIndex: 0, FirstBlock: 0, LastBlock: 9, ClaimHash: &dummyClaimHash}, + {Index: 0, VirtualIndex: 0, FirstBlock: 0, LastBlock: 9, OutputsMerkleRoot: &dummyOutputsMerkleRoot, MachineHash: &validator.pristineRootHash}, {Index: 1, VirtualIndex: 1, FirstBlock: 10, LastBlock: 19}, {Index: 2, VirtualIndex: 2, FirstBlock: 20, LastBlock: 29}, {Index: 3, VirtualIndex: 3, FirstBlock: 30, LastBlock: 39}, @@ -140,7 +140,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofSuccess() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, false, ).Return([]*Output{}, uint64(0), nil) - claimHash, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[0]) + claimHash, _, err := validator.computeMerkleTreeAndProofs(nil, &app, &dummyEpochs[0]) claimHashRef, _, err := merkle.CreateProofs(nil, merkle.TREE_DEPTH) s.ErrorIs(nil, err) s.NotNil(claimHash) @@ -157,7 +157,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofSuccess() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, false, ).Return([]*Output{&output}, uint64(1), nil) - claimHash, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[0]) + claimHash, _, err := validator.computeMerkleTreeAndProofs(nil, &app, &dummyEpochs[0]) s.ErrorIs(nil, err) s.NotNil(claimHash) repo.AssertExpectations(s.T()) @@ -172,9 +172,9 @@ func (s *ValidatorSuite) TestCreateClaimAndProofSuccess() { mock.Anything, mock.Anything, mock.Anything, ).Return(&dummyEpochs[0], nil).Once() - claimHash, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[1]) + claimHash, _, err := validator.computeMerkleTreeAndProofs(nil, &app, &dummyEpochs[1]) s.ErrorIs(nil, err) - s.Equal(dummyEpochs[0].ClaimHash, claimHash) + s.Equal(dummyEpochs[0].OutputsMerkleRoot, claimHash) repo.AssertExpectations(s.T()) }) @@ -199,13 +199,14 @@ func (s *ValidatorSuite) TestCreateClaimAndProofSuccess() { mock.Anything, mock.Anything, mock.Anything, ).Return(&dummyOutputs[0], nil).Once() - _, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[1]) + _, _, err := validator.computeMerkleTreeAndProofs(nil, &app, &dummyEpochs[1]) s.ErrorIs(nil, err) repo.AssertExpectations(s.T()) }) } func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { + ctx := context.Background() app := Application{ Name: "dummy-application-name", } @@ -217,7 +218,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, false, ).Return([]*Output{}, uint64(0), xerror).Once() - _, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[0]) + _, _, err := validator.computeMerkleTreeAndProofs(ctx, &app, &dummyEpochs[0]) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -232,7 +233,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { mock.Anything, mock.Anything, mock.Anything, ).Return(&dummyEpochs[0], xerror).Once() - _, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[1]) + _, _, err := validator.computeMerkleTreeAndProofs(ctx, &app, &dummyEpochs[1]) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -244,7 +245,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { ).Return([]*Output{}, uint64(0), nil).Once() invalidEpoch := dummyEpochs[0] - invalidEpoch.ClaimHash = nil + invalidEpoch.OutputsMerkleRoot = nil repo.On("GetEpochByVirtualIndex", mock.Anything, mock.Anything, mock.Anything, ).Return(&invalidEpoch, nil).Once() @@ -253,7 +254,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, ).Return(nil).Once() - _, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[1]) + _, _, err := validator.computeMerkleTreeAndProofs(ctx, &app, &dummyEpochs[1]) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -272,7 +273,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { mock.Anything, mock.Anything, mock.Anything, ).Return(&Output{}, xerror).Once() - _, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[1]) + _, _, err := validator.computeMerkleTreeAndProofs(ctx, &app, &dummyEpochs[1]) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -295,7 +296,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, ).Return(nil).Once() - _, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[1]) + _, _, err := validator.computeMerkleTreeAndProofs(ctx, &app, &dummyEpochs[1]) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -318,7 +319,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, ).Return(nil).Once() - _, _, err := validator.createClaimAndProofs(nil, &app, &dummyEpochs[1]) + _, _, err := validator.computeMerkleTreeAndProofs(ctx, &app, &dummyEpochs[1]) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -326,6 +327,7 @@ func (s *ValidatorSuite) TestCreateClaimAndProofFailures() { } func (s *ValidatorSuite) TestValidateApplicationSuccess() { + ctx := context.Background() app := Application{ Name: "dummy-application-name", } @@ -334,7 +336,7 @@ func (s *ValidatorSuite) TestValidateApplicationSuccess() { mock.Anything, app.IApplicationAddress.String(), mock.Anything, mock.Anything, false, ).Return(([]*Epoch)(nil), uint64(0), nil).Once() - err := validator.validateApplication(nil, &app) + err := validator.validateApplication(ctx, &app) s.ErrorIs(nil, err) repo.AssertExpectations(s.T()) }) @@ -342,6 +344,7 @@ func (s *ValidatorSuite) TestValidateApplicationSuccess() { s.Run("FirstEpochNoOutputs", func() { input := Input{ EpochApplicationID: app.ID, + MachineHash: &validator.pristineRootHash, OutputsHash: &validator.pristineRootHash, } @@ -361,29 +364,30 @@ func (s *ValidatorSuite) TestValidateApplicationSuccess() { mock.Anything, mock.Anything, mock.Anything, ).Return(nil).Once() - err := validator.validateApplication(nil, &app) + err := validator.validateApplication(ctx, &app) s.ErrorIs(nil, err) repo.AssertExpectations(s.T()) }) } func (s *ValidatorSuite) TestValidateApplicationFailure() { + ctx := context.Background() app := Application{ Name: "dummy-application-name", } xerror := fmt.Errorf("Error") - + s.Run("getProcessedEpochsFailure", func() { repo.On("ListEpochs", mock.Anything, app.IApplicationAddress.String(), mock.Anything, mock.Anything, false, ).Return([]*Epoch{}, uint64(0), xerror).Once() - err := validator.validateApplication(nil, &app) + err := validator.validateApplication(ctx, &app) s.NotNil(err) repo.AssertExpectations(s.T()) }) - - s.Run("createClaimAndProofsFailure", func() { + + s.Run("computeMerkleTreeAndProofsFailure", func() { repo.On("ListEpochs", mock.Anything, app.IApplicationAddress.String(), mock.Anything, mock.Anything, false, ).Return([]*Epoch{&dummyEpochs[0]}, uint64(1), nil).Once() @@ -392,11 +396,11 @@ func (s *ValidatorSuite) TestValidateApplicationFailure() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, false, ).Return([]*Output{}, uint64(0), xerror).Once() - err := validator.validateApplication(nil, &app) + err := validator.validateApplication(ctx, &app) s.NotNil(err) repo.AssertExpectations(s.T()) }) - + s.Run("GetLastInputFailure", func() { input := Input{ EpochApplicationID: app.ID, @@ -415,7 +419,7 @@ func (s *ValidatorSuite) TestValidateApplicationFailure() { mock.Anything, app.IApplicationAddress.String(), dummyEpochs[0].Index, ).Return(&input, xerror).Once() - err := validator.validateApplication(nil, &app) + err := validator.validateApplication(ctx, &app) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -470,7 +474,7 @@ func (s *ValidatorSuite) TestValidateApplicationFailure() { mock.Anything, mock.Anything, mock.Anything, mock.Anything, ).Return(nil).Once() - err := validator.validateApplication(nil, &app) + err := validator.validateApplication(ctx, &app) s.NotNil(err) repo.AssertExpectations(s.T()) }) @@ -479,6 +483,7 @@ func (s *ValidatorSuite) TestValidateApplicationFailure() { input := Input{ EpochApplicationID: app.ID, OutputsHash: &validator.pristineRootHash, + MachineHash: &validator.pristineRootHash, } repo.On("ListEpochs", @@ -497,7 +502,7 @@ func (s *ValidatorSuite) TestValidateApplicationFailure() { mock.Anything, mock.Anything, mock.Anything, ).Return(xerror).Once() - err := validator.validateApplication(nil, &app) + err := validator.validateApplication(ctx, &app) s.ErrorIs(err, xerror) repo.AssertExpectations(s.T()) }) @@ -573,6 +578,12 @@ func (m *Mockrepo) StoreClaimAndProofs(ctx context.Context, epoch *Epoch, output return args.Error(0) } +func (m *Mockrepo) ListStateHashes(ctx context.Context, nameOrAddress string, + f repository.StateHashFilter, p repository.Pagination, descending bool) ([]*StateHash, uint64, error) { + args := m.Called(ctx, nameOrAddress, f, p, descending) + return args.Get(0).([]*StateHash), args.Get(1).(uint64), args.Error(2) +} + func (m *Mockrepo) UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error { args := m.Called(ctx, appID, state, reason) return args.Error(0) diff --git a/pkg/emulator/machine.go b/pkg/emulator/machine.go index 54b2d24fa..7fb624d43 100644 --- a/pkg/emulator/machine.go +++ b/pkg/emulator/machine.go @@ -7,6 +7,7 @@ package emulator // #include +// #include // #include "cartesi-machine/machine-c-api.h" import "C" @@ -16,6 +17,11 @@ import ( "unsafe" ) +const HashSize = C.sizeof_cm_hash + +// Common type aliases +type Hash = [HashSize]byte + // ----------------------------------------------------------------------------- // Machine Methods // ----------------------------------------------------------------------------- @@ -171,22 +177,19 @@ func (m *Machine) GetRegAddress(reg RegID) (uint64, error) { } // get_root_hash -func (m *Machine) GetRootHash() ([]byte, error) { - var hash C.cm_hash +func (m *Machine) GetRootHash() (Hash, error) { + var cHash C.cm_hash var err error - var result []byte m.callCAPI(func() { - err = newError(C.cm_get_root_hash(m.ptr, &hash)) - if err == nil { - result = C.GoBytes(unsafe.Pointer(&hash), 32) - } + err = newError(C.cm_get_root_hash(m.ptr, &cHash)) }) - if err != nil { - return nil, err + return Hash{}, err } - return result, nil + + // Zero-copy: reinterpret C array as Go array + return *(*Hash)(unsafe.Pointer(&cHash)), nil } // get_runtime_config diff --git a/pkg/ethutil/filter.go b/pkg/ethutil/filter.go index d0439277c..d841a3635 100644 --- a/pkg/ethutil/filter.go +++ b/pkg/ethutil/filter.go @@ -87,7 +87,7 @@ func queryBlockRangeTooLarge(err error) bool { // (From, To) block ranges into multiple smaller calls when it detects the // provider rejected the query for this specific reason. Detection is a // heuristic and implemented in the function queryBlockRangeTooLarge. It -// potentially has to be adjusted to accomodate each provider. +// potentially has to be adjusted to accommodate each provider. func (f *Filter) ChunkedFilterLogs( ctx context.Context, client *ethclient.Client, diff --git a/pkg/ethutil/search.go b/pkg/ethutil/search.go new file mode 100644 index 000000000..b2629ac94 --- /dev/null +++ b/pkg/ethutil/search.go @@ -0,0 +1,113 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package ethutil + +import ( + "context" + "fmt" + "math/big" + "slices" +) + +// TransitionQueryFn for binary search +type TransitionQueryFn func(ctx context.Context, block uint64) (*big.Int, error) + +type OnHitFn func(block uint64) error + +// sortAndCallOnHit sorts the transition blocks and calls onHit for each in chronological order +func sortAndCallOnHit(transitionBlocks []uint64, onHit OnHitFn) (uint64, error) { + slices.Sort(transitionBlocks) + for _, block := range transitionBlocks { + if err := onHit(block); err != nil { + return 0, err + } + } + return uint64(len(transitionBlocks)), nil +} + +// FindTransitions performs divide-and-conquer search for transitions using oracle +// and calls onHit for each transition block in chronological order. +// The prevValue is optional and used to detect a transition exactly at startBlock. +// It assumes the queried value is monotonic (e.g., increasing counter); if it can revert, +// intermediate transitions may be missed when net change is zero. +func FindTransitions(ctx context.Context, startBlock, endBlock uint64, prevValue *big.Int, + transitionQuery TransitionQueryFn, onHit OnHitFn) (uint64, error) { + if startBlock > endBlock { + return 0, nil + } + + type interval struct { + StartBlock, EndBlock uint64 + StartValue, EndValue *big.Int + } + + startValue, err := transitionQuery(ctx, startBlock) + if err != nil { + return 0, fmt.Errorf("transitionQuery(startBlock=%d): %w", startBlock, err) + } + + var transitionBlocks []uint64 + if prevValue != nil { + comparisonResult := prevValue.Cmp(startValue) + if comparisonResult > 0 { + return 0, fmt.Errorf("monotonic assumption violated: prevValue %s > startValue %s at block %d", + prevValue.String(), startValue.String(), startBlock) + } + if comparisonResult < 0 { + transitionBlocks = append(transitionBlocks, startBlock) + } + } + + if startBlock == endBlock { + return sortAndCallOnHit(transitionBlocks, onHit) + } + + endValue, err := transitionQuery(ctx, endBlock) + if err != nil { + return 0, fmt.Errorf("transitionQuery(endBlock=%d): %w", endBlock, err) + } + + if startValue.Cmp(endValue) == 0 { + // No further transitions, but may have added startBlock + return sortAndCallOnHit(transitionBlocks, onHit) + } + + // First phase: collect all transition blocks + stack := []interval{{StartBlock: startBlock, EndBlock: endBlock, StartValue: startValue, EndValue: endValue}} + for len(stack) > 0 { + // Check for context cancellation + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + } + + // Pop from stack + iv := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if iv.StartBlock == iv.EndBlock { + // Found a transition block + transitionBlocks = append(transitionBlocks, iv.StartBlock) + continue + } + + midBlock := iv.StartBlock + (iv.EndBlock-iv.StartBlock)/2 //nolint:mnd + midValue, err := transitionQuery(ctx, midBlock) + if err != nil { + return 0, fmt.Errorf("transitionQuery(midBlock=%d): %w", midBlock, err) + } + + // Add new intervals to stack if there are transitions + if midValue.Cmp(iv.EndValue) != 0 { + stack = append(stack, interval{StartBlock: midBlock + 1, EndBlock: iv.EndBlock, StartValue: midValue, EndValue: iv.EndValue}) + } + if iv.StartValue.Cmp(midValue) != 0 { + stack = append(stack, interval{StartBlock: iv.StartBlock, EndBlock: midBlock, StartValue: iv.StartValue, EndValue: midValue}) + } + } + + // Second phase: sort transition blocks and call onHit in chronological order + return sortAndCallOnHit(transitionBlocks, onHit) +} diff --git a/pkg/ethutil/search_test.go b/pkg/ethutil/search_test.go new file mode 100644 index 000000000..86de1d7f5 --- /dev/null +++ b/pkg/ethutil/search_test.go @@ -0,0 +1,245 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package ethutil + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/suite" +) + +type SearchSuite struct { + suite.Suite + ctx context.Context +} + +func TestSearchSuite(t *testing.T) { + suite.Run(t, new(SearchSuite)) +} + +func (s *SearchSuite) SetupTest() { + s.ctx = context.Background() +} + +// Helper to create a mock transitionQuery from a map +func mockTransitionQuery(values map[uint64]*big.Int) TransitionQueryFn { + return func(_ context.Context, block uint64) (*big.Int, error) { + if val, ok := values[block]; ok { + return val, nil + } + return big.NewInt(0), nil // default + } +} + +// Helper to create an onHit that collects blocks +func collectOnHit(blocks *[]uint64) OnHitFn { + return func(block uint64) error { + *blocks = append(*blocks, block) + return nil + } +} + +// Helper to create an onHit that returns error +func errorOnHit(err error) OnHitFn { + return func(_ uint64) error { + return err + } +} + +// Helper to create a transitionQuery that returns error +func errorTransitionQuery(err error) TransitionQueryFn { + return func(_ context.Context, _ uint64) (*big.Int, error) { + return nil, err + } +} + +func (s *SearchSuite) TestNoTransitionsWhenValuesEqual() { + values := map[uint64]*big.Int{ + 0: big.NewInt(1), + 10: big.NewInt(1), + } + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 10, nil, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Empty(blocks) +} + +func (s *SearchSuite) TestSingleTransition() { + values := map[uint64]*big.Int{ + 0: big.NewInt(1), + 1: big.NewInt(2), + } + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 1, nil, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal([]uint64{1}, blocks) +} + +func (s *SearchSuite) TestMultipleTransitions() { + values := map[uint64]*big.Int{ + 1: big.NewInt(1), + 2: big.NewInt(1), + 3: big.NewInt(2), + 4: big.NewInt(2), + 5: big.NewInt(3), + 6: big.NewInt(3), + } + var blocks []uint64 + count, err := FindTransitions(s.ctx, 1, 6, nil, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal(count, uint64(2)) + s.Equal([]uint64{3, 5}, blocks) + + blocks = []uint64{} + previousValue := big.NewInt(0) + count, err = FindTransitions(s.ctx, 1, 6, previousValue, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal(count, uint64(3)) + s.Equal([]uint64{1, 3, 5}, blocks) +} + +func (s *SearchSuite) TestTransitionAtBoundary() { + values := map[uint64]*big.Int{ + 0: big.NewInt(1), + 1: big.NewInt(2), + } + var blocks []uint64 + count, err := FindTransitions(s.ctx, 0, 1, nil, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal(count, uint64(1)) + s.Equal([]uint64{1}, blocks) + + blocks = []uint64{} + previousValue := big.NewInt(0) + count, err = FindTransitions(s.ctx, 0, 1, previousValue, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal(count, uint64(2)) + s.Equal([]uint64{0, 1}, blocks) + +} + +func (s *SearchSuite) TestStartEqualsEnd() { + values := map[uint64]*big.Int{ + 5: big.NewInt(1), + } + var blocks []uint64 + count, err := FindTransitions(s.ctx, 5, 5, nil, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal(count, uint64(0)) + s.Empty(blocks) + + blocks = []uint64{} + previousValue := big.NewInt(0) + count, err = FindTransitions(s.ctx, 5, 5, previousValue, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal(count, uint64(1)) + s.Equal([]uint64{5}, blocks) +} + +func (s *SearchSuite) TestContextCancellation() { + values := map[uint64]*big.Int{ + 0: big.NewInt(1), + 10: big.NewInt(2), + } + ctx, cancel := context.WithCancel(s.ctx) + cancel() + var blocks []uint64 + _, err := FindTransitions(ctx, 0, 10, nil, mockTransitionQuery(values), collectOnHit(&blocks)) + s.Error(err) + s.Equal(context.Canceled, err) +} + +func (s *SearchSuite) TestTransitionQueryErrorAtStart() { + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 10, nil, errorTransitionQuery(fmt.Errorf("query error")), collectOnHit(&blocks)) + s.Error(err) + s.Contains(err.Error(), "transitionQuery(startBlock=0): query error") + s.Empty(blocks) +} + +func (s *SearchSuite) TestTransitionQueryErrorAtEnd() { + values := map[uint64]*big.Int{ + 0: big.NewInt(1), + } + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 10, nil, func(_ context.Context, block uint64) (*big.Int, error) { + if block == 10 { + return nil, fmt.Errorf("query error at end") + } + return values[block], nil + }, collectOnHit(&blocks)) + s.Error(err) + s.Contains(err.Error(), "transitionQuery(endBlock=10): query error at end") + s.Empty(blocks) +} + +func (s *SearchSuite) TestTransitionQueryErrorAtMid() { + values := map[uint64]*big.Int{ + 0: big.NewInt(1), + 10: big.NewInt(2), + } + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 10, nil, func(_ context.Context, block uint64) (*big.Int, error) { + if block == 5 { + return nil, fmt.Errorf("query error at mid") + } + if val, ok := values[block]; ok { + return val, nil + } + return big.NewInt(0), nil + }, collectOnHit(&blocks)) + s.Error(err) + s.Contains(err.Error(), "transitionQuery(midBlock=5): query error at mid") + s.Empty(blocks) +} + +func (s *SearchSuite) TestOnHitError() { + values := map[uint64]*big.Int{ + 0: big.NewInt(1), + 1: big.NewInt(2), + } + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 1, nil, mockTransitionQuery(values), errorOnHit(fmt.Errorf("onHit error"))) + s.Error(err) + s.Contains(err.Error(), "onHit error") + s.Empty(blocks) +} + +func (s *SearchSuite) TestLargeRange() { + values := make(map[uint64]*big.Int) + for i := uint64(0); i <= 100; i++ { + if i < 50 { + values[i] = big.NewInt(1) + } else { + values[i] = big.NewInt(2) + } + } + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 100, nil, mockTransitionQuery(values), collectOnHit(&blocks)) + s.NoError(err) + s.Equal([]uint64{50}, blocks) +} + +func (s *SearchSuite) TestNoValuesDefined() { + var blocks []uint64 + _, err := FindTransitions(s.ctx, 0, 10, nil, mockTransitionQuery(map[uint64]*big.Int{}), collectOnHit(&blocks)) + s.NoError(err) + s.Empty(blocks) +} + +func (s *SearchSuite) TestMonotonicViolation() { + values := map[uint64]*big.Int{ + 0: big.NewInt(2), + 1: big.NewInt(1), + } + var blocks []uint64 + previousValue := big.NewInt(3) // 3 > 2, violation + _, err := FindTransitions(s.ctx, 0, 1, previousValue, mockTransitionQuery(values), collectOnHit(&blocks)) + s.Error(err) + s.Contains(err.Error(), "monotonic assumption violated") + s.Empty(blocks) +} diff --git a/pkg/ethutil/transaction.go b/pkg/ethutil/transaction.go index 11852f810..26c793d35 100644 --- a/pkg/ethutil/transaction.go +++ b/pkg/ethutil/transaction.go @@ -16,7 +16,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" ) -const PollInterval = 100 * time.Millisecond +const PollInterval = 500 * time.Millisecond // Prepare the transaction, send it, and wait for the receipt. func sendTransaction( @@ -58,7 +58,9 @@ func _prepareTransaction( if err != nil { return nil, fmt.Errorf("failed to get gas price: %v", err) } - txOpts.Nonce = big.NewInt(int64(nonce)) + nonceBigInt := &big.Int{} + nonceBigInt.SetUint64(nonce) + txOpts.Nonce = nonceBigInt txOpts.Value = txValue txOpts.GasLimit = gasLimit txOpts.GasPrice = gasPrice diff --git a/pkg/machine/backend.go b/pkg/machine/backend.go index 7e880be64..9d0e7d6a8 100644 --- a/pkg/machine/backend.go +++ b/pkg/machine/backend.go @@ -3,7 +3,10 @@ package machine -import "time" +import ( + "encoding/json" + "time" +) type BreakReason int32 @@ -16,6 +19,15 @@ const ( ReachedTargetMcycle BreakReason = 0x5 ) +type HashCollectorState struct { + Period uint64 + Phase uint64 + MaxHashes uint64 + BundleLog2 int32 + Hashes []Hash + BackTree json.RawMessage +} + // This Backend interface covers the methods used from the emulator / remote machine server. // It is to abstract the emulator package and allow for easier testing and mocking in unit tests. type Backend interface { @@ -23,6 +35,8 @@ type Backend interface { Store(directory string, timeout time.Duration) error Run(mcycleEnd uint64, timeout time.Duration) (BreakReason, error) + RunAndCollectRootHashes(mcycleEnd uint64, state *HashCollectorState, timeout time.Duration, + ) (reason BreakReason, err error) IsAtManualYield(timeout time.Duration) (bool, error) ReadMCycle(timeout time.Duration) (uint64, error) @@ -30,7 +44,10 @@ type Backend interface { SendCmioResponse(reason uint16, data []byte, timeout time.Duration) error ReceiveCmioRequest(timeout time.Duration) (cmd uint8, reason uint16, data []byte, err error) - GetRootHash(timeout time.Duration) ([]byte, error) + WriteMemory(address uint64, data []byte, timeout time.Duration) error + + GetRootHash(timeout time.Duration) (Hash, error) + GetProof(address uint64, log2size int32, timeout time.Duration) ([]Hash, error) Delete() ForkServer(timeout time.Duration) (Backend, string, uint32, error) diff --git a/pkg/machine/backend_test.go b/pkg/machine/backend_test.go index 806ea2dbb..927ca382b 100644 --- a/pkg/machine/backend_test.go +++ b/pkg/machine/backend_test.go @@ -50,9 +50,14 @@ func (m *MockBackend) ReceiveCmioRequest(timeout time.Duration) (uint8, uint16, return args.Get(0).(uint8), args.Get(1).(uint16), args.Get(2).([]byte), args.Error(3) } -func (m *MockBackend) GetRootHash(timeout time.Duration) ([]byte, error) { +func (m *MockBackend) GetRootHash(timeout time.Duration) (Hash, error) { args := m.Called(timeout) - return args.Get(0).([]byte), args.Error(1) + return args.Get(0).(Hash), args.Error(1) +} + +func (m *MockBackend) WriteMemory(address uint64, data []byte, timeout time.Duration) error { + args := m.Called(address, data, timeout) + return args.Error(0) } func (m *MockBackend) Delete() { @@ -79,31 +84,43 @@ func (m *MockBackend) CmioRxBufferSize() uint64 { return args.Get(0).(uint64) } +func (m *MockBackend) RunAndCollectRootHashes(mcycleEnd uint64, state *HashCollectorState, timeout time.Duration) (reason BreakReason, err error) { + args := m.Called(mcycleEnd, state, timeout) + return args.Get(0).(BreakReason), args.Error(1) +} + +func (m *MockBackend) GetProof(address uint64, log2size int32, timeout time.Duration) ([]Hash, error) { + args := m.Called(address, log2size, timeout) + return args.Get(0).([]Hash), args.Error(1) +} + // Helper functions for setting up common mock scenarios -func randomFakeHash() []byte { - data := make([]byte, HashSize) - _, _ = rand.Read(data) - return data +func randomFakeHash() Hash { + hash := Hash{} + _, _ = rand.Read(hash[:]) + return hash } // SetupAccepted configures the mock for a successful advance/inspect operation func (m *MockBackend) SetupAccepted(reqType requestType) { + hash := randomFakeHash() m.On("SendCmioResponse", uint16(reqType), mock.Anything, mock.AnythingOfType("time.Duration")).Return(nil) m.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil) m.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(0), nil) m.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonAccepted), hash[:], nil) m.On("CmioRxBufferSize").Return(uint64(1024)) } // SetupRejected configures the mock for a rejected advance/inspect operation func (m *MockBackend) SetupRejected(reqType requestType) { + hash := randomFakeHash() m.On("SendCmioResponse", uint16(reqType), mock.Anything, mock.AnythingOfType("time.Duration")).Return(nil) m.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil) m.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(0), nil) m.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonRejected), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonRejected), hash[:], nil) m.On("CmioRxBufferSize").Return(uint64(1024)) } @@ -119,11 +136,12 @@ func (m *MockBackend) SetupException(reqType requestType) { // SetupForLoad configures the mock for successful machine loading func (m *MockBackend) SetupForLoad() { + hash := randomFakeHash() m.On("NewMachineRuntimeConfig").Return(`{"concurrency":{"update_merkle_tree":1}}`, nil).Once() m.On("Load", mock.AnythingOfType("string"), mock.AnythingOfType("string"), mock.AnythingOfType("time.Duration")).Return(nil).Once() m.On("IsAtManualYield", mock.AnythingOfType("time.Duration")).Return(true, nil).Once() m.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), make([]byte, HashSize), nil).Once() + uint8(0), uint16(ManualYieldReasonAccepted), hash[:], nil).Once() } // SetupForCleanup configures the mock for cleanup operations @@ -148,7 +166,7 @@ func (m *MockBackend) SetupNotAtManualYield() { } // SetupForHash configures the mock for successful hash retrieval -func (m *MockBackend) SetupForHash(hash []byte) { +func (m *MockBackend) SetupForHash(hash Hash) { m.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return(hash, nil) } @@ -159,14 +177,14 @@ func NewMockBackend() *MockBackend { // MockBackendFactory creates a backend factory that returns the provided mock func MockBackendFactory(backend *MockBackend) BackendFactory { - return func(address string, timeout time.Duration) (Backend, string, uint32, error) { + return func(_ string, _ time.Duration) (Backend, string, uint32, error) { return backend, "127.0.0.1:12345", 12345, nil } } // FailingMockBackendFactory creates a backend factory that always fails func FailingMockBackendFactory(err error) BackendFactory { - return func(address string, timeout time.Duration) (Backend, string, uint32, error) { + return func(_ string, _ time.Duration) (Backend, string, uint32, error) { return nil, "", 0, err } } diff --git a/pkg/machine/implementation.go b/pkg/machine/implementation.go index 8350d784e..c0e582107 100644 --- a/pkg/machine/implementation.go +++ b/pkg/machine/implementation.go @@ -47,6 +47,36 @@ const ( // Constants const maxOutputs = 65536 // 2^16 +const CheckpointAddress uint64 = 0x7ffff000 +const TxBufferAddress uint64 = 0x60800000 +const HashLog2Size = 5 // 32 bytes + +const ( + // log2 value of the maximal number of micro instructions that emulates a big instruction + Log2UarchSpanToBarch uint64 = 20 + // log2 value of the maximal number of big instructions that executes an input + Log2BarchSpanToInput uint64 = 48 + // log2 value of the maximal number of inputs that allowed in an epoch + Log2InputSpanToEpoch uint64 = 24 + // gap of each leaf in the commitment tree, should use the same value as ArbitrationConstants.sol:log2step(0) + Log2Stride uint64 = 44 + // log2 value of the maximal number of micro instructions that executes an input + Log2UarchSpanToInput uint64 = Log2BarchSpanToInput + Log2UarchSpanToBarch // 68 + + UarchSpanToBarch uint64 = (1 << Log2UarchSpanToBarch) - 1 // 1_048_575 + BarchSpanToInput uint64 = (1 << Log2BarchSpanToInput) - 1 // 281_474_976_710_655 + InputSpanToEpoch uint64 = (1 << Log2InputSpanToEpoch) - 1 // 16_777_215 + + BigStepsInStride uint64 = 1 << (Log2Stride - Log2UarchSpanToBarch) // 16_777_216 + StrideCountInInput uint64 = 1 << (Log2BarchSpanToInput + Log2UarchSpanToBarch - Log2Stride) // 16_777_216 + + StrideCountInEpoch uint64 = 1 << (Log2InputSpanToEpoch + Log2BarchSpanToInput + Log2UarchSpanToBarch - Log2Stride) + + Log2StridesPerInput uint64 = Log2BarchSpanToInput + Log2UarchSpanToBarch - Log2Stride + + InputsPerEpoch uint64 = 1 << Log2InputSpanToEpoch +) + // machineImpl implements the Machine interface by wrapping an emulator.RemoteMachine type machineImpl struct { backend Backend @@ -83,23 +113,16 @@ func (m *machineImpl) Fork(ctx context.Context) (Machine, error) { // Hash returns the machine's merkle tree root hash func (m *machineImpl) Hash(ctx context.Context) (Hash, error) { - hash := Hash{} if err := checkContext(ctx); err != nil { - return hash, err + return Hash{}, err } - hashSlice, err := m.backend.GetRootHash(m.params.LoadDeadline) + hash, err := m.backend.GetRootHash(m.params.LoadDeadline) if err != nil { err := fmt.Errorf("could not get the machine's root hash: %w", err) return hash, errors.Join(ErrMachineInternal, err) } - if len(hashSlice) != HashSize { - err := fmt.Errorf("invalid machine root hash length: expected 32 bytes, got %d bytes", len(hashSlice)) - return hash, errors.Join(ErrMachineInternal, err) - } - - copy(hash[:], hashSlice) return hash, nil } @@ -126,30 +149,54 @@ func (m *machineImpl) OutputsHash(ctx context.Context) (Hash, error) { return outputsHash, nil } +func (m *machineImpl) OutputsHashProof(ctx context.Context) ([]Hash, error) { + if err := checkContext(ctx); err != nil { + return nil, err + } + siblings, err := m.backend.GetProof(TxBufferAddress, HashLog2Size, m.params.LoadDeadline) + if err != nil { + err := fmt.Errorf("could not get outputs hash machine proof: %w", err) + return nil, errors.Join(ErrMachineInternal, err) + } + return siblings, nil +} + +func (m *machineImpl) WriteCheckpointHash(ctx context.Context, hash Hash) error { + if err := checkContext(ctx); err != nil { + return err + } + + err := m.backend.WriteMemory(CheckpointAddress, hash[:], m.params.FastDeadline) + if err != nil { + err := fmt.Errorf("could not write checkpoint hash in to machine memory: %w", err) + return errors.Join(ErrMachineInternal, err) + } + return nil +} + // Advance sends an input to the machine and processes it -func (m *machineImpl) Advance(ctx context.Context, input []byte) (bool, []Output, []Report, Hash, error) { +func (m *machineImpl) Advance(ctx context.Context, input []byte, computeHashes bool) (bool, []Output, []Report, []Hash, uint64, Hash, error) { outputsHash := Hash{} - // TODO: return the exception reason - accepted, outputs, reports, data, err := m.process(ctx, input, AdvanceStateRequest) + accepted, outputs, reports, hashes, remaining, data, err := m.process(ctx, input, AdvanceStateRequest, computeHashes) if err != nil { - return accepted, outputs, reports, outputsHash, err + return accepted, outputs, reports, hashes, remaining, outputsHash, err } if accepted { if length := len(data); length != HashSize { err = fmt.Errorf("%w (it has %d bytes)", ErrHashLength, length) - return accepted, outputs, reports, outputsHash, err + return accepted, outputs, reports, hashes, remaining, outputsHash, err } copy(outputsHash[:], data) } - return accepted, outputs, reports, outputsHash, nil + return accepted, outputs, reports, hashes, remaining, outputsHash, nil } // Inspect sends a query to the machine and returns the results func (m *machineImpl) Inspect(ctx context.Context, query []byte) (bool, []Report, error) { // TODO: return the exception reason - accepted, _, reports, _, err := m.process(ctx, query, InspectStateRequest) + accepted, _, reports, _, _, _, err := m.process(ctx, query, InspectStateRequest, false) return accepted, reports, err } @@ -255,41 +302,50 @@ func (m *machineImpl) process( ctx context.Context, request []byte, reqType requestType, -) (bool, []Output, []Report, []byte, error) { + computeHashes bool, +) (bool, []Output, []Report, []Hash, uint64, []byte, error) { if err := checkContext(ctx); err != nil { - return false, nil, nil, nil, err + return false, nil, nil, nil, 0, nil, err } // Check payload length limit if length := uint64(len(request)); length > m.backend.CmioRxBufferSize() { - return false, nil, nil, nil, ErrPayloadLengthLimitExceeded + return false, nil, nil, nil, 0, nil, ErrPayloadLengthLimitExceeded } err := m.backend.SendCmioResponse(uint16(reqType), request, m.params.FastDeadline) if err != nil { - return false, nil, nil, nil, err + return false, nil, nil, nil, 0, nil, err } - outputs, reports, err := m.run(ctx, reqType) + outputs, reports, hashes, remaining, err := m.run(ctx, reqType, computeHashes) if err != nil { - return false, outputs, reports, nil, err + return false, outputs, reports, nil, 0, nil, err } accepted, data, err := m.wasLastRequestAccepted(ctx) - return accepted, outputs, reports, data, err + return accepted, outputs, reports, hashes, remaining, data, err } // run runs the machine until it manually yields. // It returns any collected responses. -func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, []Report, error) { +func (m *machineImpl) run(ctx context.Context, reqType requestType, computeHashes bool) ([]Output, []Report, []Hash, uint64, error) { startTime := time.Now() currentCycle, err := m.readMCycle(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, 0, err } limitCycle := currentCycle + m.params.AdvanceMaxCycles + stepTimeout := m.params.AdvanceIncDeadline + runTimeout := m.params.AdvanceMaxDeadline + if reqType == InspectStateRequest { + limitCycle = currentCycle + m.params.InspectMaxCycles + stepTimeout = m.params.InspectIncDeadline + runTimeout = m.params.InspectMaxDeadline + } + m.logger.Debug("run", "startingCycle", currentCycle, "limitCycle", limitCycle, @@ -298,11 +354,27 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ outputs := []Output{} reports := []Report{} - stepTimeout := m.params.AdvanceIncDeadline - runTimeout := m.params.AdvanceMaxDeadline - if reqType == InspectStateRequest { - stepTimeout = m.params.InspectIncDeadline - runTimeout = m.params.InspectMaxDeadline + var hashCollectorState *HashCollectorState + if computeHashes { + hashCollectorState = &HashCollectorState{ + Period: BigStepsInStride, + Phase: 0, + MaxHashes: 0, + BundleLog2: 0, + Hashes: []Hash{}, + } + } + hashes := func() []Hash { + if computeHashes { + return hashCollectorState.Hashes + } + return []Hash{} + } + remainingMetaCycles := func() uint64 { + if computeHashes { + return StrideCountInInput - uint64(len(hashCollectorState.Hashes)) + } + return 0 } for { @@ -312,17 +384,18 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ // Steps the machine as many times as needed until it manually/automatically yields. for yt == nil { if time.Since(startTime) > runTimeout { - return outputs, reports, fmt.Errorf("run operation timed out: %w", ErrDeadlineExceeded) + werr := fmt.Errorf("run operation timed out: %w", ErrDeadlineExceeded) + return outputs, reports, hashes(), remainingMetaCycles(), werr } - yt, currentCycle, err = m.step(ctx, currentCycle, limitCycle, stepTimeout) + yt, currentCycle, err = m.runIncrementInterval(ctx, currentCycle, limitCycle, hashCollectorState, stepTimeout) if err != nil && err != ErrReachedTargetMcycle { - return outputs, reports, err + return outputs, reports, hashes(), remainingMetaCycles(), err } } // Returns with the responses when the machine manually yields. if *yt == ManualYield { - return outputs, reports, nil + return outputs, reports, hashes(), remainingMetaCycles(), nil } // Asserts the machine yielded automatically. @@ -333,7 +406,8 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ _, yieldReason, data, err := m.backend.ReceiveCmioRequest(m.params.FastDeadline) if err != nil { - return outputs, reports, fmt.Errorf("could not read output/report: %w", err) + werr := fmt.Errorf("could not read output/report: %w", err) + return outputs, reports, hashes(), remainingMetaCycles(), werr } switch automaticYieldReason(yieldReason) { @@ -342,7 +416,7 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ case AutomaticYieldReasonOutput: // TODO: should we remove this? if len(outputs) == maxOutputs { - return outputs, reports, ErrOutputsLimitExceeded + return outputs, reports, hashes(), remainingMetaCycles(), ErrOutputsLimitExceeded } outputs = append(outputs, data) case AutomaticYieldReasonReport: @@ -353,14 +427,15 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ } } -// step runs the machine for at most machine.inc cycles (or the amount of cycles left to reach +// runIncrementInterval runs the machine for at most machine.inc cycles (or the amount of cycles left to reach // limitCycle, whichever is the lowest). -// It returns the yield type and the machine cycle after the step. -// If the machine did not manually/automatically yield, the yield type will be nil (meaning step +// It returns the yield type and the machine cycle after the increment interval. +// If the machine did not manually/automatically yield, the yield type will be nil (meaning runIncrementInterval // must be called again to complete the computation). -func (m *machineImpl) step(ctx context.Context, +func (m *machineImpl) runIncrementInterval(ctx context.Context, currentCycle Cycle, limitCycle Cycle, + hashCollectorState *HashCollectorState, timeout time.Duration, ) (*yieldType, Cycle, error) { startingCycle := currentCycle @@ -376,7 +451,7 @@ func (m *machineImpl) step(ctx context.Context, m.logger.Debug("machine step before run", "currentCycle", currentCycle, "increment", increment) // Runs the machine. - breakReason, err := m.backend.Run(currentCycle+increment, timeout) + breakReason, err := m.backend_run(currentCycle+increment, hashCollectorState, timeout) if err != nil { return nil, 0, err } @@ -414,6 +489,14 @@ func (m *machineImpl) step(ctx context.Context, } } +func (m *machineImpl) backend_run(mcycleEnd uint64, hashCollectorState *HashCollectorState, timeout time.Duration) (BreakReason, error) { + if hashCollectorState != nil { + m.logger.Debug("Running with root hash collection") + return m.backend.RunAndCollectRootHashes(mcycleEnd, hashCollectorState, timeout) + } + return m.backend.Run(mcycleEnd, timeout) +} + // Helper functions func checkContext(ctx context.Context) error { diff --git a/pkg/machine/implementation_test.go b/pkg/machine/implementation_test.go index d38a78889..dae8aa1de 100644 --- a/pkg/machine/implementation_test.go +++ b/pkg/machine/implementation_test.go @@ -102,12 +102,12 @@ func (s *ImplementationSuite) TestHash() { hash, err := machine.Hash(ctx) require.NoError(err) - require.Equal(expectedHash, hash[:]) + require.Equal(expectedHash, hash) mockBackend.AssertExpectations(s.T()) // Test hash with backend error mockBackend2 := NewMockBackend() - mockBackend2.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return(([]byte)(nil), errors.New("hash failed")) + mockBackend2.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return((Hash)(Hash{}), errors.New("hash failed")) machine2 := &machineImpl{ backend: mockBackend2, logger: s.logger, @@ -121,27 +121,11 @@ func (s *ImplementationSuite) TestHash() { require.Contains(err.Error(), "could not get the machine's root hash") mockBackend2.AssertExpectations(s.T()) - // Test hash with invalid length - mockBackend3 := NewMockBackend() - mockBackend3.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return(make([]byte, 16), nil) // Invalid length - machine3 := &machineImpl{ - backend: mockBackend3, - logger: s.logger, - params: model.ExecutionParameters{ - LoadDeadline: time.Second * 5, - }, - } - _, err = machine3.Hash(ctx) - require.Error(err) - require.ErrorIs(err, ErrMachineInternal) - require.Contains(err.Error(), "invalid machine root hash length") - // Test hash with canceled context canceledCtx, cancel := context.WithCancel(ctx) cancel() _, err = machine.Hash(canceledCtx) require.ErrorIs(err, ErrCanceled) - mockBackend3.AssertExpectations(s.T()) } // Test OutputsHash method @@ -153,7 +137,7 @@ func (s *ImplementationSuite) TestOutputsHash() { mockBackend := NewMockBackend() expectedHash := randomFakeHash() mockBackend.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), expectedHash, nil) + uint8(0), uint16(ManualYieldReasonAccepted), expectedHash[:], nil) machine := &machineImpl{ backend: mockBackend, @@ -165,7 +149,7 @@ func (s *ImplementationSuite) TestOutputsHash() { hash, err := machine.OutputsHash(ctx) require.NoError(err) - require.Equal(expectedHash, hash[:]) + require.Equal(expectedHash, hash) mockBackend.AssertExpectations(s.T()) // Test outputs hash with rejected request @@ -239,7 +223,7 @@ func (s *ImplementationSuite) TestAdvance() { } input := []byte("test input") - accepted, outputs, reports, hash, err := machine.Advance(ctx, input) + accepted, outputs, reports, _, _, hash, err := machine.Advance(ctx, input, false) require.NoError(err) require.True(accepted) require.Empty(outputs) @@ -261,7 +245,7 @@ func (s *ImplementationSuite) TestAdvance() { AdvanceMaxDeadline: time.Second * 10, }, } - accepted, outputs, reports, hash, err = machine2.Advance(ctx, input) + accepted, outputs, reports, _, _, hash, err = machine2.Advance(ctx, input, false) require.NoError(err) require.False(accepted) require.Empty(outputs) @@ -283,7 +267,7 @@ func (s *ImplementationSuite) TestAdvance() { AdvanceMaxDeadline: time.Second * 10, }, } - accepted, outputs, reports, hash, err = machine3.Advance(ctx, input) + accepted, outputs, reports, _, _, hash, err = machine3.Advance(ctx, input, false) require.ErrorIs(err, ErrException) require.False(accepted) require.Equal(Hash{}, hash) @@ -304,7 +288,7 @@ func (s *ImplementationSuite) TestAdvance() { }, } largeInput := make([]byte, 10) - _, _, _, _, err = machine4.Advance(ctx, largeInput) + _, _, _, _, _, _, err = machine4.Advance(ctx, largeInput, false) require.ErrorIs(err, ErrPayloadLengthLimitExceeded) mockBackend4.AssertExpectations(s.T()) @@ -327,7 +311,7 @@ func (s *ImplementationSuite) TestAdvance() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine5.Advance(ctx, input) + _, _, _, _, _, _, err = machine5.Advance(ctx, input, false) require.Error(err) require.ErrorIs(err, ErrHashLength) mockBackend5.AssertExpectations(s.T()) @@ -560,8 +544,9 @@ func (s *ImplementationSuite) TestHelperMethods() { // Test wasLastRequestAccepted mockBackend3 := NewMockBackend() + expectedHash3 := randomFakeHash() mockBackend3.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonAccepted), expectedHash3[:], nil) machine3 := &machineImpl{ backend: mockBackend3, logger: s.logger, @@ -576,8 +561,9 @@ func (s *ImplementationSuite) TestHelperMethods() { mockBackend3.AssertExpectations(s.T()) mockBackend4 := NewMockBackend() + expectedHash4 := randomFakeHash() mockBackend4.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonRejected), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonRejected), expectedHash4[:], nil) machine4 := &machineImpl{ backend: mockBackend4, logger: s.logger, @@ -672,7 +658,7 @@ func (s *ImplementationSuite) TestRun() { }, } - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Empty(outputs) require.Empty(reports) @@ -692,7 +678,7 @@ func (s *ImplementationSuite) TestRun() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, err = machine2.run(ctx, AdvanceStateRequest) + _, _, _, _, err = machine2.run(ctx, AdvanceStateRequest, false) require.Error(err) require.Contains(err.Error(), "read cycle failed") mockBackend2.AssertExpectations(s.T()) @@ -715,7 +701,7 @@ func (s *ImplementationSuite) TestRun() { }, } - _, _, err = machine3.run(ctx, AdvanceStateRequest) + _, _, _, _, err = machine3.run(ctx, AdvanceStateRequest, false) require.NoError(err) mockBackend3.AssertExpectations(s.T()) @@ -740,87 +726,87 @@ func (s *ImplementationSuite) TestStep() { mockBackend.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(150), nil) machine.backend = mockBackend - yieldType, cycle, err := machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err := machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.NoError(err) require.NotNil(yieldType) require.Equal(ManualYield, *yieldType) require.Equal(uint64(150), cycle) mockBackend.AssertExpectations(s.T()) - // Test step with automatic yield + // Test runIncrementInterval with automatic yield mockBackend2 := NewMockBackend() mockBackend2.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedAutomatically, nil) mockBackend2.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(200), nil) machine.backend = mockBackend2 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.NoError(err) require.NotNil(yieldType) require.Equal(AutomaticYield, *yieldType) require.Equal(uint64(200), cycle) mockBackend2.AssertExpectations(s.T()) - // Test step with soft yield (no yield) + // Test runIncrementInterval with soft yield (no yield) mockBackend3 := NewMockBackend() mockBackend3.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedSoftly, nil) mockBackend3.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(150), nil) machine.backend = mockBackend3 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.NoError(err) require.Nil(yieldType) require.Equal(uint64(150), cycle) mockBackend3.AssertExpectations(s.T()) - // Test step with reached target mcycle + // Test runIncrementInterval with reached target mcycle mockBackend4 := NewMockBackend() mockBackend4.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(ReachedTargetMcycle, nil) mockBackend4.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(1000), nil) machine.backend = mockBackend4 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.ErrorIs(err, ErrReachedTargetMcycle) require.Nil(yieldType) require.Equal(uint64(1000), cycle) mockBackend4.AssertExpectations(s.T()) - // Test step with halted + // Test runIncrementInterval with halted mockBackend5 := NewMockBackend() mockBackend5.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(Halted, nil) mockBackend5.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(500), nil) machine.backend = mockBackend5 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.ErrorIs(err, ErrHalted) require.Nil(yieldType) require.Equal(uint64(500), cycle) - // Test step already at limit cycle - yieldType, cycle, err = machine.step(ctx, 1000, 1000, time.Second) + // Test runIncrementInterval already at limit cycle + yieldType, cycle, err = machine.runIncrementInterval(ctx, 1000, 1000, nil, time.Second) require.ErrorIs(err, ErrReachedLimitMcycle) require.Nil(yieldType) require.Equal(uint64(0), cycle) mockBackend5.AssertExpectations(s.T()) - // Test step with backend run error + // Test runIncrementInterval with backend run error mockBackend6 := NewMockBackend() mockBackend6.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration"), ).Return(BreakReason(0), errors.New("run failed")) machine.backend = mockBackend6 - yieldType, _, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, _, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.Error(err) require.Contains(err.Error(), "run failed") require.Nil(yieldType) mockBackend6.AssertExpectations(s.T()) - // Test step with read cycle error + // Test runIncrementInterval with read cycle error mockBackend7 := NewMockBackend() mockBackend7.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil) mockBackend7.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(0), errors.New("read cycle failed")) machine.backend = mockBackend7 - yieldType, _, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, _, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.Error(err) require.Contains(err.Error(), "read cycle failed") require.Nil(yieldType) @@ -854,7 +840,7 @@ func (s *ImplementationSuite) TestProcess() { } input := []byte("test input") - accepted, outputs, reports, data, err := machine.process(ctx, input, AdvanceStateRequest) + accepted, outputs, reports, _, _, data, err := machine.process(ctx, input, AdvanceStateRequest, false) require.NoError(err) require.True(accepted) require.Empty(outputs) @@ -876,7 +862,7 @@ func (s *ImplementationSuite) TestProcess() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine2.process(ctx, input, AdvanceStateRequest) + _, _, _, _, _, _, err = machine2.process(ctx, input, AdvanceStateRequest, false) require.ErrorIs(err, ErrPayloadLengthLimitExceeded) mockBackend2.AssertExpectations(s.T()) @@ -899,7 +885,7 @@ func (s *ImplementationSuite) TestProcess() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine3.process(ctx, input, AdvanceStateRequest) + _, _, _, _, _, _, err = machine3.process(ctx, input, AdvanceStateRequest, false) require.Error(err) require.Contains(err.Error(), "send failed") mockBackend3.AssertExpectations(s.T()) @@ -920,7 +906,7 @@ func (s *ImplementationSuite) TestProcess() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine4.process(ctx, input, AdvanceStateRequest) + _, _, _, _, _, _, err = machine4.process(ctx, input, AdvanceStateRequest, false) require.Error(err) require.Contains(err.Error(), "read cycle failed") mockBackend4.AssertExpectations(s.T()) @@ -955,7 +941,7 @@ func (s *ImplementationSuite) TestRunWithAutomaticYields() { mockBackend.On("Run", uint64(150), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil).Once() mockBackend.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(100), nil).Once() - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Len(outputs, 1) require.Equal([]byte("test output"), outputs[0]) @@ -993,7 +979,7 @@ func (s *ImplementationSuite) TestRunWithAutomaticYieldsReports() { mockBackend.On("Run", uint64(150), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil).Once() mockBackend.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(100), nil).Once() - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Empty(outputs) require.Len(reports, 1) @@ -1057,7 +1043,7 @@ func (s *ImplementationSuite) TestMultipleAutomaticYields() { mockBackend.On("Run", uint64(150), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil).Once() mockBackend.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(60), nil).Once() - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Len(outputs, 2) diff --git a/pkg/machine/libcartesi.go b/pkg/machine/libcartesi.go index aadee3b43..5def485ad 100644 --- a/pkg/machine/libcartesi.go +++ b/pkg/machine/libcartesi.go @@ -4,7 +4,9 @@ package machine import ( + "encoding/base64" "encoding/json" + "errors" "fmt" "time" @@ -16,16 +18,82 @@ type RemoteMachineInterface interface { SetTimeout(timeoutMs int64) error Load(dir string, runtimeConfig string) error Run(mcycleEnd uint64) (emulator.BreakReason, error) - GetRootHash() ([]byte, error) + GetRootHash() (emulator.Hash, error) + GetProof(address uint64, log2size int32) (string, error) ReadReg(reg emulator.RegID) (uint64, error) SendCmioResponse(reason uint16, data []byte) error ReceiveCmioRequest() (uint8, uint16, []byte, error) + WriteMemory(address uint64, data []byte) error Store(directory string) error Delete() ForkServer() (*emulator.RemoteMachine, string, uint32, error) ShutdownServer() error } +type proofJson struct { + Log2RootSize int32 `json:"log2_root_size"` + Log2TargetSize int32 `json:"log2_target_size"` + RootHash Hash `json:"root_hash"` + Siblings []Hash `json:"sibling_hashes"` + TargetAddress uint64 `json:"target_address"` + TargetHash Hash `json:"target_hash"` +} + +func decodeB64To32(dst *Hash, s string) error { + // accepts Std (with '=') and Raw (without '=') + n, err := base64.StdEncoding.Decode(dst[:], []byte(s)) + if err == nil { + if n != HashSize { + return fmt.Errorf("provided hash base64 size is %d bytes (expected %d)", n, HashSize) + } + return nil + } + + // fallback RawStdEncoding + n, err2 := base64.RawStdEncoding.Decode(dst[:], []byte(s)) + if err2 != nil { + return fmt.Errorf("invalid hash base64 (std: %v, raw: %w)", err, err2) + } + if n != HashSize { + return fmt.Errorf("provided hash base64 size is %d bytes (expected %d)", n, HashSize) + } + return nil +} + +func (p *proofJson) UnmarshalJSON(data []byte) error { + var aux struct { + Log2RootSize int32 `json:"log2_root_size"` + Log2TargetSize int32 `json:"log2_target_size"` + RootHash string `json:"root_hash"` + Siblings []string `json:"sibling_hashes"` + TargetAddress uint64 `json:"target_address"` + TargetHash string `json:"target_hash"` + } + + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + p.Log2RootSize = aux.Log2RootSize + p.Log2TargetSize = aux.Log2TargetSize + p.TargetAddress = aux.TargetAddress + + if err := decodeB64To32(&p.RootHash, aux.RootHash); err != nil { + return fmt.Errorf("root_hash: %w", err) + } + if err := decodeB64To32(&p.TargetHash, aux.TargetHash); err != nil { + return fmt.Errorf("target_hash: %w", err) + } + + p.Siblings = make([]Hash, len(aux.Siblings)) + for i, s := range aux.Siblings { + if err := decodeB64To32(&p.Siblings[i], s); err != nil { + return fmt.Errorf("sibling_hashes[%d]: %w", i, err) + } + } + return nil +} + func NewLibCartesiBackend(address string, timeout time.Duration) (Backend, string, uint32, error) { rm, address, pid, err := emulator.SpawnServer(address, timeout) if err != nil { @@ -54,13 +122,30 @@ func (e *LibCartesiBackend) Run(mcycleEnd uint64, timeout time.Duration) (BreakR return BreakReason(br), err } -func (e *LibCartesiBackend) GetRootHash(timeout time.Duration) ([]byte, error) { +func (e *LibCartesiBackend) GetRootHash(timeout time.Duration) (Hash, error) { if err := e.inner.SetTimeout(timeout.Milliseconds()); err != nil { - return nil, fmt.Errorf("failed to set operation timeout: %w", err) + return Hash{}, fmt.Errorf("failed to set operation timeout: %w", err) } return e.inner.GetRootHash() } +func (e *LibCartesiBackend) GetProof(address uint64, log2size int32, timeout time.Duration) ([]Hash, error) { + if err := e.inner.SetTimeout(timeout.Milliseconds()); err != nil { + return nil, fmt.Errorf("failed to set operation timeout: %w", err) + } + jsonMessage, err := e.inner.GetProof(address, log2size) + if err != nil { + return nil, fmt.Errorf("failed to get proof: %w", err) + } + proof := &proofJson{} + err = json.Unmarshal([]byte(jsonMessage), proof) + if err != nil { + println("Failed to unmarshal proof JSON:", err.Error()) + return nil, fmt.Errorf("failed to unmarshal proof JSON: %w", err) + } + return proof.Siblings, nil +} + func (e *LibCartesiBackend) IsAtManualYield(timeout time.Duration) (bool, error) { if err := e.inner.SetTimeout(timeout.Milliseconds()); err != nil { return false, fmt.Errorf("failed to set operation timeout: %w", err) @@ -104,6 +189,13 @@ func (e *LibCartesiBackend) Store(directory string, timeout time.Duration) error return e.inner.Store(directory) } +func (e *LibCartesiBackend) WriteMemory(address uint64, data []byte, timeout time.Duration) error { + if err := e.inner.SetTimeout(timeout.Milliseconds()); err != nil { + return fmt.Errorf("failed to set operation timeout: %w", err) + } + return e.inner.WriteMemory(address, data) +} + func (e *LibCartesiBackend) Delete() { e.inner.Delete() } @@ -138,3 +230,125 @@ func (e *LibCartesiBackend) NewMachineRuntimeConfig() (string, error) { func (e *LibCartesiBackend) CmioRxBufferSize() uint64 { return 1 << emulator.CmioRxBufferLog2Size } + +func (e *LibCartesiBackend) RunAndCollectRootHashes( + mcycleEnd uint64, + state *HashCollectorState, + timeout time.Duration, +) (reason BreakReason, err error) { + if state == nil { + return Failed, errors.New("nil state") + } + if state.Period == 0 { + return Failed, errors.New("State.Period must be > 0") + } + + // Set up timeout management: calculate absolute deadline if timeout is specified + var deadline time.Time + hasDeadline := timeout > 0 + if hasDeadline { + deadline = time.Now().Add(timeout) + } + remaining := func() time.Duration { + if !hasDeadline { + return 0 + } + d := time.Until(deadline) + if d <= 0 { + return time.Nanosecond + } + return d + } + checkDeadline := func() error { + if hasDeadline && time.Now().After(deadline) { + return errors.New("runWithRootHashes: deadline exceeded") + } + return nil + } + + if err := checkDeadline(); err != nil { + return Failed, err + } + cur, err := e.ReadMCycle(remaining()) + if err != nil { + return Failed, err + } + + collected := (uint64)(0) + + for { + if err := checkDeadline(); err != nil { + return Failed, err + } + if cur >= mcycleEnd { + // No more cycles to execute + return ReachedTargetMcycle, nil + } + + // Calculate the next collection point: distance to the next multiple of the period + // This ensures we collect hashes at regular intervals aligned with the period + var step uint64 + if r := state.Phase % state.Period; r == 0 { + step = state.Period + } else { + step = state.Period - r + } + + nextHashCycle := cur + step + target := min(nextHashCycle, mcycleEnd) + + // Run the machine until target cycle or until it yields/halts + br, err := e.Run(target, remaining()) + if err != nil { + return Failed, err + } + + // Check where we stopped after the run + if err := checkDeadline(); err != nil { + return Failed, err + } + pos, err := e.ReadMCycle(remaining()) + if err != nil { + return Failed, err + } + + advanced := pos - cur + state.Phase = (state.Phase + advanced) % state.Period + cur = pos + + // Only collect hash if we reached the exact boundary (pos == nextHashCycle) + // This ensures "hash after each complete period", matching the C API behavior + // and avoiding duplicate collections if the machine stops early due to yields + if pos == nextHashCycle { + if err := checkDeadline(); err != nil { + return Failed, err + } + h, err := e.GetRootHash(remaining()) + if err != nil { + return Failed, err + } + + state.Hashes = append(state.Hashes, h) + + collected++ + if state.MaxHashes > 0 && collected >= state.MaxHashes { + return YieldedSoftly, nil + } + } + + switch br { + case ReachedTargetMcycle: + if cur >= mcycleEnd { + return ReachedTargetMcycle, nil + } + case YieldedManually: + return br, nil + case YieldedAutomatically, YieldedSoftly, Halted: + return br, nil + case Failed: + return Failed, errors.New("run failed") + default: + return Failed, errors.New("unknown break reason") + } + } +} diff --git a/pkg/machine/libcartesi_test.go b/pkg/machine/libcartesi_test.go index 9b21b6953..4f78d08b1 100644 --- a/pkg/machine/libcartesi_test.go +++ b/pkg/machine/libcartesi_test.go @@ -98,10 +98,7 @@ func (s *LibCartesiSuite) TestRun() { func (s *LibCartesiSuite) TestGetRootHash() { require := s.Require() - expectedHash := make([]byte, 32) - for i := range expectedHash { - expectedHash[i] = byte(i) - } + expectedHash := randomFakeHash() // Test successful get root hash s.mockRemoteMachine.On("SetTimeout", int64(5000)).Return(nil) @@ -126,7 +123,7 @@ func (s *LibCartesiSuite) TestGetRootHash() { s.mockRemoteMachine = new(MockRemoteMachine) s.backend = &LibCartesiBackend{inner: s.mockRemoteMachine} s.mockRemoteMachine.On("SetTimeout", int64(5000)).Return(nil) - s.mockRemoteMachine.On("GetRootHash").Return([]byte(nil), errors.New("hash error")) + s.mockRemoteMachine.On("GetRootHash").Return(Hash{}, errors.New("hash error")) _, err = s.backend.GetRootHash(5 * time.Second) require.Error(err) @@ -437,9 +434,14 @@ func (m *MockRemoteMachine) Run(mcycleEnd uint64) (emulator.BreakReason, error) return args.Get(0).(emulator.BreakReason), args.Error(1) } -func (m *MockRemoteMachine) GetRootHash() ([]byte, error) { +func (m *MockRemoteMachine) GetRootHash() (emulator.Hash, error) { args := m.Called() - return args.Get(0).([]byte), args.Error(1) + return args.Get(0).(Hash), args.Error(1) +} + +func (m *MockRemoteMachine) GetProof(address uint64, log2size int32) (string, error) { + args := m.Called(address, log2size) + return args.Get(0).(string), args.Error(1) } func (m *MockRemoteMachine) ReadReg(reg emulator.RegID) (uint64, error) { @@ -462,6 +464,11 @@ func (m *MockRemoteMachine) Store(directory string) error { return args.Error(0) } +func (m *MockRemoteMachine) WriteMemory(address uint64, data []byte) error { + args := m.Called(address, data) + return args.Error(0) +} + func (m *MockRemoteMachine) Delete() { m.Called() } diff --git a/pkg/machine/machine.go b/pkg/machine/machine.go index 45e6d1735..2e29e3164 100644 --- a/pkg/machine/machine.go +++ b/pkg/machine/machine.go @@ -52,14 +52,18 @@ type Machine interface { Fork(ctx context.Context) (Machine, error) // Hash returns the machine's merkle tree root hash. Hash(ctx context.Context) (Hash, error) - // OutputsHash returns the outputs hash stored in the cmio tx buffer. + // OutputsHash returns the outputs merkle root hash stored in the cmio tx buffer. OutputsHash(ctx context.Context) (Hash, error) + // OutputsHashProof returns the proof that the outputs merkle root hash is stored in the cmio tx buffer. + OutputsHashProof(ctx context.Context) ([]Hash, error) + // WriteCheckpointHash writes the given checkpoint hash to the machine memory. + WriteCheckpointHash(ctx context.Context, hash Hash) error // Advance sends an input to the machine. // It returns a boolean indicating whether or not the request was accepted. // It also returns the corresponding outputs, reports, and the hash of the outputs. // In case the request is not accepted, the function does not return outputs. - Advance(ctx context.Context, input []byte) (bool, []Output, []Report, Hash, error) + Advance(ctx context.Context, input []byte, computeHashes bool) (bool, []Output, []Report, []Hash, uint64, Hash, error) // Inspect sends a query to the machine. // It returns a boolean indicating whether or not the request was accepted diff --git a/pkg/machine/machine_test.go b/pkg/machine/machine_test.go index 5325e2ec2..868acd8cc 100644 --- a/pkg/machine/machine_test.go +++ b/pkg/machine/machine_test.go @@ -214,7 +214,7 @@ func (s *MachineSuite) TestMachineInterface() { require.Equal(Hash{6, 7, 8, 9, 10}, outputsHash) // Test Advance - accepted, outputs, reports, advanceHash, err := machine.Advance(ctx, []byte("input")) + accepted, outputs, reports, _, _, advanceHash, err := machine.Advance(ctx, []byte("input"), false) require.NoError(err) require.True(accepted) require.Len(outputs, 2) @@ -279,7 +279,7 @@ func (s *MachineSuite) TestMachineInterfaceErrors() { require.Contains(err.Error(), "outputs hash error") // Test Advance error - _, _, _, _, err = machine.Advance(ctx, []byte("input")) + _, _, _, _, _, _, err = machine.Advance(ctx, []byte("input"), false) require.Error(err) require.Contains(err.Error(), "advance error") @@ -310,11 +310,18 @@ type MockMachine struct { OutputsHashReturn Hash OutputsHashError error - AdvanceAcceptedReturn bool - AdvanceOutputsReturn []Output - AdvanceReportsReturn []Report - AdvanceHashReturn Hash - AdvanceError error + OutputsHashProofReturn []Hash + OutputsHashProofError error + + CheckpointHashError error + + AdvanceAcceptedReturn bool + AdvanceOutputsReturn []Output + AdvanceReportsReturn []Report + AdvanceHashesReturn []Hash + AdvanceRemainingReturn uint64 + AdvanceHashReturn Hash + AdvanceError error InspectAcceptedReturn bool InspectReportsReturn []Report @@ -339,12 +346,22 @@ func (m *MockMachine) OutputsHash(_ context.Context) (Hash, error) { return m.OutputsHashReturn, m.OutputsHashError } -func (m *MockMachine) Advance(_ context.Context, _ []byte) ( - bool, []Output, []Report, Hash, error, +func (m *MockMachine) OutputsHashProof(_ context.Context) ([]Hash, error) { + return m.OutputsHashProofReturn, m.OutputsHashProofError +} + +func (m *MockMachine) WriteCheckpointHash(_ context.Context, _ Hash) error { + return m.CheckpointHashError +} + +func (m *MockMachine) Advance(_ context.Context, _ []byte, _ bool) ( + bool, []Output, []Report, []Hash, uint64, Hash, error, ) { return m.AdvanceAcceptedReturn, m.AdvanceOutputsReturn, m.AdvanceReportsReturn, + m.AdvanceHashesReturn, + m.AdvanceRemainingReturn, m.AdvanceHashReturn, m.AdvanceError } diff --git a/pkg/service/service.go b/pkg/service/service.go index c689580c4..97660c492 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -103,6 +103,7 @@ type CreateInfo struct { Impl ServiceImpl ServeMux *http.ServeMux Context context.Context + Cancel context.CancelFunc } // Service stores runtime information. @@ -151,7 +152,10 @@ func Create(ctx context.Context, c *CreateInfo, s *Service) error { s.Context = c.Context } if s.Cancel == nil { - s.Context, s.Cancel = context.WithCancel(c.Context) + if c.Cancel == nil { + s.Context, c.Cancel = context.WithCancel(c.Context) + } + s.Cancel = c.Cancel } // ticker @@ -246,6 +250,7 @@ func (s *Service) Stop(force bool) []error { elapsed := time.Since(start) s.Running.Store(false) + s.Cancel() if len(errs) > 0 { s.Logger.Error("Stop", "force", force, diff --git a/test/tooling/db/db.go b/test/tooling/db/db.go index c981b5168..747f0234f 100644 --- a/test/tooling/db/db.go +++ b/test/tooling/db/db.go @@ -22,18 +22,18 @@ func SetupTestPostgres(endpoint string) error { schema, err := schema.New(endpoint) if err != nil { - return err + return fmt.Errorf("failed to create schema: %w", err) } defer schema.Close() err = schema.Downgrade() if err != nil { - return err + return fmt.Errorf("failed to downgrade schema: %w", err) } err = schema.Upgrade() if err != nil { - return err + return fmt.Errorf("failed to upgrade schema: %w", err) } return nil diff --git a/test/validator/validator_test.go b/test/validator/validator_test.go index c8d2e6034..052c1a5ed 100644 --- a/test/validator/validator_test.go +++ b/test/validator/validator_test.go @@ -91,6 +91,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPristineClaim() { DataAvailability: model.DataAvailability_InputBox[:], EpochLength: 10, State: model.ApplicationState_Enabled, + ConsensusType: model.Consensus_Authority, } _, err := s.repository.CreateApplication(s.ctx, app, false) s.Require().Nil(err) @@ -120,10 +121,12 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPristineClaim() { // Store the input advance result machinehash1 := crypto.Keccak256Hash([]byte("machine-hash1")) advanceResult := model.AdvanceResult{ - InputIndex: input.Index, - Status: model.InputCompletionStatus_Accepted, - OutputsHash: pristineRootHash, - MachineHash: &machinehash1, + InputIndex: input.Index, + Status: model.InputCompletionStatus_Accepted, + OutputsProof: model.OutputsProof{ + OutputsHash: pristineRootHash, + MachineHash: machinehash1, + }, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -134,12 +137,12 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPristineClaim() { updatedEpoch, err := s.repository.GetEpoch(s.ctx, app.IApplicationAddress.String(), epoch.Index) s.Require().Nil(err) s.Require().NotNil(updatedEpoch) - s.Require().NotNil(updatedEpoch.ClaimHash) + s.Require().NotNil(updatedEpoch.OutputsMerkleRoot) // epoch status was updated s.Equal(model.EpochStatus_ClaimComputed, updatedEpoch.Status) // claim is pristine claim - s.Equal(pristineRootHash, *updatedEpoch.ClaimHash) + s.Equal(pristineRootHash, *updatedEpoch.OutputsMerkleRoot) }) } @@ -157,6 +160,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { DataAvailability: model.DataAvailability_InputBox[:], EpochLength: 10, State: model.ApplicationState_Enabled, + ConsensusType: model.Consensus_Authority, } _, err := s.repository.CreateApplication(s.ctx, app, false) s.Require().Nil(err) @@ -164,18 +168,19 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { // insert the first epoch with a claim firstEpochClaim := pristineRootHash firstEpoch := model.Epoch{ - ApplicationID: 1, - Index: 0, - VirtualIndex: 0, - Status: model.EpochStatus_ClaimComputed, - ClaimHash: &firstEpochClaim, - FirstBlock: 0, - LastBlock: 9, + ApplicationID: 1, + Index: 0, + VirtualIndex: 0, + Status: model.EpochStatus_ClaimComputed, + OutputsMerkleRoot: &firstEpochClaim, + FirstBlock: 0, + LastBlock: 9, } // we add an input to the epoch because they must have at least one and // because without it the claim hash check will fail firstEpochInput := model.Input{ + EpochIndex: firstEpoch.Index, Index: 0, BlockNumber: 9, RawData: []byte("data"), @@ -194,6 +199,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { } secondEpochInput := model.Input{ + EpochIndex: secondEpoch.Index, Index: 1, BlockNumber: 19, RawData: []byte("data2"), @@ -210,10 +216,13 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { // Store the input advance result machinehash1 := crypto.Keccak256Hash([]byte("machine-hash1")) advanceResult := model.AdvanceResult{ - InputIndex: firstEpochInput.Index, - Status: model.InputCompletionStatus_Accepted, - OutputsHash: firstEpochClaim, - MachineHash: &machinehash1, + EpochIndex: firstEpochInput.EpochIndex, + InputIndex: firstEpochInput.Index, + Status: model.InputCompletionStatus_Accepted, + OutputsProof: model.OutputsProof{ + OutputsHash: firstEpochClaim, + MachineHash: machinehash1, + }, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -224,12 +233,15 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { // Store the input advance result machinehash2 := crypto.Keccak256Hash([]byte("machine-hash2")) advanceResult = model.AdvanceResult{ + EpochIndex: secondEpochInput.EpochIndex, InputIndex: secondEpochInput.Index, Status: model.InputCompletionStatus_Accepted, // since there are no new outputs in the second epoch, // the machine OutputsHash will remain the same - OutputsHash: firstEpochClaim, - MachineHash: &machinehash2, + OutputsProof: model.OutputsProof{ + OutputsHash: firstEpochClaim, + MachineHash: machinehash2, + }, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -240,12 +252,12 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { updatedEpoch, err := s.repository.GetEpoch(s.ctx, app.IApplicationAddress.String(), secondEpoch.Index) s.Require().Nil(err) s.Require().NotNil(updatedEpoch) - s.Require().NotNil(updatedEpoch.ClaimHash) + s.Require().NotNil(updatedEpoch.OutputsMerkleRoot) // epoch status was updated s.Equal(model.EpochStatus_ClaimComputed, updatedEpoch.Status) // claim is the same from previous epoch - s.Equal(firstEpochClaim, *updatedEpoch.ClaimHash) + s.Equal(firstEpochClaim, *updatedEpoch.OutputsMerkleRoot) }) } @@ -260,6 +272,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() DataAvailability: model.DataAvailability_InputBox[:], EpochLength: 10, State: model.ApplicationState_Enabled, + ConsensusType: model.Consensus_Authority, } _, err := s.repository.CreateApplication(s.ctx, app, false) s.Require().Nil(err) @@ -304,11 +317,13 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() // Store the input advance result machinehash1 := crypto.Keccak256Hash([]byte("machine-hash1")) advanceResult := model.AdvanceResult{ - InputIndex: input.Index, - Status: model.InputCompletionStatus_Accepted, - OutputsHash: expectedClaim, - Outputs: [][]byte{outputRawData}, - MachineHash: &machinehash1, + InputIndex: input.Index, + Status: model.InputCompletionStatus_Accepted, + Outputs: [][]byte{outputRawData}, + OutputsProof: model.OutputsProof{ + OutputsHash: expectedClaim, + MachineHash: machinehash1, + }, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -319,12 +334,12 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() updatedEpoch, err := s.repository.GetEpoch(s.ctx, app.IApplicationAddress.String(), epoch.Index) s.Require().Nil(err) s.Require().NotNil(updatedEpoch) - s.Require().NotNil(updatedEpoch.ClaimHash) + s.Require().NotNil(updatedEpoch.OutputsMerkleRoot) // epoch status was updated s.Equal(model.EpochStatus_ClaimComputed, updatedEpoch.Status) // claim is the expected new claim - s.Equal(expectedClaim, *updatedEpoch.ClaimHash) + s.Equal(expectedClaim, *updatedEpoch.OutputsMerkleRoot) updatedOutput, err := s.repository.GetOutput(s.ctx, app.IApplicationAddress.String(), output.Index) s.Require().Nil(err) @@ -347,6 +362,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() DataAvailability: model.DataAvailability_InputBox[:], EpochLength: 10, State: model.ApplicationState_Enabled, + ConsensusType: model.Consensus_Authority, } _, err := s.repository.CreateApplication(s.ctx, app, false) s.Require().Nil(err) @@ -393,17 +409,19 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() machinehash1 := crypto.Keccak256Hash([]byte("machine-hash1")) advanceResult := model.AdvanceResult{ - InputIndex: firstInput.Index, - Status: model.InputCompletionStatus_Accepted, - OutputsHash: firstEpochClaim, - Outputs: [][]byte{firstOutputData}, - MachineHash: &machinehash1, + InputIndex: firstInput.Index, + Status: model.InputCompletionStatus_Accepted, + Outputs: [][]byte{firstOutputData}, + OutputsProof: model.OutputsProof{ + MachineHash: machinehash1, + OutputsHash: firstEpochClaim, + }, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) // update epoch with its claim and insert it in the db - firstEpoch.ClaimHash = &firstEpochClaim + firstEpoch.OutputsMerkleRoot = &firstEpochClaim firstOutput.OutputHashesSiblings = firstEpochProofs err = s.repository.StoreClaimAndProofs(s.ctx, &firstEpoch, []*model.Output{&firstOutput}) s.Require().Nil(err) @@ -419,6 +437,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() } secondInput := model.Input{ + EpochIndex: secondEpoch.Index, Index: 1, BlockNumber: 19, RawData: []byte("data2"), @@ -444,11 +463,14 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() machinehash2 := crypto.Keccak256Hash([]byte("machine-hash2")) advanceResult = model.AdvanceResult{ - InputIndex: secondInput.Index, - Status: model.InputCompletionStatus_Accepted, - OutputsHash: expectedEpochClaim, - Outputs: [][]byte{secondOutputData}, - MachineHash: &machinehash2, + EpochIndex: secondInput.EpochIndex, + InputIndex: secondInput.Index, + Status: model.InputCompletionStatus_Accepted, + Outputs: [][]byte{secondOutputData}, + OutputsProof: model.OutputsProof{ + OutputsHash: expectedEpochClaim, + MachineHash: machinehash2, + }, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -463,13 +485,13 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() ) s.Require().Nil(err) s.Require().NotNil(updatedSecondEpoch) - s.Require().NotNil(updatedSecondEpoch.ClaimHash) + s.Require().NotNil(updatedSecondEpoch.OutputsMerkleRoot) // assert epoch status was changed s.Equal(model.EpochStatus_ClaimComputed, updatedSecondEpoch.Status) // assert second epoch claim is a new claim - s.NotEqual(firstEpochClaim, *updatedSecondEpoch.ClaimHash) - s.Equal(expectedEpochClaim, *updatedSecondEpoch.ClaimHash) + s.NotEqual(firstEpochClaim, *updatedSecondEpoch.OutputsMerkleRoot) + s.Equal(expectedEpochClaim, *updatedSecondEpoch.OutputsMerkleRoot) updatedSecondOutput, err := s.repository.GetOutput( s.ctx,