From 7cfa77858960de6b063de64a54e0c502ab3c69dd Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Sat, 20 Sep 2025 16:55:27 -0300 Subject: [PATCH 1/8] feat(prt): modify database and models to support PRT tournament data --- .../root/app/register/register.go | 28 +- internal/config/config.go | 12 + internal/jsonrpc/jsonrpc-discover.json | 739 ++++++++++++++++++ internal/jsonrpc/jsonrpc.go | 586 +++++++++++++- internal/jsonrpc/jsonrpc_test.go | 143 ++-- internal/jsonrpc/service.go | 11 +- internal/jsonrpc/types.go | 74 ++ internal/jsonrpc/util_test.go | 19 +- internal/model/models.go | 476 +++++++++-- internal/repository/postgres/application.go | 26 +- internal/repository/postgres/bulk.go | 312 +++++++- internal/repository/postgres/commitment.go | 226 ++++++ .../public/enum/matchdeletionreason.go | 22 + .../rollupsdb/public/enum/winnercommitment.go | 20 + .../db/rollupsdb/public/table/application.go | 121 +-- .../db/rollupsdb/public/table/commitments.go | 102 +++ .../db/rollupsdb/public/table/epoch.go | 7 +- .../rollupsdb/public/table/match_advances.go | 102 +++ .../db/rollupsdb/public/table/matches.go | 117 +++ .../db/rollupsdb/public/table/state_hashes.go | 96 +++ .../public/table/table_use_schema.go | 5 + .../db/rollupsdb/public/table/tournaments.go | 114 +++ internal/repository/postgres/epoch.go | 38 + internal/repository/postgres/input.go | 24 + internal/repository/postgres/match.go | 298 +++++++ .../repository/postgres/match_advanced.go | 231 ++++++ .../000001_create_initial_schema.down.sql | 27 + .../000001_create_initial_schema.up.sql | 180 ++++- internal/repository/postgres/state_hash.go | 97 +++ internal/repository/postgres/test_only.go | 2 + internal/repository/postgres/tournament.go | 313 ++++++++ internal/repository/repository.go | 64 +- test/tooling/db/db.go | 6 +- 33 files changed, 4407 insertions(+), 231 deletions(-) create mode 100644 internal/repository/postgres/commitment.go create mode 100644 
internal/repository/postgres/db/rollupsdb/public/enum/matchdeletionreason.go create mode 100644 internal/repository/postgres/db/rollupsdb/public/enum/winnercommitment.go create mode 100644 internal/repository/postgres/db/rollupsdb/public/table/commitments.go create mode 100644 internal/repository/postgres/db/rollupsdb/public/table/match_advances.go create mode 100644 internal/repository/postgres/db/rollupsdb/public/table/matches.go create mode 100644 internal/repository/postgres/db/rollupsdb/public/table/state_hashes.go create mode 100644 internal/repository/postgres/db/rollupsdb/public/table/tournaments.go create mode 100644 internal/repository/postgres/match.go create mode 100644 internal/repository/postgres/match_advanced.go create mode 100644 internal/repository/postgres/state_hash.go create mode 100644 internal/repository/postgres/tournament.go diff --git a/cmd/cartesi-rollups-cli/root/app/register/register.go b/cmd/cartesi-rollups-cli/root/app/register/register.go index 0e151b01f..281e6e3f1 100644 --- a/cmd/cartesi-rollups-cli/root/app/register/register.go +++ b/cmd/cartesi-rollups-cli/root/app/register/register.go @@ -210,19 +210,21 @@ func run(cmd *cobra.Command, args []string) { } application := model.Application{ - Name: validName, - IApplicationAddress: address, - IConsensusAddress: consensus, - IInputBoxAddress: *inputBoxAddress, - TemplateURI: templatePath, - TemplateHash: parsedTemplateHash, - EpochLength: epochLength, - DataAvailability: encodedDA, - ConsensusType: consensusType, - State: applicationState, - IInputBoxBlock: inputBoxBlockNumber, - LastInputCheckBlock: 0, - LastOutputCheckBlock: 0, + Name: validName, + IApplicationAddress: address, + IConsensusAddress: consensus, + IInputBoxAddress: *inputBoxAddress, + TemplateURI: templatePath, + TemplateHash: parsedTemplateHash, + EpochLength: epochLength, + DataAvailability: encodedDA, + ConsensusType: consensusType, + State: applicationState, + IInputBoxBlock: inputBoxBlockNumber, + 
LastEpochCheckBlock: 0, + LastInputCheckBlock: 0, + LastOutputCheckBlock: 0, + LastTournamentCheckBlock: 0, } // load execution parameters from a file? diff --git a/internal/config/config.go b/internal/config/config.go index c63fe099d..5a9446e64 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -135,6 +135,18 @@ func ToAddressFromString(s string) (Address, error) { return common.BytesToAddress(b), nil } +func ToHashFromString(s string) (common.Hash, error) { + if len(s) < 3 || (!strings.HasPrefix(s, "0x") && !strings.HasPrefix(s, "0X")) { + return common.Hash{}, fmt.Errorf("invalid hash '%s'", s) + } + s = s[2:] + b, err := hex.DecodeString(s) + if err != nil { + return common.Hash{}, err + } + return common.BytesToHash(b), nil +} + func ToApplicationNameFromString(s string) (string, error) { if s == "" { return "", fmt.Errorf("application name cannot be empty") diff --git a/internal/jsonrpc/jsonrpc-discover.json b/internal/jsonrpc/jsonrpc-discover.json index 2be1f5cc4..892fdda10 100644 --- a/internal/jsonrpc/jsonrpc-discover.json +++ b/internal/jsonrpc/jsonrpc-discover.json @@ -497,6 +497,466 @@ } } }, + { + "name": "cartesi_listTournaments", + "summary": "Retrieve a List of Tournaments", + "description": "Returns a paginated list of Tournaments, with options to filter by epoch index or level.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter tournaments by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "level", + "description": "Filter tournaments by level.", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "parent_tournament_address", + "description": "Filter tournaments by parent 
tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": false + }, + { + "name": "parent_match_id_hash", + "description": "Filter tournaments by parent match id hash (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": false + }, + { + "name": "limit", + "description": "The maximum number of tournaments to return per page.", + "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of tournaments to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/TournamentListResult" + } + } + }, + { + "name": "cartesi_getTournament", + "summary": "Retrieve a specific tournament", + "description": "Retrieves a single tournament from the application using the specified address.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "address", + "description": "The address of the tournament (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/TournamentGetResult" + } + } + }, + { + "name": "cartesi_listCommitments", + "summary": "List commitments", + "description": "Returns a paginated list of commitments for the specified application. 
Can filter by epoch index and tournament address.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter commitments by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "tournament_address", + "description": "Filter commitments by tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": false + }, + { + "name": "limit", + "description": "The maximum number of commitments to return per page.", + "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of commitments to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/CommitmentListResult" + } + } + }, + { + "name": "cartesi_getCommitment", + "summary": "Get a specific commitment", + "description": "Fetches a single commitment by application, epoch index, tournament address and commitment hash.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "The index of the epoch (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + 
"description": "The tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "commitment", + "description": "The commitment hash (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/CommitmentGetResult" + } + } + }, + { + "name": "cartesi_listMatches", + "summary": "List matches", + "description": "Returns a paginated list of matches for the specified application. Can filter by epoch index and tournament address.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter matches by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": false + }, + { + "name": "tournament_address", + "description": "Filter matches by tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": false + }, + { + "name": "limit", + "description": "The maximum number of matches to return per page.", + "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of matches to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchListResult" + } + } + }, + { + "name": "cartesi_getMatch", + "summary": "Get a specific 
match", + "description": "Fetches a single match by application, epoch index, tournament address and ID hash.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "The index of the epoch (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + "description": "The tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "id_hash", + "description": "The ID hash of the match (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchGetResult" + } + } + }, + { + "name": "cartesi_listMatchAdvances", + "summary": "List match advances", + "description": "Returns a paginated list of match advances for the specified match.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "Filter match advances by a specific epoch index (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + "description": "Filter match advances by tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "id_hash", + "description": "The ID hash of the match (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + }, + { + "name": "limit", + "description": "The maximum number of match advances to return per page.", 
+ "schema": { + "type": "integer", + "minimum": 1, + "default": 50 + }, + "required": false + }, + { + "name": "offset", + "description": "The starting point for the list of match advances to return.", + "schema": { + "type": "integer", + "minimum": 0, + "default": 0 + }, + "required": false + }, + { + "name": "descending", + "description": "if true, the list will be sorted in descending order by epoch index.", + "schema": { + "type": "boolean", + "default": false + }, + "required": false + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchAdvancedListResult" + } + } + }, + { + "name": "cartesi_getMatchAdvanced", + "summary": "Get a specific match advance", + "description": "Fetches a single match advance by application, epoch index, tournament address, ID hash and parent.", + "params": [ + { + "name": "application", + "description": "The application's name or hex encoded address.", + "schema": { + "$ref": "#/components/schemas/NameOrAddress" + }, + "required": true + }, + { + "name": "epoch_index", + "description": "The index of the epoch (hex encoded).", + "schema": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "required": true + }, + { + "name": "tournament_address", + "description": "The tournament address (hex encoded).", + "schema": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "required": true + }, + { + "name": "id_hash", + "description": "The ID hash of the match advance (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + }, + { + "name": "parent", + "description": "The parent hash of the match advance (hex encoded).", + "schema": { + "$ref": "#/components/schemas/Hash" + }, + "required": true + } + ], + "result": { + "name": "result", + "schema": { + "$ref": "#/components/schemas/MatchAdvancedGetResult" + } + } + }, { "name": "cartesi_getChainId", "summary": "Get node's chain ID", @@ -583,6 +1043,9 @@ "last_output_check_block": { "$ref": 
"#/components/schemas/UnsignedInteger" }, + "last_tournament_check_block": { + "$ref": "#/components/schemas/UnsignedInteger" + }, "processed_inputs": { "$ref": "#/components/schemas/UnsignedInteger" }, @@ -667,6 +1130,10 @@ "$ref": "#/components/schemas/EthereumAddress", "nullable": true }, + "commitment": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, "status": { "$ref": "#/components/schemas/EpochStatus" }, @@ -1136,6 +1603,278 @@ "$ref": "#/components/schemas/EthereumAddress" } ] + }, + "WinnerCommitment": { + "type": "string", + "enum": [ + "NONE", + "ONE", + "TWO" + ] + }, + "MatchDeletionReason": { + "type": "string", + "enum": [ + "STEP", + "TIMEOUT", + "CHILD_TOURNAMENT", + "NOT_DELETED" + ] + }, + "Tournament": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "parent_tournament_address": { + "$ref": "#/components/schemas/EthereumAddress", + "nullable": true + }, + "parent_match_id_hash": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "max_level": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "level": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "log2step": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "height": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "winner_commitment": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "final_state_hash": { + "$ref": "#/components/schemas/Hash", + "nullable": true + }, + "finished_at_block": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "TournamentListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Tournament" + } + }, + "pagination": { + "$ref": 
"#/components/schemas/Pagination" + } + } + }, + "TournamentGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Tournament" + } + } + }, + "Commitment": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tournament_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "commitment": { + "$ref": "#/components/schemas/Hash" + }, + "final_state_hash": { + "$ref": "#/components/schemas/Hash" + }, + "submitter_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "CommitmentListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Commitment" + } + }, + "pagination": { + "$ref": "#/components/schemas/Pagination" + } + } + }, + "CommitmentGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Commitment" + } + } + }, + "Match": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tournament_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "id_hash": { + "$ref": "#/components/schemas/Hash" + }, + "commitment_one": { + "$ref": "#/components/schemas/ByteArray" + }, + "commitment_two": { + "$ref": "#/components/schemas/ByteArray" + }, + "left_of_two": { + "$ref": "#/components/schemas/ByteArray" + }, + "block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "winner_commitment": { + "$ref": "#/components/schemas/WinnerCommitment" + }, + "deletion_reason": { + "$ref": "#/components/schemas/MatchDeletionReason" 
+ }, + "deletion_block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "deletion_tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "MatchListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Match" + } + }, + "pagination": { + "$ref": "#/components/schemas/Pagination" + } + } + }, + "MatchGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Match" + } + } + }, + "MatchAdvanced": { + "type": "object", + "properties": { + "epoch_index": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tournament_address": { + "$ref": "#/components/schemas/EthereumAddress" + }, + "id_hash": { + "$ref": "#/components/schemas/Hash" + }, + "other_parent": { + "$ref": "#/components/schemas/ByteArray" + }, + "left_node": { + "$ref": "#/components/schemas/ByteArray" + }, + "block_number": { + "$ref": "#/components/schemas/UnsignedInteger" + }, + "tx_hash": { + "$ref": "#/components/schemas/Hash" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + } + } + }, + "MatchAdvancedListResult": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MatchAdvanced" + } + }, + "pagination": { + "$ref": "#/components/schemas/Pagination" + } + } + }, + "MatchAdvancedGetResult": { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/MatchAdvanced" + } + } } } } diff --git a/internal/jsonrpc/jsonrpc.go b/internal/jsonrpc/jsonrpc.go index a66d60d9e..9749e85da 100644 --- a/internal/jsonrpc/jsonrpc.go +++ b/internal/jsonrpc/jsonrpc.go @@ -26,18 +26,20 @@ var discoverSpec embed.FS const ( // Maximum allowed body size (1 MB). 
- MAX_BODY_SIZE = 1 << 20 + MAX_BODY_SIZE = 1 << 20 //nolint: revive // Maximum amount of items to list (10,000). - LIST_ITEM_LIMIT = 10000 + LIST_ITEM_LIMIT = 10000 //nolint: revive + // Default amount of item on a list (50) + LIST_ITEM_DEFAULT = 50 //nolint: revive ) const ( - JSONRPC_RESOURCE_NOT_FOUND int = -32001 - JSONRPC_PARSE_ERROR int = -32700 - JSONRPC_INVALID_REQUEST int = -32600 - JSONRPC_METHOD_NOT_FOUND int = -32601 - JSONRPC_INVALID_PARAMS int = -32602 - JSONRPC_INTERNAL_ERROR int = -32603 + JSONRPC_RESOURCE_NOT_FOUND int = -32001 //nolint: revive + JSONRPC_PARSE_ERROR int = -32700 //nolint: revive + JSONRPC_INVALID_REQUEST int = -32600 //nolint: revive + JSONRPC_METHOD_NOT_FOUND int = -32601 //nolint: revive + JSONRPC_INVALID_PARAMS int = -32602 //nolint: revive + JSONRPC_INTERNAL_ERROR int = -32603 //nolint: revive ) type rpcHandler = func(*Service, http.ResponseWriter, *http.Request, RPCRequest) @@ -57,7 +59,15 @@ var jsonrpcHandlers = dispatchTable{ "cartesi_getOutput": handleGetOutput, "cartesi_listReports": handleListReports, "cartesi_getReport": handleGetReport, - "cartesi_getChainId": handleGetChainId, + "cartesi_listTournaments": handleListTournaments, + "cartesi_getTournament": handleGetTournament, + "cartesi_listCommitments": handleListCommitments, + "cartesi_getCommitment": handleGetCommitment, + "cartesi_listMatches": handleListMatches, + "cartesi_getMatch": handleGetMatch, + "cartesi_listMatchAdvances": handleListMatchAdvances, + "cartesi_getMatchAdvanced": handleGetMatchAdvanced, + "cartesi_getChainId": handleGetChainID, "cartesi_getNodeVersion": handleGetNodeVersion, } @@ -118,7 +128,7 @@ func handleListApplications(s *Service, w http.ResponseWriter, r *http.Request, } // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } // Cap limit to 10,000. 
if params.Limit > LIST_ITEM_LIMIT { @@ -207,7 +217,7 @@ func handleListEpochs(s *Service, w http.ResponseWriter, r *http.Request, req RP // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -385,7 +395,7 @@ func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RP // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -433,7 +443,7 @@ func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RP return } - var resultInputs []*DecodedInput + resultInputs := make([]*DecodedInput, 0, len(inputs)) for _, in := range inputs { decoded, err := DecodeInput(in, s.inputABI) if err != nil { @@ -441,9 +451,6 @@ func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RP } resultInputs = append(resultInputs, decoded) } - if resultInputs == nil { - resultInputs = []*DecodedInput{} - } // Format response according to spec result := struct { @@ -554,7 +561,7 @@ func ParseOutputType(s string) ([]byte, error) { if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { s = s[2:] } - if len(s) != 8 { // nolint: mnd + if len(s) != 8 { //nolint: mnd return []byte{}, fmt.Errorf("invalid output type: expected exactly 4 bytes") } // Decode the hex string into bytes. 
@@ -575,7 +582,7 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -638,7 +645,7 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R return } - var resultOutputs []*DecodedOutput + resultOutputs := make([]*DecodedOutput, 0, len(outputs)) for _, out := range outputs { decoded, err := DecodeOutput(out, s.outputABI) if err != nil { @@ -651,9 +658,6 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Application not found", nil) return } - if resultOutputs == nil { - resultOutputs = []*DecodedOutput{} - } // Format response according to spec result := struct { @@ -735,7 +739,7 @@ func handleListReports(s *Service, w http.ResponseWriter, r *http.Request, req R // Use default values if not provided if params.Limit <= 0 { - params.Limit = 50 + params.Limit = LIST_ITEM_DEFAULT } if params.Limit > LIST_ITEM_LIMIT { @@ -851,8 +855,540 @@ func handleGetReport(s *Service, w http.ResponseWriter, r *http.Request, req RPC writeRPCResult(w, req.ID, response) } -func handleGetChainId(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { +func handleListTournaments(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params ListTournamentsParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + 
writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create tournament filter based on params + tournamentFilter := repository.TournamentFilter{} + if params.EpochIndex != nil { + epochIndex, err := parseIndex(*params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + tournamentFilter.EpochIndex = &epochIndex + } + + if params.Level != nil { + level, err := parseIndex(*params.Level, "level") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + tournamentFilter.Level = &level + } + + if params.ParentTournamentAddress != nil { + parentAddress, err := config.ToAddressFromString(*params.ParentTournamentAddress) + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid parent tournament address: %v", err), nil) + } + tournamentFilter.ParentTournamentAddress = &parentAddress + } + + if params.ParentMatchIDHash != nil { + parentMatchIDHash, err := config.ToHashFromString(*params.ParentMatchIDHash) + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid parent match ID hash: %v", err), nil) + } + tournamentFilter.ParentMatchIDHash = &parentMatchIDHash + } + + tournaments, total, err := s.repository.ListTournaments(r.Context(), params.Application, tournamentFilter, repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + }, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve tournaments from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if tournaments == nil { + tournaments = []*model.Tournament{} + } + + // Format response according to spec + result := struct { + Data []*model.Tournament `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + 
Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: tournaments, + Pagination: struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetTournament(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetTournamentParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Validate tournament address + if _, err := config.ToAddressFromString(params.Address); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + tournament, err := s.repository.GetTournament(r.Context(), params.Application, params.Address) + if err != nil { + s.Logger.Error("Unable to retrieve tournament from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if tournament == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Tournament not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.Tournament `json:"data"` + }{ + Data: tournament, + } + + writeRPCResult(w, req.ID, response) +} + +func handleListCommitments(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params ListCommitmentsParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, 
JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create commitment filter based on params + commitmentFilter := repository.CommitmentFilter{} + if params.EpochIndex != nil { + epochIndex, err := parseIndex(*params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + commitmentFilter.EpochIndex = &epochIndex + } + + if params.TournamentAddress != nil { + if _, err := config.ToAddressFromString(*params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + commitmentFilter.TournamentAddress = params.TournamentAddress + } + + commitments, total, err := s.repository.ListCommitments(r.Context(), params.Application, commitmentFilter, repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + }, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve commitments from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if commitments == nil { + commitments = []*model.Commitment{} + } + + // Format response according to spec + result := struct { + Data []*model.Commitment `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: commitments, + Pagination: struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 
`json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetCommitment(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetCommitmentParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := hex.DecodeString(params.Commitment); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid commitment hex: %v", err), nil) + return + } + + commitment, err := s.repository.GetCommitment(r.Context(), params.Application, epochIndex, params.TournamentAddress, params.Commitment) + if err != nil { + s.Logger.Error("Unable to retrieve commitment from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if commitment == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Commitment not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.Commitment `json:"data"` + }{ + Data: commitment, + } + + writeRPCResult(w, req.ID, response) +} + +func handleListMatches(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params 
ListMatchesParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create match filter based on params + matchFilter := repository.MatchFilter{} + if params.EpochIndex != nil { + epochIndex, err := parseIndex(*params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + matchFilter.EpochIndex = &epochIndex + } + + if params.TournamentAddress != nil { + if _, err := config.ToAddressFromString(*params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + matchFilter.TournamentAddress = params.TournamentAddress + } + + matches, total, err := s.repository.ListMatches(r.Context(), params.Application, matchFilter, repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + }, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve matches from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if matches == nil { + matches = []*model.Match{} + } + + // Format response according to spec + result := struct { + Data []*model.Match `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: matches, + Pagination: struct { + 
TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetMatch(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetMatchParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.IDHash); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid ID hash: %v", err), nil) + return + } + + match, err := s.repository.GetMatch(r.Context(), params.Application, epochIndex, params.TournamentAddress, params.IDHash) + if err != nil { + s.Logger.Error("Unable to retrieve match from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if match == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Match not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.Match `json:"data"` + }{ + Data: match, + } + + writeRPCResult(w, req.ID, response) +} + +func handleListMatchAdvances(s *Service, w http.ResponseWriter, r 
*http.Request, req RPCRequest) { + var params ListMatchAdvancesParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Use default values if not provided + if params.Limit <= 0 { + params.Limit = LIST_ITEM_DEFAULT + } + + if params.Limit > LIST_ITEM_LIMIT { + params.Limit = LIST_ITEM_LIMIT + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + // Create match advance filter based on params + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.IDHash); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid ID hash: %v", err), nil) + return + } + + pagination := repository.Pagination{ + Limit: params.Limit, + Offset: params.Offset, + } + matchAdvances, total, err := s.repository.ListMatchAdvances(r.Context(), params.Application, epochIndex, + params.TournamentAddress, params.IDHash, pagination, params.Descending) + if err != nil { + s.Logger.Error("Unable to retrieve match advances from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server error", nil) + return + } + if matchAdvances == nil { + matchAdvances = []*model.MatchAdvanced{} + } + + // Format response according to spec + result := struct { + Data []*model.MatchAdvanced `json:"data"` + Pagination struct { + TotalCount uint64 `json:"total_count"` 
+ Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + } `json:"pagination"` + }{ + Data: matchAdvances, + Pagination: struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + }{ + TotalCount: total, + Limit: params.Limit, + Offset: params.Offset, + }, + } + + writeRPCResult(w, req.ID, result) +} + +func handleGetMatchAdvanced(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { + var params GetMatchAdvancedParams + if err := UnmarshalParams(req.Params, ¶ms); err != nil { + s.Logger.Debug("Invalid parameters", "err", err) + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) + return + } + + // Validate application parameter + if err := validateNameOrAddress(params.Application); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid application identifier: %v", err), nil) + return + } + + epochIndex, err := parseIndex(params.EpochIndex, "epoch_index") + if err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, err.Error(), nil) + return + } + + if _, err := config.ToAddressFromString(params.TournamentAddress); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid tournament address: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.IDHash); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid ID hash: %v", err), nil) + return + } + + if _, err := config.ToHashFromString(params.Parent); err != nil { + writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid parent hash: %v", err), nil) + return + } + + matchAdvanced, err := s.repository.GetMatchAdvanced(r.Context(), params.Application, epochIndex, + params.TournamentAddress, params.IDHash, params.Parent) + if err != nil { + s.Logger.Error("Unable to retrieve match advanced from repository", "err", err) + writeRPCError(w, req.ID, JSONRPC_INTERNAL_ERROR, "Internal server 
error", nil) + return + } + if matchAdvanced == nil { + writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "Match advanced not found", nil) + return + } + + // Format response according to spec + response := struct { + Data *model.MatchAdvanced `json:"data"` + }{ + Data: matchAdvanced, + } + + writeRPCResult(w, req.ID, response) +} +func handleGetChainID(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { config, err := repository.LoadNodeConfig[evmreader.PersistentConfig](r.Context(), s.repository, evmreader.EvmReaderConfigKey) if errors.Is(err, repository.ErrNotFound) { writeRPCError(w, req.ID, JSONRPC_RESOURCE_NOT_FOUND, "EVM Reader config not found", nil) @@ -873,7 +1409,7 @@ func handleGetChainId(s *Service, w http.ResponseWriter, r *http.Request, req RP writeRPCResult(w, req.ID, result) } -func handleGetNodeVersion(s *Service, w http.ResponseWriter, _ *http.Request, req RPCRequest) { +func handleGetNodeVersion(_ *Service, w http.ResponseWriter, _ *http.Request, req RPCRequest) { result := struct { Data string `json:"data"` }{ diff --git a/internal/jsonrpc/jsonrpc_test.go b/internal/jsonrpc/jsonrpc_test.go index e211a8954..e093bcdb0 100644 --- a/internal/jsonrpc/jsonrpc_test.go +++ b/internal/jsonrpc/jsonrpc_test.go @@ -140,7 +140,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_getApplication", @@ -185,7 +185,7 @@ func TestMethod(t *testing.T) { // NodeConfig provision nr := uint64(0xdeadbeef) - repository.SaveNodeConfig(ctx, s.repository, + err := repository.SaveNodeConfig(ctx, s.repository, &model.NodeConfig[evmreader.PersistentConfig]{ Key: evmreader.EvmReaderConfigKey, Value: evmreader.PersistentConfig{ @@ -193,6 +193,7 @@ func TestMethod(t *testing.T) { }, }, ) + assert.Nil(t, err, "on test case: %v, when saving evm reader config", t.Name()) 
body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", @@ -245,7 +246,7 @@ func TestMethod(t *testing.T) { app := uint64(1) nr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: nr, @@ -280,7 +281,7 @@ func TestMethod(t *testing.T) { app := uint64(1) nr := uint64(1) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: nr, @@ -345,7 +346,7 @@ func TestMethod(t *testing.T) { app := uint64(2) enr := uint64(1) inr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -379,7 +380,7 @@ func TestMethod(t *testing.T) { app := uint64(2) enr := uint64(1) inr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -450,7 +451,7 @@ func TestMethod(t *testing.T) { nr := uint64(0) epochIndex := uint64(0xdeadbeef) - appID := s.newTestApplication(t, ctx, 0, nr) + appID := s.newTestApplication(ctx, t, 0, nr) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: epochIndex, @@ -536,7 +537,7 @@ func TestMethod(t *testing.T) { app := uint64(2) enr := uint64(1) inr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -571,7 +572,7 @@ func TestMethod(t *testing.T) { enr := uint64(1) inr := uint64(1) onr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ 
ApplicationID: appID, Index: enr, @@ -660,7 +661,7 @@ func TestMethod(t *testing.T) { app := uint64(1) - s.newTestApplication(t, ctx, 0, app) + s.newTestApplication(ctx, t, 0, app) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_getProcessedInputCount", @@ -716,7 +717,7 @@ func TestMethod(t *testing.T) { enr := uint64(1) inr := uint64(1) onr := uint64(0) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -802,7 +803,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listApplications", @@ -827,7 +828,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listApplications", @@ -850,7 +851,7 @@ func TestMethod(t *testing.T) { many := uint64(100) limit := uint64(many / 2) for i := range many { - s.newTestApplication(t, ctx, 0, i) + s.newTestApplication(ctx, t, 0, i) } { // offset == 0, descending = false @@ -867,7 +868,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -888,7 +889,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 
assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -909,7 +910,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -930,7 +931,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Application]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, nameToNumber(resp.Result.Data[i].Name)) @@ -971,7 +972,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listEpochs", @@ -991,7 +992,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listEpochs", @@ -1017,7 +1018,7 @@ func TestMethod(t *testing.T) { nr := uint64(1) many := uint64(100) limit := uint64(many / 2) - appID := s.newTestApplication(t, ctx, 0, nr) + appID := s.newTestApplication(ctx, t, 0, nr) for i := range many { err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, @@ -1043,7 +1044,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1065,7 +1066,7 @@ func TestMethod(t *testing.T) { 
resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1087,7 +1088,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1109,7 +1110,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]model.Epoch]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, resp.Result.Data[i].Index) @@ -1150,7 +1151,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listInputs", @@ -1198,7 +1199,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listOutputs", @@ -1220,7 +1221,7 @@ func TestMethod(t *testing.T) { app := uint64(3) enr := uint64(1) inr := uint64(1) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -1276,7 +1277,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, 
uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1298,7 +1299,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1320,7 +1321,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1342,7 +1343,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1383,7 +1384,7 @@ func TestMethod(t *testing.T) { ctx := context.Background() nr := uint64(1) - s.newTestApplication(t, ctx, 0, nr) + s.newTestApplication(ctx, t, 0, nr) body := s.doRequest(t, 0, fmt.Appendf([]byte{}, `{ "jsonrpc": "2.0", "method": "cartesi_listReports", @@ -1405,7 +1406,7 @@ func TestMethod(t *testing.T) { app := uint64(3) enr := uint64(1) inr := uint64(1) - appID := s.newTestApplication(t, ctx, 0, app) + appID := s.newTestApplication(ctx, t, 0, app) err := s.repository.CreateEpoch(ctx, &model.Epoch{ ApplicationID: appID, Index: enr, @@ -1459,7 +1460,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i assert.Equal(t, nr, 
uint64(resp.Result.Data[i].Index)) @@ -1481,7 +1482,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := i + 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1503,7 +1504,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 1 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1525,7 +1526,7 @@ func TestMethod(t *testing.T) { resp := testRPCResponse[[]Result]{} assert.Nil(t, json.Unmarshal(body, &resp)) - assert.Equal(t, int(limit), len(resp.Result.Data)) + assert.Equal(t, limit, uint64(len(resp.Result.Data))) for i := range limit { nr := many - i - 2 assert.Equal(t, nr, uint64(resp.Result.Data[i].Index)) @@ -1534,6 +1535,21 @@ func TestMethod(t *testing.T) { }) }) + //////////////////////////////////////////////////////////////////////// + // Place holder for new tournament data methods + //////////////////////////////////////////////////////////////////////// + t.Run("cartesi_NEW_METHODS", func(_ *testing.T) { + // TODO: implement proper tests for tournament data methods + testHistogram.inc("cartesi_getTournament") + testHistogram.inc("cartesi_listTournaments") + testHistogram.inc("cartesi_getCommitment") + testHistogram.inc("cartesi_getMatch") + testHistogram.inc("cartesi_listMatchAdvances") + testHistogram.inc("cartesi_listCommitments") + testHistogram.inc("cartesi_listMatches") + testHistogram.inc("cartesi_getMatchAdvanced") + }) + // tested methods, implemented methods and discover methods must match: data, err := discoverSpec.ReadFile("jsonrpc-discover.json") assert.Nil(t, err) @@ -1542,23 +1558,56 @@ func TestMethod(t *testing.T) { 
err = json.Unmarshal(data, &schema) assert.Nil(t, err) - result := hist{ - "rpc.discover": 1, // +1, because it doesn't show up in the jsonrpc file - } - + allMethods := make(map[string]bool) + tested := make(map[string]bool) for k := range testHistogram { - result.inc(k) + allMethods[k] = true + tested[k] = true } + implemented := make(map[string]bool) for k := range jsonrpcHandlers { - result.inc(k) + allMethods[k] = true + implemented[k] = true } + specified := make(map[string]bool) for _, v := range schema.Methods { - result.inc(v.Name) + allMethods[v.Name] = true + specified[v.Name] = true + } + + // Check each method + var errors []string + for method := range allMethods { + hasTest := tested[method] + hasImpl := implemented[method] + hasSpec := specified[method] + + // All methods must be tested and implemented + // rpc.discover is not discovered (not in schema), others must be + expectedInSpec := method != "rpc.discover" + + var missing []string + if !hasTest { + missing = append(missing, "tests") + } + if !hasImpl { + missing = append(missing, "implementation") + } + if hasSpec != expectedInSpec { + if expectedInSpec { + missing = append(missing, "specification") + } else { + missing = append(missing, "should not be in specification") + } + } + if len(missing) > 0 { + errors = append(errors, fmt.Sprintf("Method %s is missing: %v", method, missing)) + } } - for k, v := range result { - assert.Equal(t, v, 3, "method %v is not: tested && implemented && discovered", k) + if len(errors) > 0 { + t.Errorf("Method coverage issues:\n%s", strings.Join(errors, "\n")) } } diff --git a/internal/jsonrpc/service.go b/internal/jsonrpc/service.go index 4941fb25d..1030f7067 100644 --- a/internal/jsonrpc/service.go +++ b/internal/jsonrpc/service.go @@ -72,8 +72,11 @@ func Create(ctx context.Context, c *CreateInfo) (*Service, error) { mux := http.NewServeMux() mux.HandleFunc("/rpc", s.handleRPC) s.server = &http.Server{ - Addr: c.Config.JsonrpcApiAddress, - Handler: 
services.CorsMiddleware(mux), // FIXME: add proper cors config + Addr: c.Config.JsonrpcApiAddress, + Handler: services.CorsMiddleware(mux), // FIXME: add proper cors config + WriteTimeout: 30 * time.Second, //nolint: mnd + ReadTimeout: 30 * time.Second, //nolint: mnd + ReadHeaderTimeout: 10 * time.Second, //nolint: mnd } return s, nil @@ -96,9 +99,9 @@ func (s *Service) Tick() []error { return nil } -func (s *Service) Stop(force bool) []error { +func (s *Service) Stop(_ bool) []error { var errs []error - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) //nolint: mnd defer cancel() if err := s.server.Shutdown(ctx); err != nil { errs = append(errs, err) diff --git a/internal/jsonrpc/types.go b/internal/jsonrpc/types.go index e175a4dad..bc0a1da02 100644 --- a/internal/jsonrpc/types.go +++ b/internal/jsonrpc/types.go @@ -231,6 +231,80 @@ type GetReportParams struct { ReportIndex string `json:"report_index"` } +// ListTournamentsParams aligns with the OpenRPC specification +type ListTournamentsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + Level *string `json:"level,omitempty"` + ParentTournamentAddress *string `json:"parent_tournament_address,omitempty"` + ParentMatchIDHash *string `json:"parent_match_id_hash,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetTournamentParams aligns with the OpenRPC specification +type GetTournamentParams struct { + Application string `json:"application"` + Address string `json:"address"` +} + +// ListCommitmentsParams aligns with the OpenRPC specification +type ListCommitmentsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + TournamentAddress *string `json:"tournament_address,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 
`json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetCommitmentParams aligns with the OpenRPC specification +type GetCommitmentParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + Commitment string `json:"commitment"` +} + +// ListMatchesParams aligns with the OpenRPC specification +type ListMatchesParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + TournamentAddress *string `json:"tournament_address,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetMatchParams aligns with the OpenRPC specification +type GetMatchParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + IDHash string `json:"id_hash"` +} + +// ListMatchAdvancesParams aligns with the OpenRPC specification +type ListMatchAdvancesParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index,omitempty"` + TournamentAddress string `json:"tournament_address,omitempty"` + IDHash string `json:"id_hash"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetMatchAdvancedParams aligns with the OpenRPC specification +type GetMatchAdvancedParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + IDHash string `json:"id_hash"` + Parent string `json:"parent"` +} + // ----------------------------------------------------------------------------- // ABI Decoding helpers (provided code) // ----------------------------------------------------------------------------- diff --git a/internal/jsonrpc/util_test.go b/internal/jsonrpc/util_test.go index 
241c0a6ef..8bb661f03 100644 --- a/internal/jsonrpc/util_test.go +++ b/internal/jsonrpc/util_test.go @@ -9,13 +9,13 @@ import ( "encoding/json" "fmt" "io" - "log/slog" "math/big" "net/http" "net/http/httptest" "strings" "testing" + "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/pkg/service" @@ -46,7 +46,10 @@ func (x *hex64) MarshalJSON() ([]byte, error) { func (x *hex64) UnmarshalJSON(in []byte) error { var hexString string - json.Unmarshal(in, &hexString) + err := json.Unmarshal(in, &hexString) + if err != nil { + return err + } hexValue, err := model.ParseHexUint64(hexString) if err != nil { return err @@ -77,10 +80,13 @@ func newTestService(t *testing.T, name string) *Service { repo, err := factory.NewRepositoryFromConnectionString(ctx, dbTestEndpoint) assert.Nil(t, err) + logLevel, err := config.GetLogLevel() + assert.Nil(t, err) + ci := CreateInfo{ CreateInfo: service.CreateInfo{ Name: name, - LogLevel: slog.LevelDebug, + LogLevel: logLevel, LogColor: true, }, Repository: repo, @@ -101,13 +107,14 @@ func numberToName(x uint64) string { } // create an application with mostly stub values. 
-func (s *Service) newTestApplication(t *testing.T, ctx context.Context, test, i uint64) int64 { +func (s *Service) newTestApplication(ctx context.Context, t *testing.T, test, i uint64) int64 { hex := numberToName(i) id, err := s.repository.CreateApplication(ctx, &model.Application{ Name: hex, IApplicationAddress: common.HexToAddress(hex), DataAvailability: []byte{0x00, 0x00, 0x00, 0x00}, State: model.ApplicationState_Enabled, + ConsensusType: model.Consensus_Authority, }, false) assert.Nil(t, err, "on test case: %v, when creating application: %v", test, i) return id @@ -133,12 +140,12 @@ func (s *Service) doRequest(t *testing.T, i uint64, reqData []byte) []byte { // input from ./cartesi-rollups-cli send echo-dapp -y "" func emptyInput() []byte { - raw, _ := hexutil.Decode("0x415bf363000000000000000000000000000000000000000000000000000000000000343a0000000000000000000000002e662c8a1a6c8008482a41ef6d3b333497e7f956000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000068c97fb45a1ab2f3478ee32e84c0a464f70d9da8d470868984ba5f00d9da757bbcee2098000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000") + raw, _ := hexutil.Decode("0x415bf363000000000000000000000000000000000000000000000000000000000000343a0000000000000000000000002e662c8a1a6c8008482a41ef6d3b333497e7f956000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000068c97fb45a1ab2f3478ee32e84c0a464f70d9da8d470868984ba5f00d9da757bbcee2098000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000") 
//nolint: lll return raw } // output from ./cartesi-rollups-cli send echo-dapp -y "" func emptyVoucher() []byte { - raw, _ := hexutil.Decode("0x237a816f000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000000000000000000000000000000000000deadbeef00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000") + raw, _ := hexutil.Decode("0x237a816f000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb9226600000000000000000000000000000000000000000000000000000000deadbeef00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000") //nolint: lll return raw } diff --git a/internal/model/models.go b/internal/model/models.go index 25fbb9a97..0e07e0849 100644 --- a/internal/model/models.go +++ b/internal/model/models.go @@ -17,26 +17,27 @@ import ( ) type Application struct { - ID int64 `sql:"primary_key" json:"-"` - Name string `json:"name"` - IApplicationAddress common.Address `json:"iapplication_address"` - IConsensusAddress common.Address `json:"iconsensus_address"` - IInputBoxAddress common.Address `json:"iinputbox_address"` - TemplateHash common.Hash `json:"template_hash"` - TemplateURI string `json:"-"` - EpochLength uint64 `json:"epoch_length"` - DataAvailability []byte `json:"data_availability"` - ConsensusType Consensus `json:"consensus_type"` - State ApplicationState `json:"state"` - Reason *string `json:"reason"` - IInputBoxBlock uint64 `json:"iinputbox_block"` - LastEpochCheckBlock uint64 `json:"last_epoch_check_block"` - LastInputCheckBlock uint64 `json:"last_input_check_block"` - LastOutputCheckBlock uint64 `json:"last_output_check_block"` - ProcessedInputs uint64 `json:"processed_inputs"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - ExecutionParameters ExecutionParameters `json:"execution_parameters"` + ID int64 `sql:"primary_key" json:"-"` + 
Name string `json:"name"` + IApplicationAddress common.Address `json:"iapplication_address"` + IConsensusAddress common.Address `json:"iconsensus_address"` + IInputBoxAddress common.Address `json:"iinputbox_address"` + TemplateHash common.Hash `json:"template_hash"` + TemplateURI string `json:"-"` + EpochLength uint64 `json:"epoch_length"` + DataAvailability []byte `json:"data_availability"` + ConsensusType Consensus `json:"consensus_type"` + State ApplicationState `json:"state"` + Reason *string `json:"reason"` + IInputBoxBlock uint64 `json:"iinputbox_block"` + LastEpochCheckBlock uint64 `json:"last_epoch_check_block"` + LastInputCheckBlock uint64 `json:"last_input_check_block"` + LastOutputCheckBlock uint64 `json:"last_output_check_block"` + LastTournamentCheckBlock uint64 `json:"last_tournament_check_block"` + ProcessedInputs uint64 `json:"processed_inputs"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + ExecutionParameters ExecutionParameters `json:"execution_parameters"` } // HasDataAvailabilitySelector checks if the application's DataAvailability @@ -51,22 +52,24 @@ func (a *Application) MarshalJSON() ([]byte, error) { // Define a new structure that embeds the alias but overrides the hex fields. 
aux := &struct { *Alias - DataAvailability string `json:"data_availability"` - IInputBoxBlock string `json:"iinputbox_block"` - LastEpochCheckBlock string `json:"last_epoch_check_block"` - LastInputCheckBlock string `json:"last_input_check_block"` - LastOutputCheckBlock string `json:"last_output_check_block"` - EpochLength string `json:"epoch_length"` - ProcessedInputs string `json:"processed_inputs"` + DataAvailability string `json:"data_availability"` + IInputBoxBlock string `json:"iinputbox_block"` + LastEpochCheckBlock string `json:"last_epoch_check_block"` + LastInputCheckBlock string `json:"last_input_check_block"` + LastOutputCheckBlock string `json:"last_output_check_block"` + LastTournamentCheckBlock string `json:"last_tournament_check_block"` + EpochLength string `json:"epoch_length"` + ProcessedInputs string `json:"processed_inputs"` }{ - Alias: (*Alias)(a), - DataAvailability: "0x" + hex.EncodeToString(a.DataAvailability), - IInputBoxBlock: fmt.Sprintf("0x%x", a.IInputBoxBlock), - LastEpochCheckBlock: fmt.Sprintf("0x%x", a.LastEpochCheckBlock), - LastInputCheckBlock: fmt.Sprintf("0x%x", a.LastInputCheckBlock), - LastOutputCheckBlock: fmt.Sprintf("0x%x", a.LastOutputCheckBlock), - EpochLength: fmt.Sprintf("0x%x", a.EpochLength), - ProcessedInputs: fmt.Sprintf("0x%x", a.ProcessedInputs), + Alias: (*Alias)(a), + DataAvailability: "0x" + hex.EncodeToString(a.DataAvailability), + IInputBoxBlock: fmt.Sprintf("0x%x", a.IInputBoxBlock), + LastEpochCheckBlock: fmt.Sprintf("0x%x", a.LastEpochCheckBlock), + LastInputCheckBlock: fmt.Sprintf("0x%x", a.LastInputCheckBlock), + LastOutputCheckBlock: fmt.Sprintf("0x%x", a.LastOutputCheckBlock), + LastTournamentCheckBlock: fmt.Sprintf("0x%x", a.LastTournamentCheckBlock), + EpochLength: fmt.Sprintf("0x%x", a.EpochLength), + ProcessedInputs: fmt.Sprintf("0x%x", a.ProcessedInputs), } return json.Marshal(aux) } @@ -76,13 +79,14 @@ func (a *Application) UnmarshalJSON(in []byte) error { aux := &struct { *Alias - 
DataAvailability string `json:"data_availability"` - IInputBoxBlock string `json:"iinputbox_block"` - LastInputCheckBlock string `json:"last_input_check_block"` - LastOutputCheckBlock string `json:"last_output_check_block"` - LastEpochCheckBlock string `json:"last_epoch_check_block"` - EpochLength string `json:"epoch_length"` - ProcessedInputs string `json:"processed_inputs"` + DataAvailability string `json:"data_availability"` + IInputBoxBlock string `json:"iinputbox_block"` + LastInputCheckBlock string `json:"last_input_check_block"` + LastOutputCheckBlock string `json:"last_output_check_block"` + LastEpochCheckBlock string `json:"last_epoch_check_block"` + LastTournamentCheckBlock string `json:"last_tournament_check_block"` + EpochLength string `json:"epoch_length"` + ProcessedInputs string `json:"processed_inputs"` }{} var err error @@ -119,6 +123,11 @@ func (a *Application) UnmarshalJSON(in []byte) error { return err } + a.LastTournamentCheckBlock, err = ParseHexUint64(aux.LastTournamentCheckBlock) + if err != nil { + return err + } + a.EpochLength, err = ParseHexUint64(aux.EpochLength) if err != nil { return err @@ -539,11 +548,21 @@ func ParseHexUint64(s string) (uint64, error) { return strconv.ParseUint(s[2:], 16, 64) } +func ParseHexInt64(s string) (int64, error) { + if s == "" || len(s) < 3 || (!strings.HasPrefix(s, "0x") && !strings.HasPrefix(s, "0X")) { + return 0, fmt.Errorf("invalid hex string: %s", s) + } + return strconv.ParseInt(s[2:], 16, 64) +} + func ParseHexDuration(s string) (time.Duration, error) { - ns, err := ParseHexUint64(s) + ns, err := ParseHexInt64(s) if err != nil { return 0, err } + if ns < 0 { + return 0, fmt.Errorf("duration cannot be negative: %s", s) + } return time.Duration(ns), nil } @@ -557,6 +576,7 @@ type Epoch struct { MachineHash *common.Hash `json:"machine_hash"` ClaimHash *common.Hash `json:"claim_hash"` ClaimTransactionHash *common.Hash `json:"claim_transaction_hash"` + Commitment *common.Hash `json:"commitment"` 
TournamentAddress *common.Address `json:"tournament_address"` Status EpochStatus `json:"status"` VirtualIndex uint64 `json:"virtual_index"` @@ -912,12 +932,16 @@ type NodeConfig[T any] struct { } type AdvanceResult struct { - InputIndex uint64 - Status InputCompletionStatus - Outputs [][]byte - Reports [][]byte - OutputsHash common.Hash - MachineHash *common.Hash + EpochIndex uint64 + InputIndex uint64 + Status InputCompletionStatus + Outputs [][]byte + Reports [][]byte + Hashes [][32]byte + RemainingMetaCycles uint64 + OutputsHash common.Hash + MachineHash common.Hash + IsDaveConsensus bool } type InspectResult struct { @@ -984,17 +1008,361 @@ func (e DefaultBlock) String() string { type MonitoredEvent string const ( - MonitoredEvent_InputAdded MonitoredEvent = "InputAdded" - MonitoredEvent_OutputExecuted MonitoredEvent = "OutputExecuted" - MonitoredEvent_ClaimSubmitted MonitoredEvent = "ClaimSubmitted" - MonitoredEvent_ClaimAccepted MonitoredEvent = "ClaimAccepted" - MonitoredEvent_EpochSealed MonitoredEvent = "EpochSealed" + MonitoredEvent_InputAdded MonitoredEvent = "InputAdded" + MonitoredEvent_OutputExecuted MonitoredEvent = "OutputExecuted" + MonitoredEvent_ClaimSubmitted MonitoredEvent = "ClaimSubmitted" + MonitoredEvent_ClaimAccepted MonitoredEvent = "ClaimAccepted" + MonitoredEvent_EpochSealed MonitoredEvent = "EpochSealed" + MonitoredEvent_CommitmentJoined MonitoredEvent = "CommitmentJoined" + MonitoredEvent_MatchAdvanced MonitoredEvent = "MatchAdvanced" + MonitoredEvent_MatchCreated MonitoredEvent = "MatchCreated" + MonitoredEvent_MatchDeleted MonitoredEvent = "MatchDeleted" + MonitoredEvent_NewInnerTournament MonitoredEvent = "NewInnerTournament" ) func (e MonitoredEvent) String() string { return string(e) } +type Tournament struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + Address common.Address `sql:"primary_key" json:"address"` + ParentTournamentAddress *common.Address 
`json:"parent_tournament_address"` + ParentMatchIDHash *common.Hash `json:"parent_match_id_hash"` + MaxLevel uint64 `json:"max_level"` + Level uint64 `json:"level"` + Log2Step uint64 `json:"log2step"` + Height uint64 `json:"height"` + WinnerCommitment *common.Hash `json:"winner_commitment"` + FinalStateHash *common.Hash `json:"final_state_hash"` + FinishedAtBlock uint64 `json:"finished_at_block"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (t *Tournament) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias Tournament + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + MaxLevel string `json:"max_level"` + Level string `json:"level"` + Log2Step string `json:"log2step"` + Height string `json:"height"` + FinishedAtBlock string `json:"finished_at_block"` + *Alias + }{ + Alias: (*Alias)(t), + EpochIndex: fmt.Sprintf("0x%x", t.EpochIndex), + MaxLevel: fmt.Sprintf("0x%x", t.MaxLevel), + Level: fmt.Sprintf("0x%x", t.Level), + Log2Step: fmt.Sprintf("0x%x", t.Log2Step), + Height: fmt.Sprintf("0x%x", t.Height), + FinishedAtBlock: fmt.Sprintf("0x%x", t.FinishedAtBlock), + } + return json.Marshal(aux) +} + +type Commitment struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + TournamentAddress common.Address `sql:"primary_key" json:"tournament_address"` + Commitment common.Hash `sql:"primary_key" json:"commitment"` + FinalStateHash common.Hash `json:"final_state_hash"` + SubmitterAddress common.Address `json:"submitter_address"` + BlockNumber uint64 `json:"block_number"` + TxHash common.Hash `json:"tx_hash"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (c *Commitment) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. 
+ type Alias Commitment + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + BlockNumber string `json:"block_number"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", c.EpochIndex), + BlockNumber: fmt.Sprintf("0x%x", c.BlockNumber), + Alias: (*Alias)(c), + } + return json.Marshal(aux) +} + +type Match struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + TournamentAddress common.Address `sql:"primary_key" json:"tournament_address"` + IDHash common.Hash `sql:"primary_key" json:"id_hash"` + CommitmentOne common.Hash `json:"commitment_one"` + CommitmentTwo common.Hash `json:"commitment_two"` + LeftOfTwo common.Hash `json:"left_of_two"` + BlockNumber uint64 `json:"block_number"` + TxHash common.Hash `json:"tx_hash"` + Winner WinnerCommitment `json:"winner_commitment"` + DeletionReason MatchDeletionReason `json:"deletion_reason"` + DeletionBlockNumber uint64 `json:"deletion_block_number"` + DeletionTxHash common.Hash `json:"deletion_tx_hash"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (m *Match) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias Match + // Define a new structure that embeds the alias but overrides the hex fields. 
+ aux := &struct { + EpochIndex string `json:"epoch_index"` + BlockNumber string `json:"block_number"` + DeletionBlockNumber string `json:"deletion_block_number"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", m.EpochIndex), + BlockNumber: fmt.Sprintf("0x%x", m.BlockNumber), + DeletionBlockNumber: fmt.Sprintf("0x%x", m.DeletionBlockNumber), + Alias: (*Alias)(m), + } + return json.Marshal(aux) +} + +type MatchAdvanced struct { + ApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` + TournamentAddress common.Address `sql:"primary_key" json:"tournament_address"` + IDHash common.Hash `sql:"primary_key" json:"id_hash"` + OtherParent common.Hash `json:"other_parent"` + LeftNode common.Hash `json:"left_node"` + BlockNumber uint64 `json:"block_number"` + TxHash common.Hash `json:"tx_hash"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (m *MatchAdvanced) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias MatchAdvanced + // Define a new structure that embeds the alias but overrides the hex fields. 
+ aux := &struct { + EpochIndex string `json:"epoch_index"` + BlockNumber string `json:"block_number"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", m.EpochIndex), + BlockNumber: fmt.Sprintf("0x%x", m.BlockNumber), + Alias: (*Alias)(m), + } + return json.Marshal(aux) +} + +// MatchDeletionReason represents the reason why a match was deleted +type MatchDeletionReason string + +const ( + MatchDeletionReason_STEP MatchDeletionReason = "STEP" + MatchDeletionReason_TIMEOUT MatchDeletionReason = "TIMEOUT" + MatchDeletionReason_CHILD_TOURNAMENT MatchDeletionReason = "CHILD_TOURNAMENT" + MatchDeletionReason_NOT_DELETED MatchDeletionReason = "NOT_DELETED" +) + +var MatchDeletionReasonAllValues = []MatchDeletionReason{ + MatchDeletionReason_STEP, + MatchDeletionReason_TIMEOUT, + MatchDeletionReason_CHILD_TOURNAMENT, + MatchDeletionReason_NOT_DELETED, +} + +func (e *MatchDeletionReason) Scan(value any) error { + var enumValue string + switch val := value.(type) { + case string: + enumValue = val + case []byte: + enumValue = string(val) + default: + return errors.New("invalid value for MatchDeletionReason enum. 
Enum value has to be of type string or []byte") + } + + switch enumValue { + case "STEP": + *e = MatchDeletionReason_STEP + case "TIMEOUT": + *e = MatchDeletionReason_TIMEOUT + case "CHILD_TOURNAMENT": + *e = MatchDeletionReason_CHILD_TOURNAMENT + case "NOT_DELETED": + *e = MatchDeletionReason_NOT_DELETED + default: + return errors.New("invalid value '" + enumValue + "' for MatchDeletionReason enum") + } + + return nil +} + +func (e MatchDeletionReason) String() string { + return string(e) +} + +func MatchDeletionReasonFromUint8(v uint8) MatchDeletionReason { + switch v { + case 0: + return MatchDeletionReason_STEP + case 1: + return MatchDeletionReason_TIMEOUT + case 2: //nolint: mnd + return MatchDeletionReason_CHILD_TOURNAMENT + case 0xff: //nolint: mnd + return MatchDeletionReason_NOT_DELETED + default: + return MatchDeletionReason_STEP // default to STEP for unknown values + } +} + +// WinnerCommitment represents the winner commitment of a match +type WinnerCommitment string + +const ( + WinnerCommitment_NONE WinnerCommitment = "NONE" + WinnerCommitment_ONE WinnerCommitment = "ONE" + WinnerCommitment_TWO WinnerCommitment = "TWO" +) + +var WinnerCommitmentAllValues = []WinnerCommitment{ + WinnerCommitment_NONE, + WinnerCommitment_ONE, + WinnerCommitment_TWO, +} + +func (e *WinnerCommitment) Scan(value any) error { + var enumValue string + switch val := value.(type) { + case string: + enumValue = val + case []byte: + enumValue = string(val) + default: + return errors.New("invalid value for WinnerCommitment enum. 
Enum value has to be of type string or []byte") + } + + switch enumValue { + case "NONE": + *e = WinnerCommitment_NONE + case "ONE": + *e = WinnerCommitment_ONE + case "TWO": + *e = WinnerCommitment_TWO + default: + return errors.New("invalid value '" + enumValue + "' for WinnerCommitment enum") + } + + return nil +} + +func (e WinnerCommitment) String() string { + return string(e) +} + +func WinnerCommitmentFromUint8(v uint8) WinnerCommitment { + switch v { + case 0: + return WinnerCommitment_NONE + case 1: + return WinnerCommitment_ONE + case 2: //nolint: mnd + return WinnerCommitment_TWO + default: + return WinnerCommitment_NONE // default to NONE for unknown values + } +} + +type StateHash struct { + InputEpochApplicationID int64 `sql:"primary_key" json:"-"` + EpochIndex uint64 `json:"epoch_index"` + InputIndex uint64 `json:"input_index"` + Index uint64 `sql:"primary_key" json:"index"` + MachineHash common.Hash `json:"machine_hash"` + Repetitions uint64 `json:"repetitions"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (s *StateHash) MarshalJSON() ([]byte, error) { + // Create an alias to avoid infinite recursion in MarshalJSON. + type Alias StateHash + // Define a new structure that embeds the alias but overrides the hex fields. + aux := &struct { + EpochIndex string `json:"epoch_index"` + InputIndex string `json:"input_index"` + Index string `json:"index"` + Repetitions string `json:"repetitions"` + *Alias + }{ + EpochIndex: fmt.Sprintf("0x%x", s.EpochIndex), + InputIndex: fmt.Sprintf("0x%x", s.InputIndex), + Index: fmt.Sprintf("0x%x", s.Index), + Repetitions: fmt.Sprintf("0x%x", s.Repetitions), + Alias: (*Alias)(s), + } + return json.Marshal(aux) +} + +func (s *StateHash) UnmarshalJSON(data []byte) error { + // Create an alias to avoid infinite recursion in UnmarshalJSON. + type Alias StateHash + // Define a new structure that embeds the alias but overrides the hex fields. 
+ aux := &struct { + EpochIndex string `json:"epoch_index"` + InputIndex string `json:"input_index"` + Index string `json:"index"` + Repetitions string `json:"repetitions"` + *Alias + }{ + Alias: (*Alias)(s), + } + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + + if aux.EpochIndex != "" { + val, err := ParseHexUint64(aux.EpochIndex) + if err != nil { + return fmt.Errorf("invalid epoch_index: %w", err) + } + s.EpochIndex = val + } + + if aux.InputIndex != "" { + val, err := ParseHexUint64(aux.InputIndex) + if err != nil { + return fmt.Errorf("invalid input_index: %w", err) + } + s.InputIndex = val + } + + if aux.Index != "" { + val, err := ParseHexUint64(aux.Index) + if err != nil { + return fmt.Errorf("invalid index: %w", err) + } + s.Index = val + } + + if aux.Repetitions != "" { + val, err := ParseHexUint64(aux.Repetitions) + if err != nil { + return fmt.Errorf("invalid repetitions: %w", err) + } + s.Repetitions = val + } + + return nil +} + func Pointer[T any](v T) *T { return &v } diff --git a/internal/repository/postgres/application.go b/internal/repository/postgres/application.go index 2d03c7570..a30ce142c 100644 --- a/internal/repository/postgres/application.go +++ b/internal/repository/postgres/application.go @@ -40,6 +40,7 @@ func (r *PostgresRepository) CreateApplication( table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, ). VALUES( @@ -50,13 +51,14 @@ func (r *PostgresRepository) CreateApplication( app.TemplateHash, app.TemplateURI, app.EpochLength, - app.DataAvailability[:], + app.DataAvailability, app.ConsensusType, app.State, app.IInputBoxBlock, app.LastEpochCheckBlock, app.LastInputCheckBlock, app.LastOutputCheckBlock, + app.LastTournamentCheckBlock, app.ProcessedInputs, ). 
RETURNING(table.Application.ID) @@ -158,6 +160,7 @@ func (r *PostgresRepository) GetApplication( table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, table.Application.CreatedAt, table.Application.UpdatedAt, @@ -207,6 +210,7 @@ func (r *PostgresRepository) GetApplication( &app.LastEpochCheckBlock, &app.LastInputCheckBlock, &app.LastOutputCheckBlock, + &app.LastTournamentCheckBlock, &app.ProcessedInputs, &app.CreatedAt, &app.UpdatedAt, @@ -286,6 +290,7 @@ func (r *PostgresRepository) UpdateApplication( table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, ). SET( @@ -296,7 +301,7 @@ func (r *PostgresRepository) UpdateApplication( app.TemplateHash, app.TemplateURI, app.EpochLength, - app.DataAvailability[:], + app.DataAvailability, app.ConsensusType, app.State, app.Reason, @@ -304,6 +309,7 @@ func (r *PostgresRepository) UpdateApplication( app.LastEpochCheckBlock, app.LastInputCheckBlock, app.LastOutputCheckBlock, + app.LastTournamentCheckBlock, app.ProcessedInputs, ). 
WHERE(table.Application.ID.EQ(postgres.Int(app.ID))) @@ -344,6 +350,20 @@ func getColumnForEvent(event model.MonitoredEvent) (postgres.ColumnFloat, error) return table.Application.LastInputCheckBlock, nil case model.MonitoredEvent_OutputExecuted: return table.Application.LastOutputCheckBlock, nil + case model.MonitoredEvent_CommitmentJoined: + fallthrough + case model.MonitoredEvent_MatchAdvanced: + fallthrough + case model.MonitoredEvent_MatchCreated: + fallthrough + case model.MonitoredEvent_MatchDeleted: + fallthrough + case model.MonitoredEvent_NewInnerTournament: + return table.Application.LastTournamentCheckBlock, nil + case model.MonitoredEvent_ClaimSubmitted: + fallthrough + case model.MonitoredEvent_ClaimAccepted: + fallthrough default: return nil, fmt.Errorf("invalid monitored event type: %v", event) } @@ -517,6 +537,7 @@ func (r *PostgresRepository) ListApplications( table.Application.LastEpochCheckBlock, table.Application.LastInputCheckBlock, table.Application.LastOutputCheckBlock, + table.Application.LastTournamentCheckBlock, table.Application.ProcessedInputs, table.Application.CreatedAt, table.Application.UpdatedAt, @@ -605,6 +626,7 @@ func (r *PostgresRepository) ListApplications( &app.LastEpochCheckBlock, &app.LastInputCheckBlock, &app.LastOutputCheckBlock, + &app.LastTournamentCheckBlock, &app.ProcessedInputs, &app.CreatedAt, &app.UpdatedAt, diff --git a/internal/repository/postgres/bulk.go b/internal/repository/postgres/bulk.go index e15bea89b..63cbe0cc3 100644 --- a/internal/repository/postgres/bulk.go +++ b/internal/repository/postgres/bulk.go @@ -101,6 +101,33 @@ func getReportNextIndex( return currentIndex, nil } +func getStateHashNextIndex( + ctx context.Context, + tx pgx.Tx, + appID int64, + epochIndex uint64, +) (uint64, error) { + + query := table.StateHashes.SELECT( + postgres.COALESCE( + postgres.Float(1).ADD(postgres.MAXf(table.StateHashes.Index)), + postgres.Float(0), + ), + ).WHERE( + 
table.StateHashes.InputEpochApplicationID.EQ(postgres.Int64(appID)). + AND(table.StateHashes.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))), + ) + + queryStr, args := query.Sql() + var currentIndex uint64 + err := tx.QueryRow(ctx, queryStr, args...).Scan(¤tIndex) + if err != nil { + err = fmt.Errorf("failed to get the next state hash index: %w", err) + return 0, errors.Join(err, tx.Rollback(ctx)) + } + return currentIndex, nil +} + func insertOutputs( ctx context.Context, tx pgx.Tx, @@ -179,6 +206,59 @@ func insertReports( return nil } +func insertStateHashes( + ctx context.Context, + tx pgx.Tx, + appID int64, + epochIndex uint64, + inputIndex uint64, + hashes [][32]byte, + machineHash common.Hash, + remainingMetaCycles uint64, +) error { + + nextIndex, err := getStateHashNextIndex(ctx, tx, appID, epochIndex) + if err != nil { + return err + } + + stmt := table.StateHashes.INSERT( + table.StateHashes.InputEpochApplicationID, + table.StateHashes.EpochIndex, + table.StateHashes.InputIndex, + table.StateHashes.Index, + table.StateHashes.MachineHash, + table.StateHashes.Repetitions, + ) + + for i, h := range hashes { + stmt = stmt.VALUES( + appID, + epochIndex, + inputIndex, + nextIndex+uint64(i), + h[:], + 1, + ) + } + + stmt = stmt.VALUES( + appID, + epochIndex, + inputIndex, + nextIndex+uint64(len(hashes)), + machineHash[:], + remainingMetaCycles, + ) + + sqlStr, args := stmt.Sql() + _, err = tx.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + func updateInput( ctx context.Context, tx pgx.Tx, @@ -267,7 +347,14 @@ func (r *PostgresRepository) StoreAdvanceResult( } } - err = updateInput(ctx, tx, appID, res.InputIndex, res.Status, res.OutputsHash, *res.MachineHash) + if res.IsDaveConsensus { + err = insertStateHashes(ctx, tx, appID, res.EpochIndex, res.InputIndex, res.Hashes, res.MachineHash, res.RemainingMetaCycles) + if err != nil { + return err + } + } + + err = updateInput(ctx, tx, appID, res.InputIndex, res.Status, res.OutputsHash, res.MachineHash) if err != nil { return err } @@ -295,11 +382,13 @@ func updateEpochClaim( UPDATE( table.Epoch.MachineHash, table.Epoch.ClaimHash, + table.Epoch.Commitment, table.Epoch.Status, ). SET( e.MachineHash, e.ClaimHash, + e.Commitment, postgres.NewEnumValue(model.EpochStatus_ClaimComputed.String()), ). WHERE( @@ -400,26 +489,223 @@ func (r *PostgresRepository) StoreClaimAndProofs(ctx context.Context, epoch *mod return nil } -func (r *PostgresRepository) UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error { - updStmt := table.Input. +func insertCommitments(ctx context.Context, tx pgx.Tx, appID int64, commitments []*model.Commitment) error { + if len(commitments) < 1 { + return nil + } + + stmt := table.Commitments.INSERT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + ) + for _, c := range commitments { + stmt = stmt.VALUES( + appID, + c.EpochIndex, + c.TournamentAddress, + c.Commitment, + c.FinalStateHash, + c.SubmitterAddress, + c.BlockNumber, + c.TxHash, + ) + } + + sqlStr, args := stmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func insertMatches(ctx context.Context, tx pgx.Tx, appID int64, matches []*model.Match) error { + if len(matches) < 1 { + return nil + } + + stmt := table.Matches.INSERT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ) + for _, m := range matches { + stmt = stmt.VALUES( + appID, + m.EpochIndex, + m.TournamentAddress, + m.IDHash, + m.CommitmentOne, + m.CommitmentTwo, + m.LeftOfTwo, + m.BlockNumber, + m.TxHash, + m.Winner, + m.DeletionReason, + m.DeletionBlockNumber, + m.DeletionTxHash, + ) + } + + sqlStr, args := stmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func insertMatchAdvanced(ctx context.Context, tx pgx.Tx, appID int64, matchAdvanced []*model.MatchAdvanced) error { + if len(matchAdvanced) < 1 { + return nil + } + + stmt := table.MatchAdvances.INSERT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + ) + for _, ma := range matchAdvanced { + stmt = stmt.VALUES( + appID, + ma.EpochIndex, + ma.TournamentAddress, + ma.IDHash, + ma.OtherParent, + ma.LeftNode, + ma.BlockNumber, + ma.TxHash, + ) + } + + sqlStr, args := stmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func updateMatches(ctx context.Context, tx pgx.Tx, appID int64, matches []*model.Match) error { + for _, m := range matches { + updStmt := table.Matches.UPDATE( + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ).SET( + m.Winner, + m.DeletionReason, + m.DeletionBlockNumber, + m.DeletionTxHash, + ).WHERE( + table.Matches.ApplicationID.EQ(postgres.Int64(appID)). + AND(table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)))). + AND(table.Matches.TournamentAddress.EQ(postgres.Bytea(m.TournamentAddress.Bytes()))). + AND(table.Matches.IDHash.EQ(postgres.Bytea(m.IDHash.Bytes()))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := tx.Exec(ctx, sqlStr, args...) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + if cmd.RowsAffected() == 0 { + return errors.Join( + fmt.Errorf("no match found for update: app %d, epoch %d, tournament %s, idHash %s", m.ApplicationID, m.EpochIndex, m.TournamentAddress.Hex(), m.IDHash.Hex()), + tx.Rollback(ctx), + ) + } + } + return nil +} + +func updateLastProcessedBlock(ctx context.Context, tx pgx.Tx, appID int64, lastProcessedBlock uint64) error { + lastBlock := postgres.RawFloat(fmt.Sprintf("%d", lastProcessedBlock)) + appUpdateStmt := table.Application. UPDATE( - table.Input.SnapshotURI, + table.Application.LastTournamentCheckBlock, ). SET( - snapshotURI, + lastBlock, ). - WHERE( - table.Input.EpochApplicationID.EQ(postgres.Int64(appId)). - AND(table.Input.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", inputIndex)))), - ) + WHERE(postgres.AND( + table.Application.ID.EQ(postgres.Int64(appID)), + table.Application.LastTournamentCheckBlock.LT(lastBlock), + )) - sqlStr, args := updStmt.Sql() - cmd, err := r.db.Exec(ctx, sqlStr, args...) + sqlStr, args := appUpdateStmt.Sql() + _, err := tx.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil +} + +func (r *PostgresRepository) StoreTournamentEvents( + ctx context.Context, + appID int64, + commitments []*model.Commitment, + matches []*model.Match, + matchAdvanced []*model.MatchAdvanced, + matchDeleted []*model.Match, + lastProcessedBlock uint64, +) error { + tx, err := r.db.Begin(ctx) if err != nil { return err } - if cmd.RowsAffected() == 0 { - return fmt.Errorf("no input found with appId %d and index %d", appId, inputIndex) + + err = insertCommitments(ctx, tx, appID, commitments) + if err != nil { + return err + } + + err = insertMatches(ctx, tx, appID, matches) + if err != nil { + return err + } + + err = insertMatchAdvanced(ctx, tx, appID, matchAdvanced) + if err != nil { + return err + } + + err = updateMatches(ctx, tx, appID, matchDeleted) + if err != nil { + return err } + + err = updateLastProcessedBlock(ctx, tx, appID, lastProcessedBlock) + if err != nil { + return err + } + + err = tx.Commit(ctx) + if err != nil { + return errors.Join(err, tx.Rollback(ctx)) + } + return nil } diff --git a/internal/repository/postgres/commitment.go b/internal/repository/postgres/commitment.go new file mode 100644 index 000000000..7211686f9 --- /dev/null +++ b/internal/repository/postgres/commitment.go @@ -0,0 +1,226 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "encoding/hex" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ CommitmentRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateCommitment( + ctx context.Context, + nameOrAddress string, + c 
*model.Commitment, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", c.EpochIndex)), + postgres.Bytea(c.TournamentAddress.Bytes()), + postgres.Bytea(c.Commitment.Bytes()), + postgres.Bytea(c.FinalStateHash.Bytes()), + postgres.Bytea(c.SubmitterAddress.Bytes()), + postgres.RawFloat(fmt.Sprintf("%d", c.BlockNumber)), + postgres.Bytea(c.TxHash.Bytes()), + ).WHERE( + whereClause, + ) + + insertStmt := table.Commitments.INSERT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + ).QUERY( + selectQuery, + ) + + sqlStr, args := insertStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + + return err +} + +func (r *PostgresRepository) GetCommitment( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + commitmentHex string, +) (*model.Commitment, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddr := common.HexToAddress(tournamentAddress) + commitment, err := hex.DecodeString(commitmentHex) + if err != nil { + return nil, fmt.Errorf("invalid commitment hex: %w", err) + } + + sel := table.Commitments. + SELECT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + table.Commitments.CreatedAt, + table.Commitments.UpdatedAt, + ). + FROM( + table.Commitments. 
+ INNER_JOIN(table.Application, + table.Commitments.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Commitments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))). + AND(table.Commitments.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))). + AND(table.Commitments.Commitment.EQ(postgres.Bytea(commitment))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var c model.Commitment + err = row.Scan( + &c.ApplicationID, + &c.EpochIndex, + &c.TournamentAddress, + &c.Commitment, + &c.FinalStateHash, + &c.SubmitterAddress, + &c.BlockNumber, + &c.TxHash, + &c.CreatedAt, + &c.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &c, nil +} + +func (r *PostgresRepository) ListCommitments( + ctx context.Context, + nameOrAddress string, + f repository.CommitmentFilter, + p repository.Pagination, + descending bool, +) ([]*model.Commitment, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.Commitments. + SELECT( + table.Commitments.ApplicationID, + table.Commitments.EpochIndex, + table.Commitments.TournamentAddress, + table.Commitments.Commitment, + table.Commitments.FinalStateHash, + table.Commitments.SubmitterAddress, + table.Commitments.BlockNumber, + table.Commitments.TxHash, + table.Commitments.CreatedAt, + table.Commitments.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.Commitments. 
+ INNER_JOIN(table.Application, + table.Commitments.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.Commitments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + if f.TournamentAddress != nil { + tournamentAddr := common.HexToAddress(*f.TournamentAddress) + conditions = append(conditions, table.Commitments.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.Commitments.EpochIndex.DESC()) + } else { + sel = sel.ORDER_BY(table.Commitments.EpochIndex.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var commitments []*model.Commitment + var total uint64 + for rows.Next() { + var c model.Commitment + err := rows.Scan( + &c.ApplicationID, + &c.EpochIndex, + &c.TournamentAddress, + &c.Commitment, + &c.FinalStateHash, + &c.SubmitterAddress, + &c.BlockNumber, + &c.TxHash, + &c.CreatedAt, + &c.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + commitments = append(commitments, &c) + } + + return commitments, total, nil +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/matchdeletionreason.go b/internal/repository/postgres/db/rollupsdb/public/enum/matchdeletionreason.go new file mode 100644 index 000000000..45b52d52b --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/matchdeletionreason.go @@ -0,0 +1,22 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var MatchDeletionReason = &struct { + Step postgres.StringExpression + Timeout postgres.StringExpression + ChildTournament postgres.StringExpression + NotDeleted postgres.StringExpression +}{ + Step: postgres.NewEnumValue("STEP"), + Timeout: postgres.NewEnumValue("TIMEOUT"), + ChildTournament: postgres.NewEnumValue("CHILD_TOURNAMENT"), + NotDeleted: postgres.NewEnumValue("NOT_DELETED"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/enum/winnercommitment.go b/internal/repository/postgres/db/rollupsdb/public/enum/winnercommitment.go new file mode 100644 index 000000000..29be1aa40 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/enum/winnercommitment.go @@ -0,0 +1,20 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var WinnerCommitment = &struct { + None postgres.StringExpression + One postgres.StringExpression + Two postgres.StringExpression +}{ + None: postgres.NewEnumValue("NONE"), + One: postgres.NewEnumValue("ONE"), + Two: postgres.NewEnumValue("TWO"), +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/application.go b/internal/repository/postgres/db/rollupsdb/public/table/application.go index 8e77da146..851f7cd6b 100644 --- a/internal/repository/postgres/db/rollupsdb/public/table/application.go +++ b/internal/repository/postgres/db/rollupsdb/public/table/application.go @@ -17,25 +17,26 @@ type applicationTable struct { postgres.Table // Columns - ID postgres.ColumnInteger - Name postgres.ColumnString - IapplicationAddress postgres.ColumnString - IconsensusAddress postgres.ColumnString - IinputboxAddress postgres.ColumnString - IinputboxBlock 
postgres.ColumnFloat - TemplateHash postgres.ColumnString - TemplateURI postgres.ColumnString - EpochLength postgres.ColumnFloat - DataAvailability postgres.ColumnString - ConsensusType postgres.ColumnString - State postgres.ColumnString - Reason postgres.ColumnString - LastEpochCheckBlock postgres.ColumnFloat - LastInputCheckBlock postgres.ColumnFloat - LastOutputCheckBlock postgres.ColumnFloat - ProcessedInputs postgres.ColumnFloat - CreatedAt postgres.ColumnTimestampz - UpdatedAt postgres.ColumnTimestampz + ID postgres.ColumnInteger + Name postgres.ColumnString + IapplicationAddress postgres.ColumnString + IconsensusAddress postgres.ColumnString + IinputboxAddress postgres.ColumnString + IinputboxBlock postgres.ColumnFloat + TemplateHash postgres.ColumnString + TemplateURI postgres.ColumnString + EpochLength postgres.ColumnFloat + DataAvailability postgres.ColumnString + ConsensusType postgres.ColumnString + State postgres.ColumnString + Reason postgres.ColumnString + LastEpochCheckBlock postgres.ColumnFloat + LastInputCheckBlock postgres.ColumnFloat + LastOutputCheckBlock postgres.ColumnFloat + LastTournamentCheckBlock postgres.ColumnFloat + ProcessedInputs postgres.ColumnFloat + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz AllColumns postgres.ColumnList MutableColumns postgres.ColumnList @@ -76,52 +77,54 @@ func newApplicationTable(schemaName, tableName, alias string) *ApplicationTable func newApplicationTableImpl(schemaName, tableName, alias string) applicationTable { var ( - IDColumn = postgres.IntegerColumn("id") - NameColumn = postgres.StringColumn("name") - IapplicationAddressColumn = postgres.StringColumn("iapplication_address") - IconsensusAddressColumn = postgres.StringColumn("iconsensus_address") - IinputboxAddressColumn = postgres.StringColumn("iinputbox_address") - IinputboxBlockColumn = postgres.FloatColumn("iinputbox_block") - TemplateHashColumn = postgres.StringColumn("template_hash") - TemplateURIColumn = 
postgres.StringColumn("template_uri") - EpochLengthColumn = postgres.FloatColumn("epoch_length") - DataAvailabilityColumn = postgres.StringColumn("data_availability") - ConsensusTypeColumn = postgres.StringColumn("consensus_type") - StateColumn = postgres.StringColumn("state") - ReasonColumn = postgres.StringColumn("reason") - LastEpochCheckBlockColumn = postgres.FloatColumn("last_epoch_check_block") - LastInputCheckBlockColumn = postgres.FloatColumn("last_input_check_block") - LastOutputCheckBlockColumn = postgres.FloatColumn("last_output_check_block") - ProcessedInputsColumn = postgres.FloatColumn("processed_inputs") - CreatedAtColumn = postgres.TimestampzColumn("created_at") - UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - allColumns = postgres.ColumnList{IDColumn, NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, ConsensusTypeColumn, StateColumn, ReasonColumn, LastEpochCheckBlockColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} - mutableColumns = postgres.ColumnList{NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, ConsensusTypeColumn, StateColumn, ReasonColumn, LastEpochCheckBlockColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} + IDColumn = postgres.IntegerColumn("id") + NameColumn = postgres.StringColumn("name") + IapplicationAddressColumn = postgres.StringColumn("iapplication_address") + IconsensusAddressColumn = postgres.StringColumn("iconsensus_address") + IinputboxAddressColumn = postgres.StringColumn("iinputbox_address") + IinputboxBlockColumn = postgres.FloatColumn("iinputbox_block") + TemplateHashColumn = 
postgres.StringColumn("template_hash") + TemplateURIColumn = postgres.StringColumn("template_uri") + EpochLengthColumn = postgres.FloatColumn("epoch_length") + DataAvailabilityColumn = postgres.StringColumn("data_availability") + ConsensusTypeColumn = postgres.StringColumn("consensus_type") + StateColumn = postgres.StringColumn("state") + ReasonColumn = postgres.StringColumn("reason") + LastEpochCheckBlockColumn = postgres.FloatColumn("last_epoch_check_block") + LastInputCheckBlockColumn = postgres.FloatColumn("last_input_check_block") + LastOutputCheckBlockColumn = postgres.FloatColumn("last_output_check_block") + LastTournamentCheckBlockColumn = postgres.FloatColumn("last_tournament_check_block") + ProcessedInputsColumn = postgres.FloatColumn("processed_inputs") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{IDColumn, NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, ConsensusTypeColumn, StateColumn, ReasonColumn, LastEpochCheckBlockColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, LastTournamentCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{NameColumn, IapplicationAddressColumn, IconsensusAddressColumn, IinputboxAddressColumn, IinputboxBlockColumn, TemplateHashColumn, TemplateURIColumn, EpochLengthColumn, DataAvailabilityColumn, ConsensusTypeColumn, StateColumn, ReasonColumn, LastEpochCheckBlockColumn, LastInputCheckBlockColumn, LastOutputCheckBlockColumn, LastTournamentCheckBlockColumn, ProcessedInputsColumn, CreatedAtColumn, UpdatedAtColumn} ) return applicationTable{ Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), //Columns - ID: IDColumn, - Name: NameColumn, - IapplicationAddress: IapplicationAddressColumn, - 
IconsensusAddress: IconsensusAddressColumn, - IinputboxAddress: IinputboxAddressColumn, - IinputboxBlock: IinputboxBlockColumn, - TemplateHash: TemplateHashColumn, - TemplateURI: TemplateURIColumn, - EpochLength: EpochLengthColumn, - DataAvailability: DataAvailabilityColumn, - ConsensusType: ConsensusTypeColumn, - State: StateColumn, - Reason: ReasonColumn, - LastEpochCheckBlock: LastEpochCheckBlockColumn, - LastInputCheckBlock: LastInputCheckBlockColumn, - LastOutputCheckBlock: LastOutputCheckBlockColumn, - ProcessedInputs: ProcessedInputsColumn, - CreatedAt: CreatedAtColumn, - UpdatedAt: UpdatedAtColumn, + ID: IDColumn, + Name: NameColumn, + IapplicationAddress: IapplicationAddressColumn, + IconsensusAddress: IconsensusAddressColumn, + IinputboxAddress: IinputboxAddressColumn, + IinputboxBlock: IinputboxBlockColumn, + TemplateHash: TemplateHashColumn, + TemplateURI: TemplateURIColumn, + EpochLength: EpochLengthColumn, + DataAvailability: DataAvailabilityColumn, + ConsensusType: ConsensusTypeColumn, + State: StateColumn, + Reason: ReasonColumn, + LastEpochCheckBlock: LastEpochCheckBlockColumn, + LastInputCheckBlock: LastInputCheckBlockColumn, + LastOutputCheckBlock: LastOutputCheckBlockColumn, + LastTournamentCheckBlock: LastTournamentCheckBlockColumn, + ProcessedInputs: ProcessedInputsColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, AllColumns: allColumns, MutableColumns: mutableColumns, diff --git a/internal/repository/postgres/db/rollupsdb/public/table/commitments.go b/internal/repository/postgres/db/rollupsdb/public/table/commitments.go new file mode 100644 index 000000000..b67772afa --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/commitments.go @@ -0,0 +1,102 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Commitments = newCommitmentsTable("public", "commitments", "") + +type commitmentsTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + TournamentAddress postgres.ColumnString + Commitment postgres.ColumnString + FinalStateHash postgres.ColumnString + SubmitterAddress postgres.ColumnString + BlockNumber postgres.ColumnFloat + TxHash postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type CommitmentsTable struct { + commitmentsTable + + EXCLUDED commitmentsTable +} + +// AS creates new CommitmentsTable with assigned alias +func (a CommitmentsTable) AS(alias string) *CommitmentsTable { + return newCommitmentsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new CommitmentsTable with assigned schema name +func (a CommitmentsTable) FromSchema(schemaName string) *CommitmentsTable { + return newCommitmentsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new CommitmentsTable with assigned table prefix +func (a CommitmentsTable) WithPrefix(prefix string) *CommitmentsTable { + return newCommitmentsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new CommitmentsTable with assigned table suffix +func (a CommitmentsTable) WithSuffix(suffix string) *CommitmentsTable { + return newCommitmentsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newCommitmentsTable(schemaName, tableName, alias string) *CommitmentsTable { + return &CommitmentsTable{ + commitmentsTable: newCommitmentsTableImpl(schemaName, tableName, alias), + EXCLUDED: newCommitmentsTableImpl("", "excluded", ""), + } +} + +func 
newCommitmentsTableImpl(schemaName, tableName, alias string) commitmentsTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + TournamentAddressColumn = postgres.StringColumn("tournament_address") + CommitmentColumn = postgres.StringColumn("commitment") + FinalStateHashColumn = postgres.StringColumn("final_state_hash") + SubmitterAddressColumn = postgres.StringColumn("submitter_address") + BlockNumberColumn = postgres.FloatColumn("block_number") + TxHashColumn = postgres.StringColumn("tx_hash") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, TournamentAddressColumn, CommitmentColumn, FinalStateHashColumn, SubmitterAddressColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{FinalStateHashColumn, SubmitterAddressColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return commitmentsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + TournamentAddress: TournamentAddressColumn, + Commitment: CommitmentColumn, + FinalStateHash: FinalStateHashColumn, + SubmitterAddress: SubmitterAddressColumn, + BlockNumber: BlockNumberColumn, + TxHash: TxHashColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/epoch.go b/internal/repository/postgres/db/rollupsdb/public/table/epoch.go index 57d207a26..3df99bcb9 100644 --- a/internal/repository/postgres/db/rollupsdb/public/table/epoch.go +++ b/internal/repository/postgres/db/rollupsdb/public/table/epoch.go @@ -26,6 +26,7 @@ type epochTable struct { MachineHash 
postgres.ColumnString ClaimHash postgres.ColumnString ClaimTransactionHash postgres.ColumnString + Commitment postgres.ColumnString TournamentAddress postgres.ColumnString Status postgres.ColumnString VirtualIndex postgres.ColumnFloat @@ -80,13 +81,14 @@ func newEpochTableImpl(schemaName, tableName, alias string) epochTable { MachineHashColumn = postgres.StringColumn("machine_hash") ClaimHashColumn = postgres.StringColumn("claim_hash") ClaimTransactionHashColumn = postgres.StringColumn("claim_transaction_hash") + CommitmentColumn = postgres.StringColumn("commitment") TournamentAddressColumn = postgres.StringColumn("tournament_address") StatusColumn = postgres.StringColumn("status") VirtualIndexColumn = postgres.FloatColumn("virtual_index") CreatedAtColumn = postgres.TimestampzColumn("created_at") UpdatedAtColumn = postgres.TimestampzColumn("updated_at") - allColumns = postgres.ColumnList{ApplicationIDColumn, IndexColumn, FirstBlockColumn, LastBlockColumn, InputIndexLowerBoundColumn, InputIndexUpperBoundColumn, MachineHashColumn, ClaimHashColumn, ClaimTransactionHashColumn, TournamentAddressColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} - mutableColumns = postgres.ColumnList{FirstBlockColumn, LastBlockColumn, InputIndexLowerBoundColumn, InputIndexUpperBoundColumn, MachineHashColumn, ClaimHashColumn, ClaimTransactionHashColumn, TournamentAddressColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} + allColumns = postgres.ColumnList{ApplicationIDColumn, IndexColumn, FirstBlockColumn, LastBlockColumn, InputIndexLowerBoundColumn, InputIndexUpperBoundColumn, MachineHashColumn, ClaimHashColumn, ClaimTransactionHashColumn, CommitmentColumn, TournamentAddressColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{FirstBlockColumn, LastBlockColumn, InputIndexLowerBoundColumn, InputIndexUpperBoundColumn, MachineHashColumn, ClaimHashColumn, ClaimTransactionHashColumn, 
CommitmentColumn, TournamentAddressColumn, StatusColumn, VirtualIndexColumn, CreatedAtColumn, UpdatedAtColumn} ) return epochTable{ @@ -102,6 +104,7 @@ func newEpochTableImpl(schemaName, tableName, alias string) epochTable { MachineHash: MachineHashColumn, ClaimHash: ClaimHashColumn, ClaimTransactionHash: ClaimTransactionHashColumn, + Commitment: CommitmentColumn, TournamentAddress: TournamentAddressColumn, Status: StatusColumn, VirtualIndex: VirtualIndexColumn, diff --git a/internal/repository/postgres/db/rollupsdb/public/table/match_advances.go b/internal/repository/postgres/db/rollupsdb/public/table/match_advances.go new file mode 100644 index 000000000..4c1340e12 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/match_advances.go @@ -0,0 +1,102 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var MatchAdvances = newMatchAdvancesTable("public", "match_advances", "") + +type matchAdvancesTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + TournamentAddress postgres.ColumnString + IDHash postgres.ColumnString + OtherParent postgres.ColumnString + LeftNode postgres.ColumnString + BlockNumber postgres.ColumnFloat + TxHash postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type MatchAdvancesTable struct { + matchAdvancesTable + + EXCLUDED matchAdvancesTable +} + +// AS creates new MatchAdvancesTable with assigned alias +func (a MatchAdvancesTable) AS(alias string) *MatchAdvancesTable { + return newMatchAdvancesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MatchAdvancesTable with assigned schema name +func (a MatchAdvancesTable) 
FromSchema(schemaName string) *MatchAdvancesTable { + return newMatchAdvancesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MatchAdvancesTable with assigned table prefix +func (a MatchAdvancesTable) WithPrefix(prefix string) *MatchAdvancesTable { + return newMatchAdvancesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MatchAdvancesTable with assigned table suffix +func (a MatchAdvancesTable) WithSuffix(suffix string) *MatchAdvancesTable { + return newMatchAdvancesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMatchAdvancesTable(schemaName, tableName, alias string) *MatchAdvancesTable { + return &MatchAdvancesTable{ + matchAdvancesTable: newMatchAdvancesTableImpl(schemaName, tableName, alias), + EXCLUDED: newMatchAdvancesTableImpl("", "excluded", ""), + } +} + +func newMatchAdvancesTableImpl(schemaName, tableName, alias string) matchAdvancesTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + TournamentAddressColumn = postgres.StringColumn("tournament_address") + IDHashColumn = postgres.StringColumn("id_hash") + OtherParentColumn = postgres.StringColumn("other_parent") + LeftNodeColumn = postgres.StringColumn("left_node") + BlockNumberColumn = postgres.FloatColumn("block_number") + TxHashColumn = postgres.StringColumn("tx_hash") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, TournamentAddressColumn, IDHashColumn, OtherParentColumn, LeftNodeColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{LeftNodeColumn, BlockNumberColumn, TxHashColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return matchAdvancesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + 
+ //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + TournamentAddress: TournamentAddressColumn, + IDHash: IDHashColumn, + OtherParent: OtherParentColumn, + LeftNode: LeftNodeColumn, + BlockNumber: BlockNumberColumn, + TxHash: TxHashColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/matches.go b/internal/repository/postgres/db/rollupsdb/public/table/matches.go new file mode 100644 index 000000000..dfdaa2e22 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/matches.go @@ -0,0 +1,117 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Matches = newMatchesTable("public", "matches", "") + +type matchesTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + TournamentAddress postgres.ColumnString + IDHash postgres.ColumnString + CommitmentOne postgres.ColumnString + CommitmentTwo postgres.ColumnString + LeftOfTwo postgres.ColumnString + BlockNumber postgres.ColumnFloat + TxHash postgres.ColumnString + Winner postgres.ColumnString + DeletionReason postgres.ColumnString + DeletionBlockNumber postgres.ColumnFloat + DeletionTxHash postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type MatchesTable struct { + matchesTable + + EXCLUDED matchesTable +} + +// AS creates new MatchesTable with assigned alias +func (a MatchesTable) AS(alias string) *MatchesTable { + return newMatchesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MatchesTable with assigned schema name +func 
(a MatchesTable) FromSchema(schemaName string) *MatchesTable { + return newMatchesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MatchesTable with assigned table prefix +func (a MatchesTable) WithPrefix(prefix string) *MatchesTable { + return newMatchesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MatchesTable with assigned table suffix +func (a MatchesTable) WithSuffix(suffix string) *MatchesTable { + return newMatchesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMatchesTable(schemaName, tableName, alias string) *MatchesTable { + return &MatchesTable{ + matchesTable: newMatchesTableImpl(schemaName, tableName, alias), + EXCLUDED: newMatchesTableImpl("", "excluded", ""), + } +} + +func newMatchesTableImpl(schemaName, tableName, alias string) matchesTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + TournamentAddressColumn = postgres.StringColumn("tournament_address") + IDHashColumn = postgres.StringColumn("id_hash") + CommitmentOneColumn = postgres.StringColumn("commitment_one") + CommitmentTwoColumn = postgres.StringColumn("commitment_two") + LeftOfTwoColumn = postgres.StringColumn("left_of_two") + BlockNumberColumn = postgres.FloatColumn("block_number") + TxHashColumn = postgres.StringColumn("tx_hash") + WinnerColumn = postgres.StringColumn("winner") + DeletionReasonColumn = postgres.StringColumn("deletion_reason") + DeletionBlockNumberColumn = postgres.FloatColumn("deletion_block_number") + DeletionTxHashColumn = postgres.StringColumn("deletion_tx_hash") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, TournamentAddressColumn, IDHashColumn, CommitmentOneColumn, CommitmentTwoColumn, LeftOfTwoColumn, BlockNumberColumn, TxHashColumn, 
WinnerColumn, DeletionReasonColumn, DeletionBlockNumberColumn, DeletionTxHashColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{CommitmentOneColumn, CommitmentTwoColumn, LeftOfTwoColumn, BlockNumberColumn, TxHashColumn, WinnerColumn, DeletionReasonColumn, DeletionBlockNumberColumn, DeletionTxHashColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return matchesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + TournamentAddress: TournamentAddressColumn, + IDHash: IDHashColumn, + CommitmentOne: CommitmentOneColumn, + CommitmentTwo: CommitmentTwoColumn, + LeftOfTwo: LeftOfTwoColumn, + BlockNumber: BlockNumberColumn, + TxHash: TxHashColumn, + Winner: WinnerColumn, + DeletionReason: DeletionReasonColumn, + DeletionBlockNumber: DeletionBlockNumberColumn, + DeletionTxHash: DeletionTxHashColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/state_hashes.go b/internal/repository/postgres/db/rollupsdb/public/table/state_hashes.go new file mode 100644 index 000000000..3ad32cd01 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/state_hashes.go @@ -0,0 +1,96 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var StateHashes = newStateHashesTable("public", "state_hashes", "") + +type stateHashesTable struct { + postgres.Table + + // Columns + InputEpochApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + InputIndex postgres.ColumnFloat + Index postgres.ColumnFloat + MachineHash postgres.ColumnString + Repetitions postgres.ColumnInteger + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type StateHashesTable struct { + stateHashesTable + + EXCLUDED stateHashesTable +} + +// AS creates new StateHashesTable with assigned alias +func (a StateHashesTable) AS(alias string) *StateHashesTable { + return newStateHashesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new StateHashesTable with assigned schema name +func (a StateHashesTable) FromSchema(schemaName string) *StateHashesTable { + return newStateHashesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new StateHashesTable with assigned table prefix +func (a StateHashesTable) WithPrefix(prefix string) *StateHashesTable { + return newStateHashesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new StateHashesTable with assigned table suffix +func (a StateHashesTable) WithSuffix(suffix string) *StateHashesTable { + return newStateHashesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newStateHashesTable(schemaName, tableName, alias string) *StateHashesTable { + return &StateHashesTable{ + stateHashesTable: newStateHashesTableImpl(schemaName, tableName, alias), + EXCLUDED: newStateHashesTableImpl("", "excluded", ""), + } +} + +func newStateHashesTableImpl(schemaName, tableName, alias string) stateHashesTable { + var ( 
+ InputEpochApplicationIDColumn = postgres.IntegerColumn("input_epoch_application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + InputIndexColumn = postgres.FloatColumn("input_index") + IndexColumn = postgres.FloatColumn("index") + MachineHashColumn = postgres.StringColumn("machine_hash") + RepetitionsColumn = postgres.IntegerColumn("repetitions") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{InputEpochApplicationIDColumn, EpochIndexColumn, InputIndexColumn, IndexColumn, MachineHashColumn, RepetitionsColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{InputIndexColumn, MachineHashColumn, RepetitionsColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return stateHashesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + InputEpochApplicationID: InputEpochApplicationIDColumn, + EpochIndex: EpochIndexColumn, + InputIndex: InputIndexColumn, + Index: IndexColumn, + MachineHash: MachineHashColumn, + Repetitions: RepetitionsColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go b/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go index 528eff35b..9865eb4cd 100644 --- a/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go +++ b/internal/repository/postgres/db/rollupsdb/public/table/table_use_schema.go @@ -11,11 +11,16 @@ package table // this method only once at the beginning of the program. 
func UseSchema(schema string) { Application = Application.FromSchema(schema) + Commitments = Commitments.FromSchema(schema) Epoch = Epoch.FromSchema(schema) ExecutionParameters = ExecutionParameters.FromSchema(schema) Input = Input.FromSchema(schema) + MatchAdvances = MatchAdvances.FromSchema(schema) + Matches = Matches.FromSchema(schema) NodeConfig = NodeConfig.FromSchema(schema) Output = Output.FromSchema(schema) Report = Report.FromSchema(schema) SchemaMigrations = SchemaMigrations.FromSchema(schema) + StateHashes = StateHashes.FromSchema(schema) + Tournaments = Tournaments.FromSchema(schema) } diff --git a/internal/repository/postgres/db/rollupsdb/public/table/tournaments.go b/internal/repository/postgres/db/rollupsdb/public/table/tournaments.go new file mode 100644 index 000000000..11f815079 --- /dev/null +++ b/internal/repository/postgres/db/rollupsdb/public/table/tournaments.go @@ -0,0 +1,114 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Tournaments = newTournamentsTable("public", "tournaments", "") + +type tournamentsTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnInteger + EpochIndex postgres.ColumnFloat + Address postgres.ColumnString + ParentTournamentAddress postgres.ColumnString + ParentMatchIDHash postgres.ColumnString + MaxLevel postgres.ColumnInteger + Level postgres.ColumnInteger + Log2step postgres.ColumnInteger + Height postgres.ColumnInteger + WinnerCommitment postgres.ColumnString + FinalStateHash postgres.ColumnString + FinishedAtBlock postgres.ColumnFloat + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type TournamentsTable struct { + tournamentsTable + + EXCLUDED tournamentsTable +} + +// AS creates new 
TournamentsTable with assigned alias +func (a TournamentsTable) AS(alias string) *TournamentsTable { + return newTournamentsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new TournamentsTable with assigned schema name +func (a TournamentsTable) FromSchema(schemaName string) *TournamentsTable { + return newTournamentsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new TournamentsTable with assigned table prefix +func (a TournamentsTable) WithPrefix(prefix string) *TournamentsTable { + return newTournamentsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new TournamentsTable with assigned table suffix +func (a TournamentsTable) WithSuffix(suffix string) *TournamentsTable { + return newTournamentsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newTournamentsTable(schemaName, tableName, alias string) *TournamentsTable { + return &TournamentsTable{ + tournamentsTable: newTournamentsTableImpl(schemaName, tableName, alias), + EXCLUDED: newTournamentsTableImpl("", "excluded", ""), + } +} + +func newTournamentsTableImpl(schemaName, tableName, alias string) tournamentsTable { + var ( + ApplicationIDColumn = postgres.IntegerColumn("application_id") + EpochIndexColumn = postgres.FloatColumn("epoch_index") + AddressColumn = postgres.StringColumn("address") + ParentTournamentAddressColumn = postgres.StringColumn("parent_tournament_address") + ParentMatchIDHashColumn = postgres.StringColumn("parent_match_id_hash") + MaxLevelColumn = postgres.IntegerColumn("max_level") + LevelColumn = postgres.IntegerColumn("level") + Log2stepColumn = postgres.IntegerColumn("log2step") + HeightColumn = postgres.IntegerColumn("height") + WinnerCommitmentColumn = postgres.StringColumn("winner_commitment") + FinalStateHashColumn = postgres.StringColumn("final_state_hash") + FinishedAtBlockColumn = postgres.FloatColumn("finished_at_block") + CreatedAtColumn = postgres.TimestampzColumn("created_at") 
+ UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, EpochIndexColumn, AddressColumn, ParentTournamentAddressColumn, ParentMatchIDHashColumn, MaxLevelColumn, LevelColumn, Log2stepColumn, HeightColumn, WinnerCommitmentColumn, FinalStateHashColumn, FinishedAtBlockColumn, CreatedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{ParentTournamentAddressColumn, ParentMatchIDHashColumn, MaxLevelColumn, LevelColumn, Log2stepColumn, HeightColumn, WinnerCommitmentColumn, FinalStateHashColumn, FinishedAtBlockColumn, CreatedAtColumn, UpdatedAtColumn} + ) + + return tournamentsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + EpochIndex: EpochIndexColumn, + Address: AddressColumn, + ParentTournamentAddress: ParentTournamentAddressColumn, + ParentMatchIDHash: ParentMatchIDHashColumn, + MaxLevel: MaxLevelColumn, + Level: LevelColumn, + Log2step: Log2stepColumn, + Height: HeightColumn, + WinnerCommitment: WinnerCommitmentColumn, + FinalStateHash: FinalStateHashColumn, + FinishedAtBlock: FinishedAtBlockColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/internal/repository/postgres/epoch.go b/internal/repository/postgres/epoch.go index f7857a906..9b1245708 100644 --- a/internal/repository/postgres/epoch.go +++ b/internal/repository/postgres/epoch.go @@ -212,6 +212,7 @@ func (r *PostgresRepository) GetEpoch( table.Epoch.MachineHash, table.Epoch.ClaimHash, table.Epoch.ClaimTransactionHash, + table.Epoch.Commitment, table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, @@ -243,6 +244,7 @@ func (r *PostgresRepository) GetEpoch( &ep.MachineHash, &ep.ClaimHash, &ep.ClaimTransactionHash, + &ep.Commitment, &ep.TournamentAddress, &ep.Status, &ep.VirtualIndex, @@ -323,6 +325,7 @@ func (r *PostgresRepository) 
GetLastNonOpenEpoch( table.Epoch.ClaimHash, table.Epoch.ClaimTransactionHash, table.Epoch.TournamentAddress, + table.Epoch.Commitment, table.Epoch.Status, table.Epoch.VirtualIndex, table.Epoch.CreatedAt, @@ -356,6 +359,7 @@ func (r *PostgresRepository) GetLastNonOpenEpoch( &ep.ClaimHash, &ep.ClaimTransactionHash, &ep.TournamentAddress, + &ep.Commitment, &ep.Status, &ep.VirtualIndex, &ep.CreatedAt, @@ -392,6 +396,7 @@ func (r *PostgresRepository) GetEpochByVirtualIndex( table.Epoch.MachineHash, table.Epoch.ClaimHash, table.Epoch.ClaimTransactionHash, + table.Epoch.Commitment, table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, @@ -423,6 +428,7 @@ func (r *PostgresRepository) GetEpochByVirtualIndex( &ep.MachineHash, &ep.ClaimHash, &ep.ClaimTransactionHash, + &ep.Commitment, &ep.TournamentAddress, &ep.Status, &ep.VirtualIndex, @@ -482,6 +488,36 @@ func (r *PostgresRepository) UpdateEpoch( return nil } +func (r *PostgresRepository) UpdateEpochCommitment( + ctx context.Context, + appID int64, + epochIndex uint64, + commitment []byte, +) error { + + updStmt := table.Epoch. + UPDATE( + table.Epoch.Commitment, + ). + SET( + commitment, + ). + WHERE( + table.Epoch.ApplicationID.EQ(postgres.Int64(appID)). + AND(table.Epoch.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + func (r *PostgresRepository) UpdateEpochStatus( ctx context.Context, nameOrAddress string, @@ -630,6 +666,7 @@ func (r *PostgresRepository) ListEpochs( table.Epoch.MachineHash, table.Epoch.ClaimHash, table.Epoch.ClaimTransactionHash, + table.Epoch.Commitment, table.Epoch.TournamentAddress, table.Epoch.Status, table.Epoch.VirtualIndex, @@ -690,6 +727,7 @@ func (r *PostgresRepository) ListEpochs( &ep.MachineHash, &ep.ClaimHash, &ep.ClaimTransactionHash, + &ep.Commitment, &ep.TournamentAddress, &ep.Status, &ep.VirtualIndex, diff --git a/internal/repository/postgres/input.go b/internal/repository/postgres/input.go index ba7f829a6..4048e3dc5 100644 --- a/internal/repository/postgres/input.go +++ b/internal/repository/postgres/input.go @@ -413,3 +413,27 @@ func (r *PostgresRepository) GetNumberOfInputs( } return count, nil } + +func (r *PostgresRepository) UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error { + updStmt := table.Input. + UPDATE( + table.Input.SnapshotURI, + ). + SET( + snapshotURI, + ). + WHERE( + table.Input.EpochApplicationID.EQ(postgres.Int64(appId)). + AND(table.Input.Index.EQ(postgres.RawFloat(fmt.Sprintf("%d", inputIndex)))), + ) + + sqlStr, args := updStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) 
+ if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return fmt.Errorf("no input found with appId %d and index %d", appId, inputIndex) + } + return nil +} diff --git a/internal/repository/postgres/match.go b/internal/repository/postgres/match.go new file mode 100644 index 000000000..371f94dd9 --- /dev/null +++ b/internal/repository/postgres/match.go @@ -0,0 +1,298 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ MatchRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateMatch( + ctx context.Context, + nameOrAddress string, + m *model.Match, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)), + postgres.Bytea(m.TournamentAddress.Bytes()), + postgres.Bytea(m.IDHash.Bytes()), + postgres.Bytea(m.CommitmentOne.Bytes()), + postgres.Bytea(m.CommitmentTwo.Bytes()), + postgres.Bytea(m.LeftOfTwo.Bytes()), + postgres.RawFloat(fmt.Sprintf("%d", m.BlockNumber)), + postgres.Bytea(m.TxHash.Bytes()), + postgres.NewEnumValue(m.Winner.String()), + postgres.NewEnumValue(m.DeletionReason.String()), + postgres.RawFloat(fmt.Sprintf("%d", m.DeletionBlockNumber)), + postgres.Bytea(m.DeletionTxHash.Bytes()), + ).WHERE( + whereClause, + ) + + insertStmt := table.Matches.INSERT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + 
table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ).QUERY( + selectQuery, + ) + + sqlStr, args := insertStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + + return err +} + +func (r *PostgresRepository) UpdateMatch( + ctx context.Context, + nameOrAddress string, + m *model.Match, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + updateStmt := table.Matches. + UPDATE( + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + ). + SET( + m.Winner, + m.DeletionReason, + m.DeletionBlockNumber, + postgres.Bytea(m.DeletionTxHash.Bytes()), + ). + FROM( + table.Application, + ). + WHERE( + whereClause. + AND(table.Matches.ApplicationID.EQ(postgres.Int(m.ApplicationID))). + AND(table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)))). + AND(table.Matches.TournamentAddress.EQ(postgres.Bytea(m.TournamentAddress.Bytes()))). + AND(table.Matches.IDHash.EQ(postgres.Bytea(m.IDHash.Bytes()))), + ) + + sqlStr, args := updateStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *PostgresRepository) GetMatch( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + idHashHex string, +) (*model.Match, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddr := common.HexToAddress(tournamentAddress) + idHash := common.HexToHash(idHashHex) + + sel := table.Matches. 
+ SELECT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + table.Matches.CreatedAt, + table.Matches.UpdatedAt, + ). + FROM( + table.Matches. + INNER_JOIN(table.Application, + table.Matches.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))). + AND(table.Matches.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))). + AND(table.Matches.IDHash.EQ(postgres.Bytea(idHash.Bytes()))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var m model.Match + err = row.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.CommitmentOne, + &m.CommitmentTwo, + &m.LeftOfTwo, + &m.BlockNumber, + &m.TxHash, + &m.Winner, + &m.DeletionReason, + &m.DeletionBlockNumber, + &m.DeletionTxHash, + &m.CreatedAt, + &m.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &m, nil +} + +func (r *PostgresRepository) ListMatches( + ctx context.Context, + nameOrAddress string, + f repository.MatchFilter, + p repository.Pagination, + descending bool, +) ([]*model.Match, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.Matches. 
+ SELECT( + table.Matches.ApplicationID, + table.Matches.EpochIndex, + table.Matches.TournamentAddress, + table.Matches.IDHash, + table.Matches.CommitmentOne, + table.Matches.CommitmentTwo, + table.Matches.LeftOfTwo, + table.Matches.BlockNumber, + table.Matches.TxHash, + table.Matches.Winner, + table.Matches.DeletionReason, + table.Matches.DeletionBlockNumber, + table.Matches.DeletionTxHash, + table.Matches.CreatedAt, + table.Matches.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.Matches. + INNER_JOIN(table.Application, + table.Matches.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.Matches.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + if f.TournamentAddress != nil { + tournamentAddr := common.HexToAddress(*f.TournamentAddress) + conditions = append(conditions, table.Matches.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.Matches.EpochIndex.DESC()) + } else { + sel = sel.ORDER_BY(table.Matches.EpochIndex.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) 
+ if err != nil { + return nil, 0, err + } + defer rows.Close() + + var matches []*model.Match + var total uint64 + for rows.Next() { + var m model.Match + err := rows.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.CommitmentOne, + &m.CommitmentTwo, + &m.LeftOfTwo, + &m.BlockNumber, + &m.TxHash, + &m.Winner, + &m.DeletionReason, + &m.DeletionBlockNumber, + &m.DeletionTxHash, + &m.CreatedAt, + &m.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + matches = append(matches, &m) + } + + return matches, total, nil +} diff --git a/internal/repository/postgres/match_advanced.go b/internal/repository/postgres/match_advanced.go new file mode 100644 index 000000000..97db8a5ce --- /dev/null +++ b/internal/repository/postgres/match_advanced.go @@ -0,0 +1,231 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "encoding/hex" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ MatchAdvancedRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateMatchAdvanced( + ctx context.Context, + nameOrAddress string, + m *model.MatchAdvanced, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", m.EpochIndex)), + postgres.Bytea(m.TournamentAddress.Bytes()), + postgres.Bytea(m.IDHash.Bytes()), + postgres.Bytea(m.OtherParent.Bytes()), + postgres.Bytea(m.LeftNode.Bytes()), + postgres.RawFloat(fmt.Sprintf("%d", m.BlockNumber)), + 
postgres.Bytea(m.TxHash.Bytes()), + ).WHERE( + whereClause, + ) + + insertStmt := table.MatchAdvances.INSERT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + ).QUERY( + selectQuery, + ) + + sqlStr, args := insertStmt.Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) + + return err +} + +func (r *PostgresRepository) GetMatchAdvanced( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + idHashHex string, + parentHex string, +) (*model.MatchAdvanced, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddr := common.HexToAddress(tournamentAddress) + idHash := common.HexToHash(idHashHex) + parent, err := hex.DecodeString(parentHex) + if err != nil { + return nil, fmt.Errorf("invalid parent hex: %w", err) + } + + sel := table.MatchAdvances. + SELECT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + table.MatchAdvances.CreatedAt, + table.MatchAdvances.UpdatedAt, + ). + FROM( + table.MatchAdvances. + INNER_JOIN(table.Application, + table.MatchAdvances.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.MatchAdvances.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))). + AND(table.MatchAdvances.TournamentAddress.EQ(postgres.Bytea(tournamentAddr.Bytes()))). + AND(table.MatchAdvances.IDHash.EQ(postgres.Bytea(idHash.Bytes()))). 
+ AND(table.MatchAdvances.OtherParent.EQ(postgres.Bytea(parent))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var m model.MatchAdvanced + err = row.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.OtherParent, + &m.LeftNode, + &m.BlockNumber, + &m.TxHash, + &m.CreatedAt, + &m.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &m, nil +} + +func (r *PostgresRepository) ListMatchAdvances( + ctx context.Context, + nameOrAddress string, + epochIndex uint64, + tournamentAddress string, + idHashHex string, + p repository.Pagination, + descending bool, +) ([]*model.MatchAdvanced, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.MatchAdvances. + SELECT( + table.MatchAdvances.ApplicationID, + table.MatchAdvances.EpochIndex, + table.MatchAdvances.TournamentAddress, + table.MatchAdvances.IDHash, + table.MatchAdvances.OtherParent, + table.MatchAdvances.LeftNode, + table.MatchAdvances.BlockNumber, + table.MatchAdvances.TxHash, + table.MatchAdvances.CreatedAt, + table.MatchAdvances.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.MatchAdvances. 
+ INNER_JOIN(table.Application, + table.MatchAdvances.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + conditions = append(conditions, table.MatchAdvances.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", epochIndex)))) + + tAddr := common.HexToAddress(tournamentAddress) + conditions = append(conditions, table.MatchAdvances.TournamentAddress.EQ(postgres.Bytea(tAddr.Bytes()))) + + idHash := common.HexToHash(idHashHex) + conditions = append(conditions, table.MatchAdvances.IDHash.EQ(postgres.Bytea(idHash.Bytes()))) + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.MatchAdvances.EpochIndex.DESC()) + } else { + sel = sel.ORDER_BY(table.MatchAdvances.EpochIndex.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) + if err != nil { + return nil, 0, err + } + defer rows.Close() + + var matchAdvances []*model.MatchAdvanced + var total uint64 + for rows.Next() { + var m model.MatchAdvanced + err := rows.Scan( + &m.ApplicationID, + &m.EpochIndex, + &m.TournamentAddress, + &m.IDHash, + &m.OtherParent, + &m.LeftNode, + &m.BlockNumber, + &m.TxHash, + &m.CreatedAt, + &m.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + matchAdvances = append(matchAdvances, &m) + } + + return matchAdvances, total, nil +} diff --git a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql index d97984953..c13aaafcd 100644 --- a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql +++ b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.down.sql @@ -3,6 +3,31 @@ BEGIN; +DROP TRIGGER IF EXISTS "state_hashes_set_updated_at" ON 
"match_advances"; +DROP TABLE IF EXISTS "state_hashes"; + +DROP TRIGGER IF EXISTS "match_advances_set_updated_at" ON "match_advances"; +DROP INDEX IF EXISTS "match_advances_block_number_idx"; +DROP TABLE IF EXISTS "match_advances"; + +ALTER TABLE "tournaments" DROP CONSTRAINT "tournaments_parent_match_fkey"; + +DROP TRIGGER IF EXISTS "matches_set_updated_at" ON "matches"; +DROP INDEX IF EXISTS "matches_unique_pair_idx"; +DROP INDEX IF EXISTS "matches_app_epoch_tournament_idx"; +DROP TABLE IF EXISTS "matches"; + +DROP TRIGGER IF EXISTS "commitments_set_updated_at" ON "commitments"; +DROP INDEX IF EXISTS "commitments_final_state_idx"; +DROP INDEX IF EXISTS "commitments_app_epoch_tournament_idx"; +DROP TABLE IF EXISTS "commitments"; + +DROP TRIGGER IF EXISTS "tournaments_set_updated_at" ON "tournaments"; +DROP INDEX IF EXISTS "tournaments_parent_match_nonroot_idx"; +DROP INDEX IF EXISTS "unique_root_per_epoch_idx"; +DROP INDEX IF EXISTS "tournaments_epoch_idx"; +DROP TABLE IF EXISTS "tournaments"; + DROP TRIGGER IF EXISTS "node_config_set_updated_at" ON "node_config"; DROP TABLE IF EXISTS "node_config"; @@ -35,6 +60,8 @@ DROP TABLE IF EXISTS "application"; DROP FUNCTION IF EXISTS "update_updated_at_column"; DROP FUNCTION IF EXISTS "check_hash_siblings"; +DROP TYPE IF EXISTS "WinnerCommitment"; +DROP TYPE IF EXISTS "MatchDeletionReason"; DROP TYPE IF EXISTS "Consensus"; DROP TYPE IF EXISTS "SnapshotPolicy"; DROP TYPE IF EXISTS "EpochStatus"; diff --git a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql index 6f3ae2f67..02d8d5e89 100644 --- a/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql +++ b/internal/repository/postgres/schema/migrations/000001_create_initial_schema.up.sql @@ -36,6 +36,10 @@ CREATE TYPE "SnapshotPolicy" AS ENUM ('NONE', 'EVERY_INPUT', 'EVERY_EPOCH'); CREATE TYPE "Consensus" AS ENUM ('AUTHORITY', 
'QUORUM', 'PRT'); +CREATE TYPE "MatchDeletionReason" AS ENUM ('STEP', 'TIMEOUT', 'CHILD_TOURNAMENT', 'NOT_DELETED'); + +CREATE TYPE "WinnerCommitment" AS ENUM ('NONE', 'ONE', 'TWO'); + CREATE FUNCTION "update_updated_at_column"() RETURNS TRIGGER AS $$ BEGIN @@ -82,6 +86,7 @@ CREATE TABLE "application" "last_epoch_check_block" uint64 NOT NULL, "last_input_check_block" uint64 NOT NULL, "last_output_check_block" uint64 NOT NULL, + "last_tournament_check_block" uint64 NOT NULL, "processed_inputs" uint64 NOT NULL, "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), @@ -128,6 +133,7 @@ CREATE TABLE "epoch" "machine_hash" hash, "claim_hash" hash, "claim_transaction_hash" hash, + "commitment" hash, "tournament_address" ethereum_address, "status" "EpochStatus" NOT NULL, "virtual_index" uint64 NOT NULL, @@ -159,6 +165,7 @@ CREATE TABLE "input" "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), CONSTRAINT "input_pkey" PRIMARY KEY ("epoch_application_id", "index"), + CONSTRAINT "input_epoch_index_unique" UNIQUE ("epoch_application_id", "epoch_index", "index"), CONSTRAINT "input_application_id_tx_reference_unique" UNIQUE ("epoch_application_id", "transaction_reference"), CONSTRAINT "input_epoch_id_fkey" FOREIGN KEY ("epoch_application_id", "epoch_index") REFERENCES "epoch"("application_id", "index") ON DELETE CASCADE ); @@ -189,7 +196,7 @@ CREATE TABLE "output" CREATE INDEX "output_raw_data_type_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 1 FOR 4)); -CREATE INDEX "output_raw_data_address_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 17 FOR 20) ) +CREATE INDEX "output_raw_data_address_idx" ON "output" ("input_epoch_application_id", substring("raw_data" FROM 17 FOR 20)) WHERE SUBSTRING("raw_data" FROM 1 FOR 4) IN ( E'\\x10321e8b', -- DelegateCallVoucher E'\\x237a816f' -- Voucher @@ -224,5 +231,176 @@ CREATE TABLE "node_config" 
CREATE TRIGGER "config_set_updated_at" BEFORE UPDATE ON "node_config" FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); +CREATE TABLE "tournaments" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "address" ethereum_address NOT NULL, + "parent_tournament_address" ethereum_address, + "parent_match_id_hash" hash, + "max_level" INT NOT NULL CHECK("max_level" >= 0), + "level" INT NOT NULL CHECK("level" >= 0), + "log2step" INT NOT NULL CHECK("log2step" >= 0), + "height" INT NOT NULL CHECK("height" >= 0), + "winner_commitment" hash, + "final_state_hash" hash, + "finished_at_block" uint64 DEFAULT 0, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "tournaments_pkey" PRIMARY KEY ("application_id","epoch_index","address"), + CONSTRAINT "tournaments_epoch_fkey" FOREIGN KEY ("application_id","epoch_index") + REFERENCES "epoch"("application_id","index") + ON DELETE CASCADE, + CONSTRAINT "chk_tournament_root_parent" + CHECK ( + ("level" = 0 AND "parent_tournament_address" IS NULL AND "parent_match_id_hash" IS NULL) + OR + ("level" > 0 AND "parent_tournament_address" IS NOT NULL AND "parent_match_id_hash" IS NOT NULL) + ) +); + +CREATE INDEX "tournaments_epoch_idx" + ON "tournaments"("application_id","epoch_index"); + +CREATE UNIQUE INDEX "unique_root_per_epoch_idx" + ON "tournaments"("application_id","epoch_index") + WHERE "level" = 0; + +CREATE INDEX "tournaments_parent_match_nonroot_idx" + ON "tournaments"("application_id","epoch_index","parent_tournament_address","parent_match_id_hash") + WHERE "level" > 0; + +CREATE TRIGGER "tournaments_set_updated_at" +BEFORE UPDATE ON "tournaments" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "commitments" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "tournament_address" ethereum_address NOT NULL, + "commitment" hash NOT NULL, + "final_state_hash" hash NOT NULL, + "submitter_address" 
ethereum_address NOT NULL, + "block_number" uint64 NOT NULL, + "tx_hash" hash NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "commitments_pkey" + PRIMARY KEY ("application_id","epoch_index","tournament_address","commitment"), + CONSTRAINT "commitments_tournament_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address") + REFERENCES "tournaments"("application_id","epoch_index","address") + ON DELETE CASCADE +); + +CREATE INDEX "commitments_app_epoch_tournament_idx" + ON "commitments"("application_id","epoch_index","tournament_address"); + +CREATE INDEX "commitments_final_state_idx" + ON "commitments"("final_state_hash"); + +CREATE TRIGGER "commitments_set_updated_at" +BEFORE UPDATE ON "commitments" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "matches" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "tournament_address" ethereum_address NOT NULL, + "id_hash" hash NOT NULL, + "commitment_one" hash NOT NULL, + "commitment_two" hash NOT NULL, + "left_of_two" hash NOT NULL, + "block_number" uint64 NOT NULL, + "tx_hash" hash NOT NULL, + "winner" "WinnerCommitment" NOT NULL, + "deletion_reason" "MatchDeletionReason" NOT NULL, + "deletion_block_number" uint64 DEFAULT 0, + "deletion_tx_hash" hash, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "matches_pkey" + PRIMARY KEY ("application_id","epoch_index","tournament_address","id_hash"), + + CONSTRAINT "matches_tournament_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address") + REFERENCES "tournaments"("application_id","epoch_index","address") + ON DELETE CASCADE, + + CONSTRAINT "matches_one_commitment_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address","commitment_one") + REFERENCES "commitments"("application_id","epoch_index","tournament_address","commitment") + ON DELETE 
RESTRICT, + + CONSTRAINT "matches_two_commitment_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address","commitment_two") + REFERENCES "commitments"("application_id","epoch_index","tournament_address","commitment") + ON DELETE RESTRICT +); + +CREATE INDEX "matches_app_epoch_tournament_idx" + ON "matches"("application_id","epoch_index","tournament_address"); + +CREATE UNIQUE INDEX "matches_unique_pair_idx" + ON "matches"("application_id","epoch_index","tournament_address","commitment_one","commitment_two"); + +CREATE TRIGGER "matches_set_updated_at" +BEFORE UPDATE ON "matches" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +-- Add foreign key from tournaments to matches (parent match) +ALTER TABLE "tournaments" + ADD CONSTRAINT "tournaments_parent_match_fkey" + FOREIGN KEY ("application_id","epoch_index","parent_tournament_address","parent_match_id_hash") + REFERENCES "matches"("application_id","epoch_index","tournament_address","id_hash") + ON DELETE CASCADE; + +CREATE TABLE "match_advances" +( + "application_id" INT4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "tournament_address" ethereum_address NOT NULL, + "id_hash" hash NOT NULL, -- keccak256(abi.encode(one,two)) + "other_parent" hash NOT NULL, + "left_node" hash NOT NULL, + "block_number" uint64 NOT NULL, + "tx_hash" hash NOT NULL, + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "match_advances_pkey" + PRIMARY KEY ("application_id","epoch_index","tournament_address","id_hash","other_parent"), + + CONSTRAINT "match_advances_matches_fkey" + FOREIGN KEY ("application_id","epoch_index","tournament_address","id_hash") + REFERENCES "matches"("application_id","epoch_index","tournament_address","id_hash") + ON DELETE CASCADE +); + +CREATE INDEX "match_advances_block_number_idx" + ON "match_advances"("application_id","epoch_index","tournament_address","id_hash","block_number"); + +CREATE TRIGGER 
"match_advances_set_updated_at" +BEFORE UPDATE ON "match_advances" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TABLE "state_hashes" +( + "input_epoch_application_id" int4 NOT NULL, + "epoch_index" uint64 NOT NULL, + "input_index" uint64 NOT NULL, + "index" uint64 NOT NULL, + "machine_hash" hash NOT NULL, + "repetitions" INT8 NOT NULL CHECK ("repetitions" > 0), + "created_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + "updated_at" TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT "state_hashes_pkey" PRIMARY KEY ("input_epoch_application_id", "epoch_index", "index"), + CONSTRAINT "state_hashes_input_id_fkey" FOREIGN KEY ("input_epoch_application_id", "epoch_index", "input_index") REFERENCES "input"("epoch_application_id", "epoch_index", "index") ON DELETE CASCADE +); + +CREATE TRIGGER "state_hashes_set_updated_at" BEFORE UPDATE ON "state_hashes" +FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + COMMIT; diff --git a/internal/repository/postgres/state_hash.go b/internal/repository/postgres/state_hash.go new file mode 100644 index 000000000..e41a9dbf2 --- /dev/null +++ b/internal/repository/postgres/state_hash.go @@ -0,0 +1,97 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "fmt" + + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +func (r *PostgresRepository) ListStateHashes( + ctx context.Context, + nameOrAddress string, + f repository.StateHashFilter, + p repository.Pagination, + descending bool, +) ([]*model.StateHash, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.StateHashes. 
+ SELECT( + table.StateHashes.InputEpochApplicationID, + table.StateHashes.EpochIndex, + table.StateHashes.InputIndex, + table.StateHashes.Index, + table.StateHashes.MachineHash, + table.StateHashes.Repetitions, + table.StateHashes.CreatedAt, + table.StateHashes.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.StateHashes.INNER_JOIN( + table.Application, + table.StateHashes.InputEpochApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.StateHashes.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.StateHashes.Index.DESC()) + } else { + sel = sel.ORDER_BY(table.StateHashes.Index.ASC()) + } + + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) 
+ if err != nil { + return nil, 0, err + } + defer rows.Close() + + var stateHashes []*model.StateHash + var total uint64 + for rows.Next() { + var sh model.StateHash + err := rows.Scan( + &sh.InputEpochApplicationID, + &sh.EpochIndex, + &sh.InputIndex, + &sh.Index, + &sh.MachineHash, + &sh.Repetitions, + &sh.CreatedAt, + &sh.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + stateHashes = append(stateHashes, &sh) + } + return stateHashes, total, nil +} diff --git a/internal/repository/postgres/test_only.go b/internal/repository/postgres/test_only.go index f63ef5445..ea3fa13e2 100644 --- a/internal/repository/postgres/test_only.go +++ b/internal/repository/postgres/test_only.go @@ -25,6 +25,7 @@ func (r *PostgresRepository) CreateEpoch( table.Epoch.ClaimHash, table.Epoch.ClaimTransactionHash, table.Epoch.TournamentAddress, + table.Epoch.Commitment, table.Epoch.Status, table.Epoch.VirtualIndex, ).VALUES( @@ -38,6 +39,7 @@ func (r *PostgresRepository) CreateEpoch( e.ClaimHash, e.ClaimTransactionHash, e.TournamentAddress, + e.Commitment, e.Status, e.VirtualIndex, ) diff --git a/internal/repository/postgres/tournament.go b/internal/repository/postgres/tournament.go new file mode 100644 index 000000000..cd37cdbd5 --- /dev/null +++ b/internal/repository/postgres/tournament.go @@ -0,0 +1,313 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-jet/jet/v2/postgres" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/internal/repository/postgres/db/rollupsdb/public/table" +) + +// ------------------------ TournamentRepository Methods ------------------------ // + +func (r *PostgresRepository) CreateTournament( + ctx context.Context, + nameOrAddress string, + t 
*model.Tournament, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + insertStmt := table.Tournaments. + INSERT( + table.Tournaments.ApplicationID, + table.Tournaments.EpochIndex, + table.Tournaments.Address, + table.Tournaments.ParentTournamentAddress, + table.Tournaments.ParentMatchIDHash, + table.Tournaments.MaxLevel, + table.Tournaments.Level, + table.Tournaments.Log2step, + table.Tournaments.Height, + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + ) + + parentAddress := postgres.NULL + if t.ParentTournamentAddress != nil { + parentAddress = postgres.Bytea(t.ParentTournamentAddress.Bytes()) + } + parentMatch := postgres.NULL + if t.ParentMatchIDHash != nil { + parentMatch = postgres.Bytea(t.ParentMatchIDHash.Bytes()) + } + winnerCommitment := postgres.NULL + if t.WinnerCommitment != nil { + winnerCommitment = postgres.Bytea(t.WinnerCommitment.Bytes()) + } + finalState := postgres.NULL + if t.FinalStateHash != nil { + finalState = postgres.Bytea(t.FinalStateHash.Bytes()) + } + + selectQuery := table.Application.SELECT( + table.Application.ID, + postgres.RawFloat(fmt.Sprintf("%d", t.EpochIndex)), + postgres.Bytea(t.Address.Bytes()), + parentAddress, + parentMatch, + postgres.RawFloat(fmt.Sprintf("%d", t.MaxLevel)), + postgres.RawFloat(fmt.Sprintf("%d", t.Level)), + postgres.RawFloat(fmt.Sprintf("%d", t.Log2Step)), + postgres.RawFloat(fmt.Sprintf("%d", t.Height)), + winnerCommitment, + finalState, + postgres.RawFloat(fmt.Sprintf("%d", t.FinishedAtBlock)), + ).WHERE( + whereClause, + ) + + sqlStr, args := insertStmt.QUERY(selectQuery).Sql() + _, err = r.db.Exec(ctx, sqlStr, args...) 
+ + return err +} + +func (r *PostgresRepository) UpdateTournament( + ctx context.Context, + nameOrAddress string, + t *model.Tournament, +) error { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return err + } + + winnerCommitment := postgres.NULL + if t.WinnerCommitment != nil { + winnerCommitment = postgres.Bytea(t.WinnerCommitment.Bytes()) + } + finalState := postgres.NULL + if t.FinalStateHash != nil { + finalState = postgres.Bytea(t.FinalStateHash.Bytes()) // fixed: was mistakenly assigned to winnerCommitment, leaving final_state_hash NULL and clobbering the winner commitment + } + + updateStmt := table.Tournaments. + UPDATE( + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + ). + SET( + winnerCommitment, + finalState, + t.FinishedAtBlock, + ). + FROM( + table.Application, + ). + WHERE( + whereClause. + AND(table.Tournaments.ApplicationID.EQ(postgres.Int(t.ApplicationID))). + AND(table.Tournaments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", t.EpochIndex)))). + AND(table.Tournaments.Address.EQ(postgres.Bytea(t.Address.Bytes()))), + ) + + sqlStr, args := updateStmt.Sql() + cmd, err := r.db.Exec(ctx, sqlStr, args...) + if err != nil { + return err + } + if cmd.RowsAffected() == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *PostgresRepository) GetTournament( + ctx context.Context, + nameOrAddress string, + address string, +) (*model.Tournament, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, err + } + + tournamentAddress := common.HexToAddress(address) + sel := table.Tournaments. 
+ SELECT( + table.Tournaments.ApplicationID, + table.Tournaments.EpochIndex, + table.Tournaments.Address, + table.Tournaments.ParentTournamentAddress, + table.Tournaments.ParentMatchIDHash, + table.Tournaments.MaxLevel, + table.Tournaments.Level, + table.Tournaments.Log2step, + table.Tournaments.Height, + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + table.Tournaments.CreatedAt, + table.Tournaments.UpdatedAt, + ). + FROM( + table.Tournaments. + INNER_JOIN(table.Application, + table.Tournaments.ApplicationID.EQ(table.Application.ID), + ), + ). + WHERE( + whereClause. + AND(table.Tournaments.Address.EQ(postgres.Bytea(tournamentAddress.Bytes()))), + ) + + sqlStr, args := sel.Sql() + row := r.db.QueryRow(ctx, sqlStr, args...) + + var t model.Tournament + err = row.Scan( + &t.ApplicationID, + &t.EpochIndex, + &t.Address, + &t.ParentTournamentAddress, + &t.ParentMatchIDHash, + &t.MaxLevel, + &t.Level, + &t.Log2Step, + &t.Height, + &t.WinnerCommitment, + &t.FinalStateHash, + &t.FinishedAtBlock, + &t.CreatedAt, + &t.UpdatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + return &t, nil +} + +func (r *PostgresRepository) ListTournaments( + ctx context.Context, + nameOrAddress string, + f repository.TournamentFilter, + p repository.Pagination, + descending bool, +) ([]*model.Tournament, uint64, error) { + + whereClause, err := getWhereClauseFromNameOrAddress(nameOrAddress) + if err != nil { + return nil, 0, err + } + + sel := table.Tournaments. 
+ SELECT( + table.Tournaments.ApplicationID, + table.Tournaments.EpochIndex, + table.Tournaments.Address, + table.Tournaments.ParentTournamentAddress, + table.Tournaments.ParentMatchIDHash, + table.Tournaments.MaxLevel, + table.Tournaments.Level, + table.Tournaments.Log2step, + table.Tournaments.Height, + table.Tournaments.WinnerCommitment, + table.Tournaments.FinalStateHash, + table.Tournaments.FinishedAtBlock, + table.Tournaments.CreatedAt, + table.Tournaments.UpdatedAt, + postgres.COUNT(postgres.STAR).OVER().AS("total_count"), + ). + FROM( + table.Tournaments. + INNER_JOIN(table.Application, + table.Tournaments.ApplicationID.EQ(table.Application.ID), + ), + ) + + conditions := []postgres.BoolExpression{whereClause} + if f.EpochIndex != nil { + conditions = append(conditions, table.Tournaments.EpochIndex.EQ(postgres.RawFloat(fmt.Sprintf("%d", *f.EpochIndex)))) + } + if f.Level != nil { + conditions = append(conditions, table.Tournaments.Level.EQ(postgres.RawInt(fmt.Sprintf("%d", *f.Level)))) + } + if f.ParentTournamentAddress != nil { + conditions = append(conditions, table.Tournaments.ParentTournamentAddress.EQ(postgres.Bytea(f.ParentTournamentAddress.Bytes()))) + } + if f.ParentMatchIDHash != nil { + conditions = append(conditions, table.Tournaments.ParentMatchIDHash.EQ(postgres.Bytea(f.ParentMatchIDHash.Bytes()))) + } + + sel = sel.WHERE(postgres.AND(conditions...)) + + if descending { + sel = sel.ORDER_BY(table.Tournaments.EpochIndex.DESC(), table.Tournaments.Level.DESC()) + } else { + sel = sel.ORDER_BY(table.Tournaments.EpochIndex.ASC(), table.Tournaments.Level.ASC()) + } + + // Apply pagination + if p.Limit > 0 { + sel = sel.LIMIT(int64(p.Limit)) + } + if p.Offset > 0 { + sel = sel.OFFSET(int64(p.Offset)) + } + + sqlStr, args := sel.Sql() + rows, err := r.db.Query(ctx, sqlStr, args...) 
+ if err != nil { + return nil, 0, err + } + defer rows.Close() + + var tournaments []*model.Tournament + var total uint64 + for rows.Next() { + var t model.Tournament + err := rows.Scan( + &t.ApplicationID, + &t.EpochIndex, + &t.Address, + &t.ParentTournamentAddress, + &t.ParentMatchIDHash, + &t.MaxLevel, + &t.Level, + &t.Log2Step, + &t.Height, + &t.WinnerCommitment, + &t.FinalStateHash, + &t.FinishedAtBlock, + &t.CreatedAt, + &t.UpdatedAt, + &total, + ) + if err != nil { + return nil, 0, err + } + tournaments = append(tournaments, &t) + } + + return tournaments, total, nil +} diff --git a/internal/repository/repository.go b/internal/repository/repository.go index a7963d68c..60ee98821 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -58,6 +58,27 @@ type ReportFilter struct { InputIndex *uint64 } +type StateHashFilter struct { + EpochIndex *uint64 +} + +type TournamentFilter struct { + EpochIndex *uint64 + Level *uint64 + ParentTournamentAddress *common.Address + ParentMatchIDHash *common.Hash +} + +type CommitmentFilter struct { + EpochIndex *uint64 + TournamentAddress *string +} + +type MatchFilter struct { + EpochIndex *uint64 + TournamentAddress *string +} + type ApplicationRepository interface { CreateApplication(ctx context.Context, app *Application, withExecutionParameters bool) (int64, error) GetApplication(ctx context.Context, nameOrAddress string) (*Application, error) @@ -88,6 +109,7 @@ type EpochRepository interface { UpdateEpoch(ctx context.Context, nameOrAddress string, e *Epoch) error UpdateEpochStatus(ctx context.Context, nameOrAddress string, e *Epoch) error UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) ([]uint64, error) + UpdateEpochCommitment(ctx context.Context, appID int64, epochIndex uint64, commitment []byte) error ListEpochs(ctx context.Context, nameOrAddress string, f EpochFilter, p Pagination, descending bool) ([]*Epoch, uint64, error) } @@ -99,6 +121,7 @@ type 
InputRepository interface { GetLastProcessedInput(ctx context.Context, appAddress string) (*Input, error) ListInputs(ctx context.Context, nameOrAddress string, f InputFilter, p Pagination, descending bool) ([]*Input, uint64, error) GetNumberOfInputs(ctx context.Context, nameOrAddress string) (uint64, error) + UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error } type OutputRepository interface { @@ -114,10 +137,42 @@ type ReportRepository interface { ListReports(ctx context.Context, nameOrAddress string, f ReportFilter, p Pagination, descending bool) ([]*Report, uint64, error) } +type StateHashRepository interface { + ListStateHashes(ctx context.Context, nameOrAddress string, f StateHashFilter, p Pagination, descending bool) ([]*StateHash, uint64, error) +} + +type TournamentRepository interface { + CreateTournament(ctx context.Context, nameOrAddress string, t *Tournament) error + UpdateTournament(ctx context.Context, nameOrAddress string, t *Tournament) error + GetTournament(ctx context.Context, nameOrAddress string, address string) (*Tournament, error) + ListTournaments(ctx context.Context, nameOrAddress string, f TournamentFilter, + p Pagination, descending bool) ([]*Tournament, uint64, error) +} + +type CommitmentRepository interface { + CreateCommitment(ctx context.Context, nameOrAddress string, c *Commitment) error + GetCommitment(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, commitmentHex string) (*Commitment, error) + ListCommitments(ctx context.Context, nameOrAddress string, f CommitmentFilter, p Pagination, descending bool) ([]*Commitment, uint64, error) +} + +type MatchRepository interface { + CreateMatch(ctx context.Context, nameOrAddress string, m *Match) error + UpdateMatch(ctx context.Context, nameOrAddress string, m *Match) error + GetMatch(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, idHashHex string) (*Match, 
error) + ListMatches(ctx context.Context, nameOrAddress string, f MatchFilter, p Pagination, descending bool) ([]*Match, uint64, error) +} + +type MatchAdvancedRepository interface { + CreateMatchAdvanced(ctx context.Context, nameOrAddress string, m *MatchAdvanced) error + GetMatchAdvanced(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, idHashHex string, parentHex string) (*MatchAdvanced, error) + ListMatchAdvances(ctx context.Context, nameOrAddress string, epochIndex uint64, tournamentAddress string, idHashHex string, p Pagination, descending bool) ([]*MatchAdvanced, uint64, error) +} + type BulkOperationsRepository interface { - StoreAdvanceResult(ctx context.Context, appId int64, ar *AdvanceResult) error + StoreAdvanceResult(ctx context.Context, appID int64, result *AdvanceResult) error StoreClaimAndProofs(ctx context.Context, epoch *Epoch, outputs []*Output) error - UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error + StoreTournamentEvents(ctx context.Context, appID int64, commitments []*Commitment, matches []*Match, + matchAdvanced []*MatchAdvanced, matchDeleted []*Match, lastBlock uint64) error } type NodeConfigRepository interface { @@ -165,6 +220,11 @@ type Repository interface { InputRepository OutputRepository ReportRepository + StateHashRepository + TournamentRepository + CommitmentRepository + MatchRepository + MatchAdvancedRepository BulkOperationsRepository NodeConfigRepository ClaimerRepository diff --git a/test/tooling/db/db.go b/test/tooling/db/db.go index c981b5168..747f0234f 100644 --- a/test/tooling/db/db.go +++ b/test/tooling/db/db.go @@ -22,18 +22,18 @@ func SetupTestPostgres(endpoint string) error { schema, err := schema.New(endpoint) if err != nil { - return err + return fmt.Errorf("failed to create schema: %w", err) } defer schema.Close() err = schema.Downgrade() if err != nil { - return err + return fmt.Errorf("failed to downgrade schema: %w", err) } 
err = schema.Upgrade() if err != nil { - return err + return fmt.Errorf("failed to upgrade schema: %w", err) } return nil From 82619cc975d10470ebaaf5972030e89b469bb5e1 Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Sat, 20 Sep 2025 16:57:16 -0300 Subject: [PATCH 2/8] feat(prt): initial implementation of the PRT service --- cmd/cartesi-rollups-node/root/root.go | 4 + cmd/cartesi-rollups-prt/root/root.go | 40 ++- internal/node/node.go | 12 +- internal/prt/itournament_adapter.go | 437 ++++++++++++++++++++++++++ internal/prt/prt.go | 385 ++++++++++++++++++++++- internal/prt/service.go | 100 ++++-- internal/prt/types.go | 60 ++++ 7 files changed, 1003 insertions(+), 35 deletions(-) create mode 100644 internal/prt/itournament_adapter.go create mode 100644 internal/prt/types.go diff --git a/cmd/cartesi-rollups-node/root/root.go b/cmd/cartesi-rollups-node/root/root.go index d69d502ae..8dad4c195 100644 --- a/cmd/cartesi-rollups-node/root/root.go +++ b/cmd/cartesi-rollups-node/root/root.go @@ -184,6 +184,10 @@ func run(cmd *cobra.Command, args []string) { createInfo.ClaimerClient, err = createEthClient(ctx, cfg.BlockchainHttpEndpoint.String(), logger) cobra.CheckErr(err) + logger = service.NewLogger(cfg.LogLevel, cfg.LogColor).With("service", "prt") + createInfo.PrtClient, err = createEthClient(ctx, cfg.BlockchainHttpEndpoint.String(), logger) + cobra.CheckErr(err) + createInfo.Repository, err = factory.NewRepositoryFromConnectionString(ctx, cfg.DatabaseConnection.String()) cobra.CheckErr(err) defer createInfo.Repository.Close() diff --git a/cmd/cartesi-rollups-prt/root/root.go b/cmd/cartesi-rollups-prt/root/root.go index b8ac78d4a..edb682ec7 100644 --- a/cmd/cartesi-rollups-prt/root/root.go +++ b/cmd/cartesi-rollups-prt/root/root.go @@ -5,12 +5,16 @@ package root import ( "context" + "log/slog" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/prt" 
"github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/internal/version" "github.com/cartesi/rollups-node/pkg/service" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/hashicorp/go-retryablehttp" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -25,7 +29,7 @@ var ( pollInterval string maxStartupTime string telemetryAddress string - cfg *config.ValidatorConfig + cfg *config.PrtConfig ) var Cmd = &cobra.Command{ @@ -59,7 +63,7 @@ func init() { // TODO: validate on preRunE Cmd.PreRunE = func(cmd *cobra.Command, args []string) error { var err error - cfg, err = config.LoadValidatorConfig() + cfg, err = config.LoadPrtConfig() if err != nil { return err } @@ -67,6 +71,31 @@ func init() { } } +func createEthClient(ctx context.Context, endpoint string, logger *slog.Logger) (*ethclient.Client, error) { + rclient := retryablehttp.NewClient() + rclient.Logger = logger + rclient.RetryMax = int(cfg.BlockchainHttpMaxRetries) + rclient.RetryWaitMin = cfg.BlockchainHttpRetryMinWait + rclient.RetryWaitMax = cfg.BlockchainHttpRetryMaxWait + + clientOptions := []rpc.ClientOption{ + rpc.WithHTTPClient(rclient.StandardClient()), + } + + authOpt, err := config.HTTPAuthorizationOption() + cobra.CheckErr(err) + if authOpt != nil { + clientOptions = append(clientOptions, authOpt) + } + + rpcClient, err := rpc.DialOptions(ctx, endpoint, clientOptions...) 
+ if err != nil { + return nil, err + } + + return ethclient.NewClient(rpcClient), nil +} + func run(cmd *cobra.Command, args []string) { ctx, cancel := context.WithTimeout(context.Background(), cfg.MaxStartupTime) defer cancel() @@ -79,11 +108,16 @@ func run(cmd *cobra.Command, args []string) { EnableSignalHandling: true, TelemetryCreate: true, TelemetryAddress: cfg.TelemetryAddress, - PollInterval: cfg.ValidatorPollingInterval, + PollInterval: cfg.PrtPollingInterval, }, Config: *cfg, } + var err error + logger := service.NewLogger(cfg.LogLevel, cfg.LogColor).With("service", serviceName) + createInfo.EthClient, err = createEthClient(ctx, cfg.BlockchainHttpEndpoint.String(), logger) + cobra.CheckErr(err) + createInfo.Repository, err = factory.NewRepositoryFromConnectionString(ctx, cfg.DatabaseConnection.String()) cobra.CheckErr(err) defer createInfo.Repository.Close() diff --git a/internal/node/node.go b/internal/node/node.go index a45cd5500..ef7e8ef23 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -27,6 +27,7 @@ type CreateInfo struct { Config config.NodeConfig + PrtClient *ethclient.Client ClaimerClient *ethclient.Client ReaderClient *ethclient.Client ReaderWSClient *ethclient.Client @@ -37,7 +38,6 @@ type Service struct { service.Service Children []service.IService - Client *ethclient.Client Repository repository.Repository } @@ -90,6 +90,11 @@ func createServices(ctx context.Context, c *CreateInfo, s *Service) error { ch <- newClaimer(ctx, c, s) }() + numChildren++ + go func() { + ch <- newPrt(ctx, c, s) + }() + if c.Config.FeatureJsonrpcApiEnabled { numChildren++ go func() { @@ -270,11 +275,12 @@ func newPrt(ctx context.Context, c *CreateInfo, s *Service) service.IService { LogColor: c.Config.LogColor, EnableSignalHandling: false, TelemetryCreate: false, - PollInterval: c.Config.ValidatorPollingInterval, + PollInterval: c.Config.PrtPollingInterval, ServeMux: s.ServeMux, }, + EthClient: c.PrtClient, Repository: c.Repository, - Config: 
*c.Config.ToValidatorConfig(), + Config: *c.Config.ToPrtConfig(), } prtService, err := prt.Create(ctx, &prtArgs) diff --git a/internal/prt/itournament_adapter.go b/internal/prt/itournament_adapter.go new file mode 100644 index 000000000..902e228f0 --- /dev/null +++ b/internal/prt/itournament_adapter.go @@ -0,0 +1,437 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package prt + +import ( + "math/big" + + . "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/contracts/itournament" + "github.com/cartesi/rollups-node/pkg/ethutil" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" +) + +// ITournament Wrapper +type ITournamentAdapterImpl struct { + tournament *itournament.ITournament + client *ethclient.Client + tournamentAddress common.Address + filter ethutil.Filter +} + +func NewITournamentAdapter( + tournamentAddress common.Address, + client *ethclient.Client, + filter ethutil.Filter, +) (TournamentAdapter, error) { + tournamentContract, err := itournament.NewITournament(tournamentAddress, client) + if err != nil { + return nil, err + } + return &ITournamentAdapterImpl{ + tournament: tournamentContract, + tournamentAddress: tournamentAddress, + client: client, + filter: filter, + }, nil +} + +func (a *ITournamentAdapterImpl) Result(opts *bind.CallOpts) (bool, [32]byte, [32]byte, error) { + result, error := a.tournament.ArbitrationResult(opts) + return result.Finished, result.WinnerCommitment, result.FinalState, error +} + +func (a *ITournamentAdapterImpl) Constants(opts *bind.CallOpts) (TournamentConstants, error) { + c, error := a.tournament.TournamentLevelConstants(opts) + return TournamentConstants{ + MaxLevel: c.MaxLevel, + Level: c.Level, + Log2step: c.Log2step, + Height: c.Height, + 
}, error +} + +func (a *ITournamentAdapterImpl) TimeFinished(opts *bind.CallOpts) (bool, uint64, error) { + return a.tournament.TimeFinished(opts) +} + +func buildCommitmentJoinedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events["commitmentJoined"].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveCommitmentJoinedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentCommitmentJoined, error) { + q, err := buildCommitmentJoinedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentCommitmentJoined + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseCommitmentJoined(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildMatchAdvancedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events["matchAdvanced"].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + 
+func (a *ITournamentAdapterImpl) RetrieveMatchAdvancedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentMatchAdvanced, error) { + q, err := buildMatchAdvancedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentMatchAdvanced + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseMatchAdvanced(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildMatchCreatedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events["matchCreated"].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveMatchCreatedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentMatchCreated, error) { + q, err := buildMatchCreatedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentMatchCreated + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseMatchCreated(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildMatchDeletedFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + 
c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events["matchDeleted"].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveMatchDeletedEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentMatchDeleted, error) { + q, err := buildMatchDeletedFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentMatchDeleted + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseMatchDeleted(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildNewInnerTournamentFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{c.Events["newInnerTournament"].ID}, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveNewInnerTournamentEvents( + opts *bind.FilterOpts, +) ([]*itournament.ITournamentNewInnerTournament, error) { + q, err := buildNewInnerTournamentFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := 
a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var events []*itournament.ITournamentNewInnerTournament + for log, err := range itr { + if err != nil { + return nil, err + } + ev, err := a.tournament.ParseNewInnerTournament(*log) + if err != nil { + return nil, err + } + events = append(events, ev) + } + return events, nil +} + +func buildAllEventsFilterQuery( + opts *bind.FilterOpts, + tournamentAddress common.Address, +) (q ethereum.FilterQuery, err error) { + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return q, err + } + + topics, err := abi.MakeTopics( + []any{ + c.Events[MonitoredEvent_CommitmentJoined.String()].ID, + c.Events[MonitoredEvent_MatchAdvanced.String()].ID, + c.Events[MonitoredEvent_MatchCreated.String()].ID, + c.Events[MonitoredEvent_MatchDeleted.String()].ID, + c.Events[MonitoredEvent_NewInnerTournament.String()].ID, + }, + ) + if err != nil { + return q, err + } + + q = ethereum.FilterQuery{ + Addresses: []common.Address{tournamentAddress}, + FromBlock: new(big.Int).SetUint64(opts.Start), + Topics: topics, + } + if opts.End != nil { + q.ToBlock = new(big.Int).SetUint64(*opts.End) + } + return q, err +} + +func (a *ITournamentAdapterImpl) RetrieveAllEvents( + opts *bind.FilterOpts, +) (*TournamentEvents, error) { + q, err := buildAllEventsFilterQuery(opts, a.tournamentAddress) + if err != nil { + return nil, err + } + + itr, err := a.filter.ChunkedFilterLogs(opts.Context, a.client, q) + if err != nil { + return nil, err + } + + var commitmentJoined []*itournament.ITournamentCommitmentJoined + var matchAdvanced []*itournament.ITournamentMatchAdvanced + var matchCreated []*itournament.ITournamentMatchCreated + var matchDeleted []*itournament.ITournamentMatchDeleted + var newInnerTournament []*itournament.ITournamentNewInnerTournament + + c, err := itournament.ITournamentMetaData.GetAbi() + if err != nil { + return nil, err + } + + for log, err := range itr { + if err != 
nil { + return nil, err + } + + switch log.Topics[0] { + case c.Events[MonitoredEvent_CommitmentJoined.String()].ID: + ev, err := a.tournament.ParseCommitmentJoined(*log) + if err != nil { + return nil, err + } + commitmentJoined = append(commitmentJoined, ev) + case c.Events[MonitoredEvent_MatchAdvanced.String()].ID: + ev, err := a.tournament.ParseMatchAdvanced(*log) + if err != nil { + return nil, err + } + matchAdvanced = append(matchAdvanced, ev) + case c.Events[MonitoredEvent_MatchCreated.String()].ID: + ev, err := a.tournament.ParseMatchCreated(*log) + if err != nil { + return nil, err + } + matchCreated = append(matchCreated, ev) + case c.Events[MonitoredEvent_MatchDeleted.String()].ID: + ev, err := a.tournament.ParseMatchDeleted(*log) + if err != nil { + return nil, err + } + matchDeleted = append(matchDeleted, ev) + case c.Events[MonitoredEvent_NewInnerTournament.String()].ID: + ev, err := a.tournament.ParseNewInnerTournament(*log) + if err != nil { + return nil, err + } + newInnerTournament = append(newInnerTournament, ev) + } + } + + return &TournamentEvents{ + CommitmentJoined: commitmentJoined, + MatchAdvanced: matchAdvanced, + MatchCreated: matchCreated, + MatchDeleted: matchDeleted, + NewInnerTournament: newInnerTournament, + }, nil +} diff --git a/internal/prt/prt.go b/internal/prt/prt.go index 2083daa2e..0a19096ae 100644 --- a/internal/prt/prt.go +++ b/internal/prt/prt.go @@ -7,41 +7,404 @@ import ( "context" "errors" "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" . 
"github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/pkg/contracts/idaveconsensus" ) -type PrtRepository interface { - ListApplications(ctx context.Context, f repository.ApplicationFilter, p repository.Pagination, descending bool) ([]*Application, uint64, error) +type prtRepository interface { + ListApplications(ctx context.Context, f repository.ApplicationFilter, + p repository.Pagination, descending bool) ([]*Application, uint64, error) UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error + + ListEpochs(ctx context.Context, nameOrAddress string, f repository.EpochFilter, + p repository.Pagination, descending bool) ([]*Epoch, uint64, error) + UpdateEpoch(ctx context.Context, nameOrAddress string, e *Epoch) error + UpdateEpochStatus(ctx context.Context, nameOrAddress string, e *Epoch) error + + CreateTournament(ctx context.Context, nameOrAddress string, t *Tournament) error + + CreateCommitment(ctx context.Context, nameOrAddress string, c *Commitment) error + UpdateMatch(ctx context.Context, nameOrAddress string, m *Match) error + CreateMatch(ctx context.Context, nameOrAddress string, m *Match) error + CreateMatchAdvanced(ctx context.Context, nameOrAddress string, m *MatchAdvanced) error + + StoreTournamentEvents(ctx context.Context, appID int64, commitments []*Commitment, matches []*Match, + matchAdvanced []*MatchAdvanced, matchDeleted []*Match, lastBlock uint64) error + + SaveNodeConfigRaw(ctx context.Context, key string, rawJSON []byte) error + LoadNodeConfigRaw(ctx context.Context, key string) (rawJSON []byte, createdAt, updatedAt time.Time, err error) } -func getAllRunningApplications(ctx context.Context, er PrtRepository) ([]*Application, uint64, error) { - f := repository.ApplicationFilter{State: Pointer(ApplicationState_Enabled)} - return er.ListApplications(ctx, f, repository.Pagination{}, false) +// EthClientInterface 
defines the methods we need from ethclient.Client +type EthClientInterface interface { + TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) + ChainID(ctx context.Context) (*big.Int, error) +} + +func getAllRunningApplications(ctx context.Context, r prtRepository) ([]*Application, uint64, error) { + f := repository.ApplicationFilter{State: Pointer(ApplicationState_Enabled), ConsensusType: Pointer(Consensus_PRT)} + return r.ListApplications(ctx, f, repository.Pagination{}, false) +} + +func getAllClaimComputedEpochs(ctx context.Context, r prtRepository, nameOrAddress string) ([]*Epoch, uint64, error) { + f := repository.EpochFilter{Status: Pointer(EpochStatus_ClaimComputed)} + return r.ListEpochs(ctx, nameOrAddress, f, repository.Pagination{}, false) } // setApplicationInoperable marks an application as inoperable with the given reason, // logs any error that occurs during the update, and returns an error with the reason. -func (v *Service) setApplicationInoperable(ctx context.Context, app *Application, reasonFmt string, args ...interface{}) error { +func (s *Service) setApplicationInoperable(ctx context.Context, app *Application, reasonFmt string, args ...any) error { reason := fmt.Sprintf(reasonFmt, args...) 
appAddress := app.IApplicationAddress.String() // Log the reason first - v.Logger.Error(reason, "application", appAddress) + s.Logger.Error(reason, "application", appAddress) // Update application state - err := v.repository.UpdateApplicationState(ctx, app.ID, ApplicationState_Inoperable, &reason) + err := s.repository.UpdateApplicationState(ctx, app.ID, ApplicationState_Inoperable, &reason) if err != nil { - v.Logger.Error("failed to update application state to inoperable", "app", appAddress, "err", err) + s.Logger.Error("failed to update application state to inoperable", "app", appAddress, "err", err) } // Return the error with the reason return errors.New(reason) } -func (v *Service) validateApplication(ctx context.Context, app *Application) error { - v.Logger.Debug("Starting validation", "application", app.Name) +func (s *Service) saveTournamentEvents(ctx context.Context, app *Application, epoch *Epoch, + tournamentAddress common.Address, events *TournamentEvents, lastBlock uint64) error { + commitments := make([]*Commitment, 0, len(events.CommitmentJoined)) + for _, ev := range events.CommitmentJoined { + c := Commitment{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + Commitment: ev.Commitment, + FinalStateHash: ev.FinalStateHash, + SubmitterAddress: ev.Submitter, + BlockNumber: ev.Raw.BlockNumber, + TxHash: ev.Raw.TxHash, + } + s.Logger.Info("Found CommitmentJoined event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "commitment", c.Commitment.String()) + commitments = append(commitments, &c) + } + + matches := make([]*Match, 0, len(events.MatchCreated)) + for _, ev := range events.MatchCreated { + m := Match{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + IDHash: ev.MatchIdHash, + CommitmentOne: ev.One, + CommitmentTwo: ev.Two, + LeftOfTwo: ev.LeftOfTwo, + BlockNumber: ev.Raw.BlockNumber, + TxHash: 
ev.Raw.TxHash, + Winner: WinnerCommitment_NONE, + DeletionReason: MatchDeletionReason_NOT_DELETED, + DeletionBlockNumber: 0, + DeletionTxHash: common.Hash{}, + } + s.Logger.Info("Found MatchCreated event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "id_hash", m.IDHash.String(), + "one", m.CommitmentOne.String(), + "two", m.CommitmentTwo.String(), + "leftOfTwo", m.LeftOfTwo.String()) + matches = append(matches, &m) + } + + matchAdvanced := make([]*MatchAdvanced, 0, len(events.MatchAdvanced)) + for _, ev := range events.MatchAdvanced { + m := &MatchAdvanced{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + IDHash: ev.MatchIdHash, + OtherParent: ev.OtherParent, + LeftNode: ev.LeftNode, + BlockNumber: ev.Raw.BlockNumber, + TxHash: ev.Raw.TxHash, + } + s.Logger.Info("Found MatchAdvanced event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "id_hash", m.IDHash.String(), + "other_parent", m.OtherParent.String(), + "left_node", m.LeftNode.String()) + matchAdvanced = append(matchAdvanced, m) + } + + matchDeleted := make([]*Match, 0, len(events.MatchDeleted)) + for _, ev := range events.MatchDeleted { + m := Match{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + TournamentAddress: tournamentAddress, + IDHash: ev.MatchIdHash, + CommitmentOne: ev.One, + CommitmentTwo: ev.Two, + Winner: WinnerCommitmentFromUint8(ev.WinnerCommitment), + DeletionReason: MatchDeletionReasonFromUint8(ev.Reason), + DeletionBlockNumber: ev.Raw.BlockNumber, + DeletionTxHash: ev.Raw.TxHash, + } + s.Logger.Info("Found MatchDeleted event", + "application", app.Name, + "epoch_index", epoch.Index, + "tournament", tournamentAddress.Hex(), + "id_hash", ((common.Hash)(ev.MatchIdHash)).String(), + "one", ((common.Hash)(ev.One)).String(), + "two", ((common.Hash)(ev.Two)).String(), + "winner", m.Winner.String(), + "reason", m.DeletionReason.String(), + 
) + matchDeleted = append(matchDeleted, &m) + } + + err := s.repository.StoreTournamentEvents(ctx, app.ID, commitments, matches, matchAdvanced, matchDeleted, lastBlock) + if err != nil { + s.Logger.Error("failed to save tournament events", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } return nil } + +func (s *Service) checkFinalizedEpochs(ctx context.Context, app *Application) error { + epochs, _, err := getAllClaimComputedEpochs(ctx, s.repository, app.Name) + if err != nil { + s.Logger.Error("failed to list epochs", "application", app.Name, "error", err) + return err + } + if len(epochs) == 0 { + return nil // nothing to do + } + + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return fmt.Errorf("client is not an *ethclient.Client, cannot create dave consensus bind") + } + + consensus, err := idaveconsensus.NewIDaveConsensus(app.IConsensusAddress, ethClient) + if err != nil { + s.Logger.Error("failed to bind dave consensus contract", "application", app.Name, + "consensus_address", app.IConsensusAddress.String(), "error", err) + return err + } + + for _, epoch := range epochs { + if epoch.ClaimTransactionHash == nil { // epoch not claimed on-chain yet + break + } + receipt, err := ethClient.TransactionReceipt(ctx, *epoch.ClaimTransactionHash) + if err != nil { + s.Logger.Error("failed to fetch transaction receipt for epoch", "application", app.Name, + "epoch", epoch.Index, "tx", epoch.ClaimTransactionHash, "error", err) + return err + } + + if receipt.Status != 1 { + return fmt.Errorf("EpochSealed transaction hash points to failed transaction") + } + + var event *idaveconsensus.IDaveConsensusEpochSealed + for _, vLog := range receipt.Logs { + event, err = consensus.ParseEpochSealed(*vLog) + if err != nil { + continue // Skip logs that don't match + } + } + 
if event == nil { + return fmt.Errorf("failed to find EpochSealed event in receipt logs") + + } + + if epoch.Index != event.EpochNumber.Uint64()-1 { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent index between off-chain (%d) and on-chain (%d)", + epoch.Index, epoch.Index, event.EpochNumber.Uint64()-1) + } + if *epoch.MachineHash != event.InitialMachineStateHash { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent machine hash between off-chain (%s) and on-chain (%s)", + epoch.Index, epoch.MachineHash.String(), hexutil.Encode(event.InitialMachineStateHash[:])) + } + if *epoch.ClaimHash != event.OutputsMerkleRoot { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent claim hash between off-chain (%s) and on-chain (%s)", + epoch.Index, epoch.ClaimHash.String(), hexutil.Encode(event.OutputsMerkleRoot[:])) + } + + err = s.fetchTournamentData(ctx, app, epoch, RootLevel, nil, nil, *epoch.TournamentAddress) + if err != nil { + s.Logger.Error("failed to fetch tournament data", "application", app.Name, + "epoch", epoch.Index, "tournament", epoch.TournamentAddress.String(), "error", err) + return err + } + + s.Logger.Info("Found finalized epoch. OutputsMerkleRoot matched. 
Setting claim as accepted", + "application", app.Name, + "epoch", epoch.Index, + "event_block_number", event.Raw.BlockNumber, + "claim_hash", fmt.Sprintf("%x", event.OutputsMerkleRoot), + "tx", epoch.ClaimTransactionHash, + ) + + epoch.Status = EpochStatus_ClaimAccepted + err = s.repository.UpdateEpochStatus(ctx, app.Name, epoch) + if err != nil { + s.Logger.Error("failed to update epoch status to claim accepted", "application", app.Name, "epoch", epoch.Index, "error", err) + return err + } + } + return nil +} + +func (s *Service) fetchTournamentData( + ctx context.Context, + app *Application, + epoch *Epoch, + level TournamentLevel, + parentMatchIDHash *common.Hash, + parentTournamentAddress *common.Address, + tournamentAddress common.Address, +) error { + s.Logger.Info("Fetching "+level.String()+" tournament data", "application", app.Name, "tournament", tournamentAddress.String()) + // TODO: use adapters instead of direct contract calls + // Type assertion to get the concrete client if possible + ethClient, ok := s.client.(*ethclient.Client) + if !ok { + return fmt.Errorf("client is not an *ethclient.Client, cannot create dave consensus bind") + } + + adapter, err := NewITournamentAdapter(tournamentAddress, ethClient, s.filter) + if err != nil { + s.Logger.Error("failed to create "+level.String()+" tournament adapter", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + constants, err := adapter.Constants(nil) + if err != nil { + s.Logger.Error("failed to fetch "+level.String()+" tournament constants", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + finished, timeFinished, err := adapter.TimeFinished(nil) + if err != nil { + s.Logger.Error("failed to fetch "+level.String()+" tournament finished at time", "application", app.Name, + "epoch", epoch.Index, "tournament_address", 
tournamentAddress.String(), "error", err) + return err + } + if !finished { + s.Logger.Error(level.String()+" tournament should be finished", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + _, winnerCommitment, finalState, err := adapter.Result(nil) + if err != nil { + s.Logger.Error("failed to fetch "+level.String()+" tournament result", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + if level == RootLevel && *epoch.Commitment != winnerCommitment { + return s.setApplicationInoperable(ctx, app, "Epoch %d has inconsistent commitment between off-chain (%s) and on-chain (%s)", + epoch.Index, epoch.Commitment.String(), hexutil.Encode(winnerCommitment[:])) + } + + t := Tournament{ + ApplicationID: app.ID, + EpochIndex: epoch.Index, + Address: tournamentAddress, + ParentMatchIDHash: parentMatchIDHash, + ParentTournamentAddress: parentTournamentAddress, + MaxLevel: constants.MaxLevel, + Level: constants.Level, + Log2Step: constants.Log2step, + Height: constants.Height, + WinnerCommitment: (*common.Hash)(&winnerCommitment), + FinalStateHash: (*common.Hash)(&finalState), + FinishedAtBlock: timeFinished, + } + + err = s.repository.CreateTournament(ctx, app.IApplicationAddress.Hex(), &t) + if err != nil { + s.Logger.Error("failed to create "+level.String()+" tournament in database", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + opts := &bind.FilterOpts{ + Context: ctx, + Start: epoch.LastBlock, + End: &timeFinished, // To latest block + } + + events, err := adapter.RetrieveAllEvents(opts) + if err != nil { + s.Logger.Error("failed to retrieve all events from "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", tournamentAddress.String(), "error", err) + return err + } + + 
// Print summary of events found + s.Logger.Info("Retrieved events for "+level.String()+" tournament", "address", t.Address.String(), + "commitmentJoined", len(events.CommitmentJoined), + "matchCreated", len(events.MatchCreated), + "matchAdvanced", len(events.MatchAdvanced), + "matchDeleted", len(events.MatchDeleted), + "newInnerTournament", len(events.NewInnerTournament)) + + err = s.saveTournamentEvents(ctx, app, epoch, tournamentAddress, events, t.FinishedAtBlock) + if err != nil { + s.Logger.Error("failed to save events for "+level.String()+" tournament", "application", app.Name, + "epoch", epoch.Index, "tournament_address", t.Address.String(), "error", err) + return err + } + + for _, newInner := range events.NewInnerTournament { + hashID := (common.Hash)(newInner.MatchIdHash) + childAddress := newInner.ChildTournament + + s.Logger.Info("NewInnerTournament event", "id_hash", hashID.String(), "tournament_address", childAddress.String()) + + nextLevel := level + 1 + if nextLevel > BottomLevel { + return fmt.Errorf("unexpected tournament level") + } + + err = s.fetchTournamentData(ctx, app, epoch, nextLevel, &hashID, &tournamentAddress, childAddress) + if err != nil { + s.Logger.Error("failed to fetch "+TournamentLevel(nextLevel).String()+" tournament data", "application", app.Name, + "tournament", childAddress.String(), "error", err) + return err + } + } + + return nil +} + +func (s *Service) validateApplication(ctx context.Context, app *Application) error { + s.Logger.Debug("Syncing PTR tournaments", "application", app.Name) + return s.checkFinalizedEpochs(ctx, app) +} diff --git a/internal/prt/service.go b/internal/prt/service.go index ca9f9682a..de6ac60d1 100644 --- a/internal/prt/service.go +++ b/internal/prt/service.go @@ -5,30 +5,38 @@ package prt import ( "context" + "errors" "fmt" + "math/big" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/merkle" + . 
"github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/pkg/ethutil" "github.com/cartesi/rollups-node/pkg/service" - "github.com/ethereum/go-ethereum/common" ) +type CreateInfo struct { + service.CreateInfo + Config config.PrtConfig + Repository repository.Repository + EthClient EthClientInterface +} + type Service struct { service.Service - repository PrtRepository - - // cached constants - pristineRootHash common.Hash - pristinePostContext []common.Hash + repository prtRepository + client EthClientInterface + submissionEnabled bool + filter ethutil.Filter } -type CreateInfo struct { - service.CreateInfo +const PrtConfigKey = "prt" - Config config.ValidatorConfig - - Repository repository.Repository +type PersistentConfig struct { + DefaultBlock DefaultBlock + ClaimSubmissionEnabled bool + ChainID uint64 } func Create(ctx context.Context, c *CreateInfo) (*Service, error) { @@ -45,13 +53,39 @@ func Create(ctx context.Context, c *CreateInfo) (*Service, error) { return nil, err } + if c.EthClient == nil { + return nil, fmt.Errorf("EthClient on prt service Create is nil") + } + chainID, err := c.EthClient.ChainID(ctx) + if err != nil { + return nil, err + } + if chainID.Uint64() != c.Config.BlockchainId { + return nil, fmt.Errorf("EthClient chainId mismatch: network %d != provided %d", + chainID.Uint64(), c.Config.BlockchainId) + } + s.repository = c.Repository if s.repository == nil { - return nil, fmt.Errorf("repository on validator service Create is nil") + return nil, fmt.Errorf("repository on prt service Create is nil") } - s.pristinePostContext = merkle.CreatePostContext() - s.pristineRootHash = s.pristinePostContext[merkle.TREE_DEPTH] + nodeConfig, err := s.setupPersistentConfig(ctx, &c.Config) + if err != nil { + return nil, err + } + if chainID.Uint64() != nodeConfig.ChainID { + return nil, fmt.Errorf("NodeConfig chainId mismatch: network %d != config %d", + chainID.Uint64(), 
nodeConfig.ChainID) + } + + s.client = c.EthClient + s.submissionEnabled = nodeConfig.ClaimSubmissionEnabled + s.filter = ethutil.Filter{ + MinChunkSize: ethutil.DefaultMinChunkSize, + MaxChunkSize: new(big.Int).SetUint64(c.Config.BlockchainMaxBlockRange), + Logger: s.Logger, + } return s, nil } @@ -77,10 +111,40 @@ func (s *Service) Tick() []error { } return errs } -func (s *Service) Stop(b bool) []error { + +func (s *Service) Stop(_ bool) []error { return nil } -func (v *Service) String() string { - return v.Name +func (s *Service) String() string { + return s.Name +} + +func (s *Service) setupPersistentConfig( + ctx context.Context, + c *config.PrtConfig, +) (*PersistentConfig, error) { + config, err := repository.LoadNodeConfig[PersistentConfig](ctx, s.repository, PrtConfigKey) + if config == nil && errors.Is(err, repository.ErrNotFound) { + nc := NodeConfig[PersistentConfig]{ + Key: PrtConfigKey, + Value: PersistentConfig{ + DefaultBlock: c.BlockchainDefaultBlock, + ClaimSubmissionEnabled: c.FeatureClaimSubmissionEnabled, + ChainID: c.BlockchainId, + }, + } + s.Logger.Info("Initializing PRT persistent config", "config", nc.Value) + err = repository.SaveNodeConfig(ctx, s.repository, &nc) + if err != nil { + return nil, err + } + return &nc.Value, nil + } else if err == nil { + s.Logger.Info("PRT service was already configured. Using previous persistent config", "config", config.Value) + return &config.Value, nil + } + + s.Logger.Error("Could not retrieve persistent config from Database. 
%w", "error", err) + return nil, err } diff --git a/internal/prt/types.go b/internal/prt/types.go new file mode 100644 index 000000000..743c78f01 --- /dev/null +++ b/internal/prt/types.go @@ -0,0 +1,60 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package prt + +import ( + "github.com/ethereum/go-ethereum/accounts/abi/bind" + + "github.com/cartesi/rollups-node/pkg/contracts/itournament" +) + +type TournamentConstants struct { + MaxLevel uint64 + Level uint64 + Log2step uint64 + Height uint64 +} + +// Interface for Tournament reading +type TournamentAdapter interface { + RetrieveCommitmentJoinedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentCommitmentJoined, error) + RetrieveMatchAdvancedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentMatchAdvanced, error) + RetrieveMatchCreatedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentMatchCreated, error) + RetrieveMatchDeletedEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentMatchDeleted, error) + RetrieveNewInnerTournamentEvents(opts *bind.FilterOpts) ([]*itournament.ITournamentNewInnerTournament, error) + RetrieveAllEvents(opts *bind.FilterOpts) (*TournamentEvents, error) + Result(opts *bind.CallOpts) (bool, [32]byte, [32]byte, error) + Constants(opts *bind.CallOpts) (TournamentConstants, error) + TimeFinished(opts *bind.CallOpts) (bool, uint64, error) +} + +// Struct to hold all events retrieved at once +type TournamentEvents struct { + CommitmentJoined []*itournament.ITournamentCommitmentJoined + MatchAdvanced []*itournament.ITournamentMatchAdvanced + MatchCreated []*itournament.ITournamentMatchCreated + MatchDeleted []*itournament.ITournamentMatchDeleted + NewInnerTournament []*itournament.ITournamentNewInnerTournament +} + +type TournamentLevel int + +const ( + RootLevel TournamentLevel = iota + MiddleLevel + BottomLevel +) + +func (l TournamentLevel) String() string { + switch l { + case RootLevel: + return "root" + 
case MiddleLevel: + return "middle" + case BottomLevel: + return "bottom" + default: + return "unknown" + } +} From f57dab2ccb6ffbfe4510c2c8407dd99bf406ec58 Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Mon, 6 Oct 2025 16:15:44 -0300 Subject: [PATCH 3/8] fix: ethutil filter comment typo --- pkg/ethutil/filter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/ethutil/filter.go b/pkg/ethutil/filter.go index d0439277c..d841a3635 100644 --- a/pkg/ethutil/filter.go +++ b/pkg/ethutil/filter.go @@ -87,7 +87,7 @@ func queryBlockRangeTooLarge(err error) bool { // (From, To) block ranges into multiple smaller calls when it detects the // provider rejected the query for this specific reason. Detection is a // heuristic and implemented in the function queryBlockRangeTooLarge. It -// potentially has to be adjusted to accomodate each provider. +// potentially has to be adjusted to accommodate each provider. func (f *Filter) ChunkedFilterLogs( ctx context.Context, client *ethclient.Client, From ff62ad288799d2a1e04c8b26edd3e67e38915c0c Mon Sep 17 00:00:00 2001 From: Marcelo Politzer <251334+mpolitzer@users.noreply.github.com> Date: Tue, 9 Sep 2025 14:07:46 -0300 Subject: [PATCH 4/8] feat(merkle): add builder on merkle trees data structure --- internal/merkle/builder.go | 405 ++++++++++++++++++++++++++++++++ internal/merkle/builder_test.go | 143 +++++++++++ 2 files changed, 548 insertions(+) create mode 100644 internal/merkle/builder.go create mode 100644 internal/merkle/builder_test.go diff --git a/internal/merkle/builder.go b/internal/merkle/builder.go new file mode 100644 index 000000000..18268739d --- /dev/null +++ b/internal/merkle/builder.go @@ -0,0 +1,405 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package merkle + +import ( + "fmt" + "math/big" + "slices" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/crypto" +) + +var ( + zero = big.NewInt(0) + one = big.NewInt(1) + overflowValue = new(big.Int).Lsh(one, 256) + overflowMask = new(big.Int).Sub(overflowValue, one) +) + +// MerkleProof: dave/common-rs/merkle/src/tree.rs +type Proof struct { + Pos *big.Int + Node common.Hash + Siblings []common.Hash +} + +func Leaf(node common.Hash, pos *big.Int) *Proof { + return &Proof{ + Node: node, + Pos: pos, + Siblings: nil, + } +} + +func (proof *Proof) BuildRoot() common.Hash { + two := big.NewInt(2) + rootHash := proof.Node + + for i, s := range proof.Siblings { + + // ((pos >> i) % 2) == 0 + if new(big.Int).Rem(new(big.Int).Rsh(proof.Pos, uint(i)), two).Cmp(zero) == 0 { + rootHash = crypto.Keccak256Hash(rootHash[:], s[:]) + } else { + rootHash = crypto.Keccak256Hash(s[:], rootHash[:]) + } + } + return rootHash +} + +func (proof *Proof) VerifyRoot(other common.Hash) bool { + return proof.BuildRoot() == other +} + +func (proof *Proof) PushHash(h common.Hash) { + proof.Siblings = append(proof.Siblings, h) +} + +//////////////////////////////////////////////////////////////////////////////// + +// MerkleTree: dave/common-rs/merkle/src/tree.rs +type Tree struct { + RootHash common.Hash + Height uint32 + Subtrees *InnerNode +} + +// InnerNode: dave/common-rs/merkle/src/tree.rs +// Emulate the rust enum type with a struct containing both {Pair, Iterated}. 
+type InnerNode struct { + // Pair + LHS, RHS *Tree + + // Iterated + Child *Tree +} + +func (inner *InnerNode) Valid() bool { + isPair := (inner.LHS != nil && inner.RHS != nil) + isIterated := inner.Child != nil + return (isPair || isIterated) && !(isPair && isIterated) // xor +} + +func (inner *InnerNode) Children() (*Tree, *Tree) { + if !inner.Valid() { + panic(fmt.Sprintf("invalid InnerNode state: %v\n", inner)) + } + + if inner.Child != nil { + return inner.Child, inner.Child + } else { + return inner.LHS, inner.RHS + } +} + +func TreeLeaf(hash common.Hash) *Tree { + return &Tree{ + Height: 0, + RootHash: hash, + Subtrees: nil, + } +} + +func (tree *Tree) GetRootHash() common.Hash { + return tree.RootHash +} + +func (tree *Tree) FindChildByHash(hash common.Hash) *InnerNode { + if inner := tree.Subtrees; inner != nil { + if !inner.Valid() { + panic(fmt.Sprintf("invalid InnerNode state: %v\n", inner)) + } + + if inner.Child != nil { + child := inner.Child.FindChildByHash(hash) + if child != nil { + return child + } + } else { + lhs := inner.LHS.FindChildByHash(hash) + if lhs != nil { + return lhs + } + + rhs := inner.LHS.FindChildByHash(hash) + if rhs != nil { + return rhs + } + } + } + return nil // not found +} + +func (tree *Tree) Join(other *Tree) *Tree { + return &Tree{ + RootHash: crypto.Keccak256Hash(tree.RootHash[:], other.RootHash[:]), + Height: tree.Height + 1, + Subtrees: &InnerNode{ + LHS: tree, + RHS: other, + }, + } +} + +func (tree *Tree) Iterated(rep uint64) *Tree { + root := tree + for range rep { + root = &Tree{ + RootHash: crypto.Keccak256Hash(root.RootHash[:], root.RootHash[:]), + Height: tree.Height + 1, + Subtrees: &InnerNode{ + Child: tree, + }, + } + } + return root +} + +func (tree *Tree) ProveLeaf(index *big.Int) *Proof { + return tree.ProveLeafRec(index) +} + +func (tree *Tree) ProveLast() *Proof { + // index = (1 << height) - 1 + index := new(big.Int).Sub( + new(big.Int).Lsh( + one, + uint(tree.Height), + ), + one, + ) + return 
tree.ProveLeaf(index) +} + +func (tree *Tree) ProveLeafRec(index *big.Int) *Proof { + numLeafs := new(big.Int).Lsh(one, uint(tree.Height)) + if numLeafs.Cmp(index) <= 0 { + panic(fmt.Sprintf("index out of bounds: %v, %v", numLeafs, index)) + } + + subtree := tree.Subtrees + if subtree == nil { + if index.Cmp(zero) != 0 { + panic(fmt.Sprintf("invalid Tree state: %v", tree)) + } + if tree.Height != 0 { + panic(fmt.Sprintf("invalid Tree state: %v", tree)) + } + return Leaf(tree.RootHash, index) + } + + shiftAmount := uint(tree.Height - 1) + isLeftLeaf := new(big.Int).Rsh(index, shiftAmount).Cmp(zero) == 0 + + // innerIndex = index & !(1 << shiftAmount) + innerIndex := new(big.Int).And( + index, + new(big.Int).Not( + new(big.Int).Lsh( + one, + shiftAmount, + ), + ), + ) + + lhs, rhs := subtree.Children() + if isLeftLeaf { + proof := lhs.ProveLeafRec(innerIndex) + proof.PushHash(rhs.RootHash) + proof.Pos = index + return proof + } else { + proof := rhs.ProveLeafRec(innerIndex) + proof.PushHash(lhs.RootHash) + proof.Pos = index + return proof + } +} + +//////////////////////////////////////////////////////////////////////////////// + +// Node: common-rs/merkle/src/tree_builder.rs +type Node struct { + Tree *Tree + AccumulatedCount *big.Int +} + +type Builder struct { + Trees []Node +} + +func (b *Builder) Height() (uint32, bool) { + n := len(b.Trees) + if n == 0 { + return 0, false + } + return b.Trees[n-1].Tree.Height, true +} + +func (b *Builder) Count() (*big.Int, bool) { + n := len(b.Trees) + if n == 0 { + return nil, false + } + return b.Trees[n-1].AccumulatedCount, true +} + +func (b *Builder) CanBuild() bool { + n := len(b.Trees) + if n == 0 { + return false + } + return isPow2(b.Trees[n-1].AccumulatedCount) +} + +func (b *Builder) Append(leaf *Tree) { + b.AppendRepeated(leaf, big.NewInt(1)) +} + +func (b *Builder) AppendRepeatedUint64(leaf *Tree, reps uint64) { + b.AppendRepeated(leaf, new(big.Int).SetUint64(reps)) +} + +func (b *Builder) AppendRepeated(leaf 
*Tree, reps *big.Int) { + if reps.Cmp(zero) <= 0 { + panic("invalid repetitions") + } + + accumulatedCount := b.CalculateAccumulatedCount(reps) + if height, ok := b.Height(); ok { + if height != leaf.Height { + panic("mismatched tree size") + } + } + b.Trees = append(b.Trees, Node{ + Tree: leaf, + AccumulatedCount: accumulatedCount, + }) +} + +func (b *Builder) Build() *Tree { + if count, ok := b.Count(); ok { + if !isCountPow2(count) { + panic(fmt.Sprintf("builder has %v leafs, which is not a power of two", count)) + } + log2Size := countTrailingZeroes(count) + return buildMerkle(b.Trees, log2Size, big.NewInt(0)) + } else { + panic("no leafs in the merkle builder") + } +} + +func (b *Builder) CalculateAccumulatedCount(reps *big.Int) *big.Int { + n := len(b.Trees) + if n != 0 { + if reps.Cmp(zero) == 0 { + panic("merkle builder is full") + } + + accumulatedCount := new(big.Int).And( + new(big.Int).Add(reps, b.Trees[n-1].AccumulatedCount), + overflowMask, + ) + if reps.Cmp(accumulatedCount) >= 0 { + panic("merkle tree overflow") + } + return accumulatedCount + } else { + return reps + } +} + +func buildMerkle(trees []Node, log2Size uint, stride *big.Int) *Tree { + size := new(big.Int).And( + new(big.Int).Lsh(one, log2Size), + overflowMask, + ) + + firstTime := new(big.Int).Add(new(big.Int).Mul(stride, size), one) + lastTime := new(big.Int).Mul(new(big.Int).Add(stride, one), size) + + firstCell := findCellContaining(trees, firstTime) + lastCell := findCellContaining(trees, lastTime) + + if firstCell == lastCell { + tree := trees[firstCell].Tree + iterated := tree.Iterated(uint64(log2Size)) + return iterated + } + + left := buildMerkle(trees[firstCell:(lastCell+1)], + log2Size-1, + new(big.Int).Lsh(stride, 1), + ) + + right := buildMerkle(trees[firstCell:(lastCell+1)], + log2Size-1, + new(big.Int).Add(new(big.Int).Lsh(stride, 1), one), + ) + + return left.Join(right) +} + +func findCellContaining(trees []Node, elem *big.Int) uint { + left := uint(0) + right := 
uint(len(trees) - 1) + + for left < right { + needle := left + (right-left)/2 + + x := new(big.Int).And( + new(big.Int).Sub(trees[needle].AccumulatedCount, one), + overflowMask, + ) + y := new(big.Int).And( + new(big.Int).Sub(elem, one), + overflowMask, + ) + if x.Cmp(y) < 0 { + left = needle + 1 + } else { + right = needle + } + } + return left +} + +//////////////////////////////////////////////////////////////////////////////// + +func isPow2(x *big.Int) bool { + if x.Sign() <= 0 { + return false + } + + // x & (x-1) == 0 + return new(big.Int).And( + x, + new(big.Int).Sub( + x, + one, + ), + ).Cmp(zero) == 0 +} + +func isCountPow2(x *big.Int) bool { + return x.Cmp(big.NewInt(0)) == 0 || isPow2(x) +} + +func countTrailingZeroes(x *big.Int) uint { + count := uint(0) + + // each byte from least to most significant +brk: + for _, b := range slices.Backward(x.Bytes()) { + for i := range 8 { + if b>>i&1 != 0 { + break brk + } + count++ + } + } + return count +} diff --git a/internal/merkle/builder_test.go b/internal/merkle/builder_test.go new file mode 100644 index 000000000..8d1249849 --- /dev/null +++ b/internal/merkle/builder_test.go @@ -0,0 +1,143 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package merkle + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +var ( + oneDigest = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") + zeroDigest = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") +) + +func TestIsCountPow2(t *testing.T) { + assert.True(t, isCountPow2(big.NewInt(0))) + assert.True(t, isCountPow2(big.NewInt(1))) + assert.True(t, isCountPow2(big.NewInt(2))) + assert.False(t, isCountPow2(big.NewInt(3))) + assert.True(t, isCountPow2(big.NewInt(4))) + assert.False(t, isCountPow2(big.NewInt(5))) +} + +// repanicked +//func TestRepeatZero(t 
*testing.T) { +// defer recover() +// +// builder := Builder{} +// builder.AppendRepeatedUint64(TreeLeaf(zeroHash), 0) +//} + +func TestSimple0(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(oneDigest)) + treeRoot := builder.Build().RootHash + expected := oneDigest + + assert.Equal(t, expected, treeRoot) +} + +func TestSimple1(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(zeroDigest)) + builder.Append(TreeLeaf(oneDigest)) + treeRoot := builder.Build().RootHash + + expected := TreeLeaf(zeroDigest).Join(TreeLeaf(oneDigest)).RootHash + + assert.Equal(t, expected, treeRoot) +} + +func TestSimple2(t *testing.T) { + builder := Builder{} + builder.AppendRepeatedUint64(TreeLeaf(oneDigest), 2) + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 2) + treeRoot := builder.Build().RootHash + + lhs := TreeLeaf(oneDigest).Join(TreeLeaf(oneDigest)) + rhs := TreeLeaf(zeroDigest).Join(TreeLeaf(zeroDigest)) + expected := lhs.Join(rhs).RootHash + + assert.Equal(t, expected, treeRoot) +} + +func TestSimple3(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(zeroDigest)) + builder.AppendRepeatedUint64(TreeLeaf(oneDigest), 2) + builder.Append(TreeLeaf(zeroDigest)) + treeRoot := builder.Build().RootHash + + lhs := TreeLeaf(zeroDigest).Join(TreeLeaf(oneDigest)) + rhs := TreeLeaf(oneDigest).Join(TreeLeaf(zeroDigest)) + expected := lhs.Join(rhs).RootHash + + assert.Equal(t, expected, treeRoot) +} + +func TestMerkleBuilder8(t *testing.T) { + builder := Builder{} + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 2) + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 6) + assert.True(t, builder.CanBuild()) + + merkle := builder.Build() + assert.Equal(t, merkle.RootHash, TreeLeaf(zeroDigest).Iterated(3).RootHash) +} + +func TestMerkleBuilder64(t *testing.T) { + one := big.NewInt(1) + two := big.NewInt(2) + reps := new(big.Int).Sub(new(big.Int).Lsh(one, 64), two) + + builder := Builder{} + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 
2) + builder.AppendRepeated(TreeLeaf(zeroDigest), reps) + assert.True(t, builder.CanBuild()) + + merkle := builder.Build() + assert.Equal(t, merkle.RootHash, TreeLeaf(zeroDigest).Iterated(64).RootHash) +} + +func TestMerkleBuilder256(t *testing.T) { + one := big.NewInt(1) + reps := new(big.Int).Lsh(one, 256) + + builder := Builder{} + builder.AppendRepeated(TreeLeaf(zeroDigest), reps) + assert.True(t, builder.CanBuild()) + + merkle := builder.Build() + assert.Equal(t, merkle.RootHash, TreeLeaf(zeroDigest).Iterated(256).RootHash) +} + +func TestAppendAndRepeated(t *testing.T) { + builder := Builder{} + builder.Append(TreeLeaf(zeroDigest)) + assert.True(t, builder.CanBuild()) + tree1 := builder.Build() + + builder = Builder{} + builder.AppendRepeatedUint64(TreeLeaf(zeroDigest), 1) + tree2 := builder.Build() + + assert.Equal(t, tree1, tree2) +} + +// repanicked +//func TestBuildNotPow2(t *testing.T) { +// defer recover() +// +// builder := Builder{} +// builder.Append(TreeLeaf(zeroDigest)) +// builder.Append(TreeLeaf(zeroDigest)) +// builder.Append(TreeLeaf(zeroDigest)) +// assert.False(t, builder.CanBuild()) +// +// builder.Build() +//} From 6c8f8d89639a33785520e5c0e42c3e780b7cffa0 Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Wed, 17 Dec 2025 15:24:16 -0300 Subject: [PATCH 5/8] feat: build PRT epoch commitment --- internal/advancer/advancer.go | 167 ++++++++------------------- internal/advancer/advancer_test.go | 25 +++- internal/advancer/service.go | 102 ++++++++++++++++ internal/inspect/inspect_test.go | 8 ++ internal/manager/instance.go | 61 ++++++++-- internal/manager/instance_test.go | 83 +++++-------- internal/manager/manager.go | 2 +- internal/manager/manager_test.go | 59 ++++++++-- internal/manager/types.go | 3 +- internal/validator/validator.go | 66 ++++++++++- internal/validator/validator_test.go | 6 + pkg/emulator/machine.go | 23 ++-- pkg/machine/backend.go | 20 +++- pkg/machine/backend_test.go | 37 ++++-- 
pkg/machine/implementation.go | 151 +++++++++++++++++------- pkg/machine/implementation_test.go | 90 ++++++--------- pkg/machine/libcartesi.go | 137 +++++++++++++++++++++- pkg/machine/libcartesi_test.go | 16 +-- pkg/machine/machine.go | 4 +- pkg/machine/machine_test.go | 28 +++-- test/validator/validator_test.go | 12 +- 21 files changed, 752 insertions(+), 348 deletions(-) create mode 100644 internal/advancer/service.go diff --git a/internal/advancer/advancer.go b/internal/advancer/advancer.go index 4287ee389..ddaf09970 100644 --- a/internal/advancer/advancer.go +++ b/internal/advancer/advancer.go @@ -7,17 +7,13 @@ import ( "context" "errors" "fmt" - "net/http" "os" "path" "strings" - "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/inspect" "github.com/cartesi/rollups-node/internal/manager" . "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" - "github.com/cartesi/rollups-node/pkg/service" ) var ( @@ -34,6 +30,7 @@ type AdvancerRepository interface { GetLastInput(ctx context.Context, appAddress string, epochIndex uint64) (*Input, error) StoreAdvanceResult(ctx context.Context, appID int64, ar *AdvanceResult) error UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) ([]uint64, error) + UpdateEpochCommitment(ctx context.Context, appID int64, epochIndex uint64, commitment []byte) error UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error GetEpoch(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) UpdateInputSnapshotURI(ctx context.Context, appId int64, inputIndex uint64, snapshotURI string) error @@ -41,92 +38,6 @@ type AdvancerRepository interface { GetLastProcessedInput(ctx context.Context, appAddress string) (*Input, error) } -// Service is the main advancer service that processes inputs through Cartesi machines -type Service struct { - service.Service - snapshotsDir string - 
repository AdvancerRepository - machineManager manager.MachineProvider - inspector *inspect.Inspector - HTTPServer *http.Server - HTTPServerFunc func() error -} - -// CreateInfo contains the configuration for creating an advancer service -type CreateInfo struct { - service.CreateInfo - Config config.AdvancerConfig - Repository repository.Repository -} - -// Create initializes a new advancer service -func Create(ctx context.Context, c *CreateInfo) (*Service, error) { - var err error - if err = ctx.Err(); err != nil { - return nil, err // This returns context.Canceled or context.DeadlineExceeded. - } - - s := &Service{} - c.Impl = s - - err = service.Create(ctx, &c.CreateInfo, &s.Service) - if err != nil { - return nil, err - } - - s.repository = c.Repository - if s.repository == nil { - return nil, fmt.Errorf("repository on advancer service Create is nil") - } - - // Create the machine manager - manager := manager.NewMachineManager( - ctx, - c.Repository, - s.Logger, - c.Config.FeatureMachineHashCheckEnabled, - ) - s.machineManager = manager - - // Initialize the inspect service if enabled - if c.Config.FeatureInspectEnabled { - s.inspector, s.HTTPServer, s.HTTPServerFunc = inspect.NewInspector( - c.Repository, - manager, - c.Config.InspectAddress, - c.LogLevel, - c.LogColor, - ) - } - - s.snapshotsDir = c.Config.SnapshotsDir - - return s, nil -} - -// Service interface implementation -func (s *Service) Alive() bool { return true } -func (s *Service) Ready() bool { return true } -func (s *Service) Reload() []error { return nil } -func (s *Service) Tick() []error { - if err := s.Step(s.Context); err != nil { - return []error{err} - } - return []error{} -} -func (s *Service) Stop(b bool) []error { - return nil -} -func (s *Service) Serve() error { - if s.inspector != nil && s.HTTPServerFunc != nil { - go s.HTTPServerFunc() - } - return s.Service.Serve() -} -func (s *Service) String() string { - return s.Name -} - // getUnprocessedInputs retrieves inputs that haven't 
been processed yet func getUnprocessedInputs(ctx context.Context, repo AdvancerRepository, appAddress string) ([]*Input, uint64, error) { f := repository.InputFilter{Status: Pointer(InputCompletionStatus_None)} @@ -212,7 +123,7 @@ func (s *Service) processInputs(ctx context.Context, app *Application, inputs [] "index", input.Index) // Advance the machine with this input - result, err := machine.Advance(ctx, input.RawData, input.Index) + result, err := machine.Advance(ctx, input.RawData, input.EpochIndex, input.Index, app.IsDaveConsensus()) if err != nil { // If there's an error, mark the application as inoperable s.Logger.Error("Error executing advance", @@ -235,14 +146,16 @@ func (s *Service) processInputs(ctx context.Context, app *Application, inputs [] return err } - + // log advance result hashes s.Logger.Info("Processing input finished", "application", app.Name, - "epoch", input.EpochIndex, - "index", input.Index, + "epoch", result.EpochIndex, + "index", result.InputIndex, "status", result.Status, "outputs", len(result.Outputs), "reports", len(result.Reports), + "hashes", len(result.Hashes), + "remaining_cycles", result.RemainingMetaCycles, ) // Store the result in the database @@ -271,13 +184,37 @@ func (s *Service) processInputs(ctx context.Context, app *Application, inputs [] return nil } -// handleEpochSnapshotAfterInputProcessed handles the snapshot creation after when an epoch is closed after an input was processed -func (s *Service) handleEpochSnapshotAfterInputProcessed(ctx context.Context, app *Application) error { - // Check if the application has a epoch snapshot policy - if app.ExecutionParameters.SnapshotPolicy != SnapshotPolicy_EveryEpoch { - return nil +func (s *Service) isEpochLastInput(ctx context.Context, app *Application, input *Input) (bool, error) { + if app == nil || input == nil { + return false, fmt.Errorf("application and input must not be nil") + } + // Get the epoch for this input + epoch, err := s.repository.GetEpoch(ctx, 
app.IApplicationAddress.String(), input.EpochIndex) + if err != nil { + return false, fmt.Errorf("failed to get epoch: %w", err) + } + + // Skip if the epoch is still open + if epoch.Status == EpochStatus_Open { + return false, nil } + // Check if this is the last input of the epoch + lastInput, err := s.repository.GetLastInput(ctx, app.IApplicationAddress.String(), input.EpochIndex) + if err != nil { + return false, fmt.Errorf("failed to get last input: %w", err) + } + + // If this is the last input and the epoch is closed, return true + if lastInput != nil && lastInput.Index == input.Index { + return true, nil + } + + return false, nil +} + +// handleEpochSnapshotAfterInputProcessed handles the snapshot creation after when an epoch is closed after an input was processed +func (s *Service) handleEpochSnapshotAfterInputProcessed(ctx context.Context, app *Application) error { // Get the machine instance for this application machine, exists := s.machineManager.GetMachine(app.ID) if !exists { @@ -290,12 +227,13 @@ func (s *Service) handleEpochSnapshotAfterInputProcessed(ctx context.Context, ap return fmt.Errorf("failed to get last input: %w", err) } - if lastProcessedInput == nil { - return nil + // Check if the application has a epoch snapshot policy + if lastProcessedInput != nil && app.ExecutionParameters.SnapshotPolicy == SnapshotPolicy_EveryEpoch { + // Handle the snapshot + return s.handleSnapshot(ctx, app, machine, lastProcessedInput) } - // Handle the snapshot - return s.handleSnapshot(ctx, app, machine, lastProcessedInput) + return nil } // handleSnapshot creates a snapshot based on the application's snapshot policy @@ -314,25 +252,12 @@ func (s *Service) handleSnapshot(ctx context.Context, app *Application, machine // For EVERY_EPOCH policy, check if this is the last input of the epoch if policy == SnapshotPolicy_EveryEpoch { - // Get the epoch for this input - epoch, err := s.repository.GetEpoch(ctx, app.IApplicationAddress.String(), input.EpochIndex) - if 
err != nil { - return fmt.Errorf("failed to get epoch: %w", err) - } - - // Skip if the epoch is still open - if epoch.Status == EpochStatus_Open { - return nil - } - - // Check if this is the last input of the epoch - lastInput, err := s.repository.GetLastInput(ctx, app.IApplicationAddress.String(), input.EpochIndex) + // If this is the last input and the epoch is closed, create a snapshot + isLastInput, err := s.isEpochLastInput(ctx, app, input) if err != nil { - return fmt.Errorf("failed to get last input: %w", err) + return err } - - // If this is the last input and the epoch is closed, create a snapshot - if lastInput != nil && lastInput.Index == input.Index { + if isLastInput { return s.createSnapshot(ctx, app, machine, input) } } @@ -364,7 +289,7 @@ func (s *Service) createSnapshot(ctx context.Context, app *Application, machine // Ensure the parent directory exists if _, err := os.Stat(s.snapshotsDir); os.IsNotExist(err) { - if err := os.MkdirAll(s.snapshotsDir, 0755); err != nil { // nolint: mnd + if err := os.MkdirAll(s.snapshotsDir, 0755); err != nil { //nolint: mnd return fmt.Errorf("failed to create snapshots directory: %w", err) } } diff --git a/internal/advancer/advancer_test.go b/internal/advancer/advancer_test.go index a994046fb..e3d291e01 100644 --- a/internal/advancer/advancer_test.go +++ b/internal/advancer/advancer_test.go @@ -513,6 +513,8 @@ func (mock *MockMachineImpl) Advance( ctx context.Context, input []byte, _ uint64, + _ uint64, + _ bool, ) (*AdvanceResult, error) { // If AdvanceBlock is true, block until context is canceled if mock.AdvanceBlock { @@ -605,8 +607,8 @@ type MockMachineInstance struct { } // Advance implements the MachineInstance interface for testing -func (m *MockMachineInstance) Advance(ctx context.Context, input []byte, index uint64) (*AdvanceResult, error) { - return m.machineImpl.Advance(ctx, input, index) +func (m *MockMachineInstance) Advance(ctx context.Context, input []byte, epochIndex uint64, index uint64, leafs 
bool) (*AdvanceResult, error) { + return m.machineImpl.Advance(ctx, input, epochIndex, index, leafs) } // Inspect implements the MachineInstance interface for testing @@ -632,6 +634,12 @@ func (m *MockMachineInstance) CreateSnapshot(ctx context.Context, processInputs return nil } +// Retrieves the hash of the current machine state +func (m *MockMachineInstance) Hash(ctx context.Context) ([32]byte, error) { + // Not used in advancer tests, but needed to satisfy the interface + return [32]byte{}, nil +} + // Close implements the MachineInstance interface for testing func (m *MockMachineInstance) Close() error { // Not used in advancer tests, but needed to satisfy the interface @@ -649,6 +657,7 @@ type MockRepository struct { UpdateApplicationStateError error UpdateEpochsError error UpdatedEpochs []uint64 + UpdateEpochCommitmentError error GetLastSnapshotReturn *Input GetLastSnapshotError error @@ -706,6 +715,15 @@ func (mock *MockRepository) StoreAdvanceResult( return mock.StoreAdvanceError } +func (mock *MockRepository) UpdateEpochCommitment(ctx context.Context, appID int64, epochIndex uint64, commitmen []byte) error { + // Check for context cancellation + if ctx.Err() != nil { + return ctx.Err() + } + + return mock.UpdateEpochCommitmentError +} + func (mock *MockRepository) UpdateEpochsInputsProcessed(ctx context.Context, nameOrAddress string) ([]uint64, error) { // Check for context cancellation if ctx.Err() != nil { @@ -849,14 +867,13 @@ func randomInputs(appId int64, epochIndex uint64, size int) []*Input { } func randomAdvanceResult(inputIndex uint64) *AdvanceResult { - hash := randomHash() res := &AdvanceResult{ InputIndex: inputIndex, Status: InputCompletionStatus_Accepted, Outputs: randomSliceOfBytes(), Reports: randomSliceOfBytes(), OutputsHash: randomHash(), - MachineHash: &hash, + MachineHash: randomHash(), } return res } diff --git a/internal/advancer/service.go b/internal/advancer/service.go new file mode 100644 index 000000000..1f8399331 --- /dev/null 
+++ b/internal/advancer/service.go @@ -0,0 +1,102 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package advancer + +import ( + "context" + "fmt" + "net/http" + + "github.com/cartesi/rollups-node/internal/config" + "github.com/cartesi/rollups-node/internal/inspect" + "github.com/cartesi/rollups-node/internal/manager" + "github.com/cartesi/rollups-node/internal/repository" + "github.com/cartesi/rollups-node/pkg/service" +) + +// Service is the main advancer service that processes inputs through Cartesi machines +type Service struct { + service.Service + snapshotsDir string + repository AdvancerRepository + machineManager manager.MachineProvider + inspector *inspect.Inspector + HTTPServer *http.Server + HTTPServerFunc func() error +} + +// CreateInfo contains the configuration for creating an advancer service +type CreateInfo struct { + service.CreateInfo + Config config.AdvancerConfig + Repository repository.Repository +} + +// Create initializes a new advancer service +func Create(ctx context.Context, c *CreateInfo) (*Service, error) { + var err error + if err = ctx.Err(); err != nil { + return nil, err // This returns context.Canceled or context.DeadlineExceeded. 
+ } + + s := &Service{} + c.Impl = s + + err = service.Create(ctx, &c.CreateInfo, &s.Service) + if err != nil { + return nil, err + } + + s.repository = c.Repository + if s.repository == nil { + return nil, fmt.Errorf("repository on advancer service Create is nil") + } + + // Create the machine manager + manager := manager.NewMachineManager( + ctx, + c.Repository, + s.Logger, + c.Config.FeatureMachineHashCheckEnabled, + ) + s.machineManager = manager + + // Initialize the inspect service if enabled + if c.Config.FeatureInspectEnabled { + s.inspector, s.HTTPServer, s.HTTPServerFunc = inspect.NewInspector( + c.Repository, + manager, + c.Config.InspectAddress, + c.LogLevel, + c.LogColor, + ) + } + + s.snapshotsDir = c.Config.SnapshotsDir + + return s, nil +} + +// Service interface implementation +func (s *Service) Alive() bool { return true } +func (s *Service) Ready() bool { return true } +func (s *Service) Reload() []error { return nil } +func (s *Service) Tick() []error { + if err := s.Step(s.Context); err != nil { + return []error{err} + } + return []error{} +} +func (s *Service) Stop(b bool) []error { + return nil +} +func (s *Service) Serve() error { + if s.inspector != nil && s.HTTPServerFunc != nil { + go s.HTTPServerFunc() + } + return s.Service.Serve() +} +func (s *Service) String() string { + return s.Name +} diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 5e55e234f..14caa8b99 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -219,6 +219,8 @@ func (mock *MockMachine) Advance( _ context.Context, input []byte, _ uint64, + _ uint64, + _ bool, ) (*AdvanceResult, error) { // Not used in inspect tests, but needed to satisfy the interface return nil, nil @@ -238,6 +240,12 @@ func (mock *MockMachine) CreateSnapshot(ctx context.Context, processedInputs uin return nil } +// Retrieves the hash of the current machine state +func (m *MockMachine) Hash(ctx context.Context) ([32]byte, error) { + 
// Not used in inspect tests, but needed to satisfy the interface + return [32]byte{}, nil +} + func (mock *MockMachine) Close() error { // Not used in inspect tests, but needed to satisfy the interface return nil diff --git a/internal/manager/instance.go b/internal/manager/instance.go index 2fac54d75..a25a8465f 100644 --- a/internal/manager/instance.go +++ b/internal/manager/instance.go @@ -5,6 +5,7 @@ package manager import ( "context" + "encoding/hex" "errors" "fmt" "log/slog" @@ -175,7 +176,7 @@ func (m *MachineInstanceImpl) Synchronize(ctx context.Context, repo MachineRepos "epoch_index", input.EpochIndex, "input_index", input.Index) - _, err := m.Advance(ctx, input.RawData, input.Index) + _, err := m.Advance(ctx, input.RawData, input.EpochIndex, input.Index, false) if err != nil { return fmt.Errorf("%w: failed to replay input %d: %v", ErrMachineSynchronization, input.Index, err) @@ -205,7 +206,7 @@ func (m *MachineInstanceImpl) forkForAdvance(ctx context.Context, index uint64) } // Advance processes an input and advances the machine state -func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index uint64) (*AdvanceResult, error) { +func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, epochIndex uint64, index uint64, computeHashes bool) (*AdvanceResult, error) { // Only one advance can be active at a time m.advanceMutex.Lock() defer m.advanceMutex.Unlock() @@ -234,8 +235,16 @@ func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index u advanceCtx, cancel := context.WithTimeout(ctx, m.advanceTimeout) defer cancel() + if computeHashes { + // write the checkpoint hash before processing + err = fork.WriteCheckpointHash(advanceCtx, prevMachineHash) + if err != nil { + return nil, errors.Join(err, fork.Close()) + } + } + // Process the input - accepted, outputs, reports, outputsHash, err := fork.Advance(advanceCtx, input) + accepted, outputs, reports, hashes, remaining, outputsHash, err := 
fork.Advance(advanceCtx, input, computeHashes) status, err := toInputStatus(accepted, err) if err != nil { return nil, errors.Join(err, fork.Close()) @@ -243,11 +252,15 @@ func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index u // Create the result result := &AdvanceResult{ - InputIndex: index, - Status: status, - Outputs: outputs, - Reports: reports, - OutputsHash: outputsHash, + EpochIndex: epochIndex, + InputIndex: index, + Status: status, + Outputs: outputs, + Reports: reports, + Hashes: hashes, + RemainingMetaCycles: remaining, + OutputsHash: outputsHash, + IsDaveConsensus: computeHashes, } // If the input was accepted, update the machine state @@ -257,7 +270,7 @@ func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index u if err != nil { return nil, errors.Join(err, fork.Close()) } - result.MachineHash = (*common.Hash)(&machineHash) + result.MachineHash = machineHash // Replace the current machine with the fork m.mutex.HLock() @@ -270,7 +283,7 @@ func (m *MachineInstanceImpl) Advance(ctx context.Context, input []byte, index u m.mutex.Unlock() } else { // Use the previous state for rejected inputs - result.MachineHash = (*common.Hash)(&prevMachineHash) + result.MachineHash = prevMachineHash result.OutputsHash = prevOutputsHash // Close the fork since we're not using it @@ -384,6 +397,34 @@ func (m *MachineInstanceImpl) CreateSnapshot(ctx context.Context, processedInput return nil } +func (m *MachineInstanceImpl) Hash(ctx context.Context) ([32]byte, error) { + // Acquire the advance mutex to ensure no advance operations are in progress + m.advanceMutex.Lock() + defer m.advanceMutex.Unlock() + + // Acquire a read lock on the machine + m.mutex.LLock() + defer m.mutex.Unlock() + + if m.runtime == nil { + return [32]byte{}, ErrMachineClosed + } + + m.logger.Debug("Retrieving machine root hash") + + storeCtx, cancel := context.WithTimeout(ctx, m.application.ExecutionParameters.LoadDeadline) + defer cancel() + + hash, err 
:= m.runtime.Hash(storeCtx) + if err != nil { + m.logger.Error("Failed to retrieve machine root hash", "error", err) + return [32]byte{}, err + } + + m.logger.Debug("Machine root hash retrieved successfully", "hash", "0x"+hex.EncodeToString(hash[:])) + return hash, nil +} + // Close shuts down the machine instance func (m *MachineInstanceImpl) Close() error { // Acquire all locks to ensure no operations are in progress diff --git a/internal/manager/instance_test.go b/internal/manager/instance_test.go index 169763820..def41dfb8 100644 --- a/internal/manager/instance_test.go +++ b/internal/manager/instance_test.go @@ -210,7 +210,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require := s.Require() _, fork, machine := s.setupAdvance() - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Nil(err) require.NotNil(res) @@ -219,7 +219,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require.Equal(expectedOutputs, res.Outputs) require.Equal(expectedReports1, res.Reports) require.Equal(newHash(1), res.OutputsHash) - require.Equal(newHash(2), *res.MachineHash) + require.Equal(newHash(2), res.MachineHash) require.Equal(uint64(6), machine.processedInputs) }) @@ -229,7 +229,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceAcceptedReturn = false fork.CloseError = nil - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Nil(err) require.NotNil(res) @@ -238,7 +238,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require.Equal(expectedOutputs, res.Outputs) require.Equal(expectedReports1, res.Reports) require.Equal(newHash(1), res.OutputsHash) - require.Equal(newHash(2), *res.MachineHash) + require.Equal(newHash(2), res.MachineHash) require.Equal(uint64(6), machine.processedInputs) }) @@ -249,7 +249,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceError = 
err fork.CloseError, inner.CloseError = inner.CloseError, fork.CloseError - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Nil(err) require.NotNil(res) @@ -257,7 +257,7 @@ func (s *MachineInstanceSuite) TestAdvance() { require.Equal(expectedOutputs, res.Outputs) require.Equal(expectedReports1, res.Reports) require.Equal(newHash(1), res.OutputsHash) - require.Equal(newHash(2), *res.MachineHash) + require.Equal(newHash(2), res.MachineHash) require.Equal(uint64(6), machine.processedInputs) }) } @@ -294,7 +294,7 @@ func (s *MachineInstanceSuite) TestAdvance() { errFork := errors.New("Fork error") inner.ForkError = errFork - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.Equal(errFork, err) @@ -308,7 +308,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceError = errAdvance fork.CloseError, inner.CloseError = inner.CloseError, fork.CloseError - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errAdvance) @@ -325,7 +325,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.CloseError = errClose inner.CloseError = nil - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errAdvance) @@ -341,7 +341,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.HashError = errHash fork.CloseError, inner.CloseError = inner.CloseError, fork.CloseError - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errHash) 
@@ -358,7 +358,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.CloseError = errClose inner.CloseError = nil - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errHash) @@ -374,7 +374,7 @@ func (s *MachineInstanceSuite) TestAdvance() { errClose := errors.New("Close error") inner.CloseError = errClose - res, err := machine.Advance(context.Background(), []byte{}, 5) + res, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Nil(res) require.ErrorIs(err, errClose) @@ -389,7 +389,7 @@ func (s *MachineInstanceSuite) TestAdvance() { fork.AdvanceError = machine.ErrException fork.CloseError = errClose - res, err := machineInst.Advance(context.Background(), []byte{}, 5) + res, err := machineInst.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.NotNil(res) require.ErrorIs(err, errClose) @@ -601,7 +601,7 @@ func (s *MachineInstanceSuite) TestClose() { time.Sleep(centisecond / 2) // This should block until Close is done - _, err := machine.Advance(context.Background(), []byte{}, 5) + _, err := machine.Advance(context.Background(), []byte{}, 0, 5, false) require.Error(err) require.Equal(ErrMachineClosed, err) }() @@ -622,37 +622,6 @@ func (s *MachineInstanceSuite) TestClose() { // ------------------------------------------------------------------------------------------------ -// MockMachineInstance implements the MachineInstance interface for testing -type MockMachineInstance struct { - application *model.Application -} - -func (m *MockMachineInstance) Application() *model.Application { - return m.application -} - -func (m *MockMachineInstance) Advance(ctx context.Context, input []byte, index uint64) (*model.AdvanceResult, error) { - return nil, nil -} - -func (m *MockMachineInstance) Inspect(ctx context.Context, query []byte) (*model.InspectResult, 
error) { - return nil, nil -} - -func (m *MockMachineInstance) Synchronize(ctx context.Context, repo MachineRepository) error { - return nil -} - -func (m *MockMachineInstance) CreateSnapshot(ctx context.Context, processedInputs uint64, path string) error { - return nil -} - -func (m *MockMachineInstance) Close() error { - return nil -} - -// ------------------------------------------------------------------------------------------------ - var ( errUnreachable = errors.New("unreachable") expectedOutputs = []machine.Output{ @@ -803,11 +772,15 @@ type MockRollupsMachine struct { HashReturn machine.Hash HashError error - AdvanceAcceptedReturn bool - AdvanceOutputsReturn []machine.Output - AdvanceReportsReturn []machine.Report - AdvanceHashReturn machine.Hash - AdvanceError error + CheckpointHashError error + + AdvanceAcceptedReturn bool + AdvanceOutputsReturn []machine.Output + AdvanceReportsReturn []machine.Report + AdvanceLeafsReturn []machine.Hash + AdvanceRemainingReturn uint64 + AdvanceHashReturn machine.Hash + AdvanceError error InspectAcceptedReturn bool InspectReportsReturn []machine.Report @@ -830,12 +803,18 @@ func (m *MockRollupsMachine) OutputsHash(_ context.Context) (machine.Hash, error return m.AdvanceHashReturn, m.HashError } -func (m *MockRollupsMachine) Advance(_ context.Context, input []byte) ( - bool, []machine.Output, []machine.Report, machine.Hash, error, +func (m *MockRollupsMachine) WriteCheckpointHash(_ context.Context, _ machine.Hash) error { + return m.CheckpointHashError +} + +func (m *MockRollupsMachine) Advance(_ context.Context, input []byte, leafs bool) ( + bool, []machine.Output, []machine.Report, []machine.Hash, uint64, machine.Hash, error, ) { return m.AdvanceAcceptedReturn, m.AdvanceOutputsReturn, m.AdvanceReportsReturn, + m.AdvanceLeafsReturn, + m.AdvanceRemainingReturn, m.AdvanceHashReturn, m.AdvanceError } diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 5d17a2d23..29379a8cf 100644 --- 
a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -132,7 +132,7 @@ func (m *MachineManager) UpdateMachines(ctx context.Context) error { "epoch_index", input.EpochIndex, "input_index", input.Index) - _, err := instance.Advance(ctx, input.RawData, input.Index) + _, err := instance.Advance(ctx, input.RawData, input.EpochIndex, input.Index, false) if err != nil { m.logger.Error("Failed to replay input after snapshot", "application", app.Name, diff --git a/internal/manager/manager_test.go b/internal/manager/manager_test.go index a016fb9a7..45ecafd16 100644 --- a/internal/manager/manager_test.go +++ b/internal/manager/manager_test.go @@ -103,9 +103,9 @@ func (s *MachineManagerSuite) TestUpdateMachines() { app2 := &model.Application{ID: 2, Name: "App2"} app3 := &model.Application{ID: 3, Name: "App3"} - mockMachine1 := &MockMachineInstance{application: app1} - mockMachine2 := &MockMachineInstance{application: app2} - mockMachine3 := &MockMachineInstance{application: app3} + mockMachine1 := &DummyMachineInstanceMock{application: app1} + mockMachine2 := &DummyMachineInstanceMock{application: app2} + mockMachine3 := &DummyMachineInstanceMock{application: app3} manager.addMachine(1, mockMachine1) manager.addMachine(2, mockMachine2) @@ -130,7 +130,7 @@ func (s *MachineManagerSuite) TestGetMachine() { Return(nil, nil) manager := NewMachineManager(context.Background(), repo, nil, false) - machine := &MockMachineInstance{application: &model.Application{ID: 1}} + machine := &DummyMachineInstanceMock{application: &model.Application{ID: 1}} // Add a machine manager.addMachine(1, machine) @@ -153,7 +153,7 @@ func (s *MachineManagerSuite) TestHasMachine() { Return(nil, nil) manager := NewMachineManager(context.Background(), repo, nil, false) - machine := &MockMachineInstance{application: &model.Application{ID: 1}} + machine := &DummyMachineInstanceMock{application: &model.Application{ID: 1}} // Add a machine manager.addMachine(1, machine) @@ -173,8 +173,8 @@ func (s 
*MachineManagerSuite) TestAddMachine() { Return(nil, nil) manager := NewMachineManager(context.Background(), repo, nil, false) - machine1 := &MockMachineInstance{application: &model.Application{ID: 1}} - machine2 := &MockMachineInstance{application: &model.Application{ID: 2}} + machine1 := &DummyMachineInstanceMock{application: &model.Application{ID: 1}} + machine2 := &DummyMachineInstanceMock{application: &model.Application{ID: 2}} // Add first machine added := manager.addMachine(1, machine1) @@ -202,9 +202,9 @@ func (s *MachineManagerSuite) TestRemoveDisabledMachines() { app2 := &model.Application{ID: 2} app3 := &model.Application{ID: 3} - machine1 := &MockMachineInstance{application: app1} - machine2 := &MockMachineInstance{application: app2} - machine3 := &MockMachineInstance{application: app3} + machine1 := &DummyMachineInstanceMock{application: app1} + machine2 := &DummyMachineInstanceMock{application: app2} + machine3 := &DummyMachineInstanceMock{application: app3} manager.addMachine(1, machine1) manager.addMachine(2, machine2) @@ -233,8 +233,8 @@ func (s *MachineManagerSuite) TestApplications() { app1 := &model.Application{ID: 1, Name: "App1"} app2 := &model.Application{ID: 2, Name: "App2"} - machine1 := &MockMachineInstance{application: app1} - machine2 := &MockMachineInstance{application: app2} + machine1 := &DummyMachineInstanceMock{application: app1} + machine2 := &DummyMachineInstanceMock{application: app2} manager.addMachine(1, machine1) manager.addMachine(2, machine2) @@ -290,3 +290,38 @@ func (m *MockMachineRepository) GetLastSnapshot( } return args.Get(0).(*model.Input), args.Error(1) } + +// ------------------------------------------------------------------------------------------------ + +// DummyMachineInstanceMock implements the MachineInstance interface for testing +type DummyMachineInstanceMock struct { + application *model.Application +} + +func (m *DummyMachineInstanceMock) Application() *model.Application { + return m.application +} + 
+func (m *DummyMachineInstanceMock) Advance(_ context.Context, _ []byte, _ uint64, _ uint64, _ bool) (*model.AdvanceResult, error) { + return nil, nil +} + +func (m *DummyMachineInstanceMock) Inspect(_ context.Context, _ []byte) (*model.InspectResult, error) { + return nil, nil +} + +func (m *DummyMachineInstanceMock) Synchronize(_ context.Context, _ MachineRepository) error { + return nil +} + +func (m *DummyMachineInstanceMock) CreateSnapshot(_ context.Context, _ uint64, _ string) error { + return nil +} + +func (m *DummyMachineInstanceMock) Hash(_ context.Context) ([32]byte, error) { + return [32]byte{}, nil +} + +func (m *DummyMachineInstanceMock) Close() error { + return nil +} diff --git a/internal/manager/types.go b/internal/manager/types.go index c6d71bdec..a2f2dcd28 100644 --- a/internal/manager/types.go +++ b/internal/manager/types.go @@ -12,10 +12,11 @@ import ( // MachineInstance defines the interface for a machine instance type MachineInstance interface { Application() *Application - Advance(ctx context.Context, input []byte, index uint64) (*AdvanceResult, error) + Advance(ctx context.Context, input []byte, epochIndex uint64, inputIndex uint64, computeHashes bool) (*AdvanceResult, error) Inspect(ctx context.Context, query []byte) (*InspectResult, error) Synchronize(ctx context.Context, repo MachineRepository) error CreateSnapshot(ctx context.Context, processedInputs uint64, path string) error + Hash(ctx context.Context) ([32]byte, error) Close() error } diff --git a/internal/validator/validator.go b/internal/validator/validator.go index 0c527f127..05c84748d 100644 --- a/internal/validator/validator.go +++ b/internal/validator/validator.go @@ -10,13 +10,15 @@ import ( "errors" "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/merkle" . 
"github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" + pkgm "github.com/cartesi/rollups-node/pkg/machine" "github.com/cartesi/rollups-node/pkg/service" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" ) type Service struct { @@ -103,6 +105,7 @@ type ValidatorRepository interface { GetLastInput(ctx context.Context, appAddress string, epochIndex uint64) (*Input, error) // FIXME migrate to list GetEpochByVirtualIndex(ctx context.Context, nameOrAddress string, index uint64) (*Epoch, error) StoreClaimAndProofs(ctx context.Context, epoch *Epoch, outputs []*Output) error + ListStateHashes(ctx context.Context, nameOrAddress string, f repository.StateHashFilter, p repository.Pagination, descending bool) ([]*StateHash, uint64, error) } func getAllRunningApplications(ctx context.Context, er ValidatorRepository) ([]*Application, uint64, error) { @@ -210,10 +213,17 @@ func (v *Service) validateApplication(ctx context.Context, app *Application) err epoch.MachineHash = &app.TemplateHash } } + epoch.ClaimHash = claim + + commitment, err := v.buildCommitment(ctx, app, epoch) + if err != nil { + return fmt.Errorf("failed to compute commitment for epoch %v (%v) of application %v. 
%w", + epoch.Index, epoch.VirtualIndex, appAddress, err, + ) + } + epoch.Commitment = commitment - // update the epoch status and its claim epoch.Status = EpochStatus_ClaimComputed - epoch.ClaimHash = claim // store the epoch and proofs in the database err = v.repository.StoreClaimAndProofs(ctx, epoch, outputs) @@ -234,6 +244,54 @@ func (v *Service) validateApplication(ctx context.Context, app *Application) err return nil } +func (s *Service) buildCommitment(ctx context.Context, app *Application, epoch *Epoch) (*common.Hash, error) { + if app == nil || epoch == nil { + return nil, fmt.Errorf("application or epoch is nil") + } + if !app.IsDaveConsensus() { + return nil, nil + } + s.Logger.Debug("DaveConsensus: Building commitment for epoch", + "application", app.Name, + "epoch", epoch.Index) + + builder := merkle.Builder{} + inputCount := epoch.InputIndexUpperBound - epoch.InputIndexLowerBound + if inputCount > 0 { + statesHashes, total, err := s.repository.ListStateHashes(ctx, app.IApplicationAddress.String(), + repository.StateHashFilter{EpochIndex: &epoch.Index}, repository.Pagination{}, false) + if err != nil { + return nil, fmt.Errorf("failed to list state hashes for epoch %d of application %s: %w", + epoch.Index, app.Name, err) + } + if total < inputCount { + return nil, fmt.Errorf("not enough state hashes for epoch %d of application %s: expected at least %d, got %d", + epoch.Index, app.Name, inputCount, total) + } + if uint64(len(statesHashes)) != total { + return nil, fmt.Errorf("inconsistent number of state hashes for epoch %d of application %s: expected %d, got %d", epoch.Index, app.Name, total, len(statesHashes)) + } + for _, stateHash := range statesHashes { + builder.AppendRepeatedUint64(merkle.TreeLeaf(stateHash.MachineHash), stateHash.Repetitions) + } + } + + remainingInputs := pkgm.InputsPerEpoch - inputCount + remainingStrides := remainingInputs << pkgm.Log2StridesPerInput + if remainingStrides > 0 { + 
builder.AppendRepeatedUint64(merkle.TreeLeaf(*epoch.MachineHash), remainingStrides) + } + + epochCommitmentTree := builder.Build() + commitment := epochCommitmentTree.GetRootHash() + s.Logger.Info("DaveConsensus: Epoch commitment built", + "application", app.Name, + "epoch", epoch.Index, + "commitment", commitment.String()) + return &commitment, nil + +} + // createClaimAndProofs calculates the claim and proofs for an epoch. It returns // the claim and the epoch outputs updated with their hash and proofs. In case // the epoch has no outputs, there are no proofs and it returns the pristine diff --git a/internal/validator/validator_test.go b/internal/validator/validator_test.go index 731790518..815ed7d0c 100644 --- a/internal/validator/validator_test.go +++ b/internal/validator/validator_test.go @@ -573,6 +573,12 @@ func (m *Mockrepo) StoreClaimAndProofs(ctx context.Context, epoch *Epoch, output return args.Error(0) } +func (m *Mockrepo) ListStateHashes(ctx context.Context, nameOrAddress string, + f repository.StateHashFilter, p repository.Pagination, descending bool) ([]*StateHash, uint64, error) { + args := m.Called(ctx, nameOrAddress, f, p, descending) + return args.Get(0).([]*StateHash), args.Get(1).(uint64), args.Error(2) +} + func (m *Mockrepo) UpdateApplicationState(ctx context.Context, appID int64, state ApplicationState, reason *string) error { args := m.Called(ctx, appID, state, reason) return args.Error(0) diff --git a/pkg/emulator/machine.go b/pkg/emulator/machine.go index 54b2d24fa..7fb624d43 100644 --- a/pkg/emulator/machine.go +++ b/pkg/emulator/machine.go @@ -7,6 +7,7 @@ package emulator // #include +// #include // #include "cartesi-machine/machine-c-api.h" import "C" @@ -16,6 +17,11 @@ import ( "unsafe" ) +const HashSize = C.sizeof_cm_hash + +// Common type aliases +type Hash = [HashSize]byte + // ----------------------------------------------------------------------------- // Machine Methods // 
----------------------------------------------------------------------------- @@ -171,22 +177,19 @@ func (m *Machine) GetRegAddress(reg RegID) (uint64, error) { } // get_root_hash -func (m *Machine) GetRootHash() ([]byte, error) { - var hash C.cm_hash +func (m *Machine) GetRootHash() (Hash, error) { + var cHash C.cm_hash var err error - var result []byte m.callCAPI(func() { - err = newError(C.cm_get_root_hash(m.ptr, &hash)) - if err == nil { - result = C.GoBytes(unsafe.Pointer(&hash), 32) - } + err = newError(C.cm_get_root_hash(m.ptr, &cHash)) }) - if err != nil { - return nil, err + return Hash{}, err } - return result, nil + + // Zero-copy: reinterpret C array as Go array + return *(*Hash)(unsafe.Pointer(&cHash)), nil } // get_runtime_config diff --git a/pkg/machine/backend.go b/pkg/machine/backend.go index 7e880be64..236ae9e4d 100644 --- a/pkg/machine/backend.go +++ b/pkg/machine/backend.go @@ -3,7 +3,10 @@ package machine -import "time" +import ( + "encoding/json" + "time" +) type BreakReason int32 @@ -16,6 +19,15 @@ const ( ReachedTargetMcycle BreakReason = 0x5 ) +type HashCollectorState struct { + Period uint64 + Phase uint64 + MaxHashes uint64 + BundleLog2 int32 + Hashes []Hash + BackTree json.RawMessage +} + // This Backend interface covers the methods used from the emulator / remote machine server. // It is to abstract the emulator package and allow for easier testing and mocking in unit tests. 
type Backend interface { @@ -23,6 +35,8 @@ type Backend interface { Store(directory string, timeout time.Duration) error Run(mcycleEnd uint64, timeout time.Duration) (BreakReason, error) + RunAndCollectRootHashes(mcycleEnd uint64, state *HashCollectorState, timeout time.Duration, + ) (reason BreakReason, err error) IsAtManualYield(timeout time.Duration) (bool, error) ReadMCycle(timeout time.Duration) (uint64, error) @@ -30,7 +44,9 @@ type Backend interface { SendCmioResponse(reason uint16, data []byte, timeout time.Duration) error ReceiveCmioRequest(timeout time.Duration) (cmd uint8, reason uint16, data []byte, err error) - GetRootHash(timeout time.Duration) ([]byte, error) + WriteMemory(address uint64, data []byte, timeout time.Duration) error + + GetRootHash(timeout time.Duration) (Hash, error) Delete() ForkServer(timeout time.Duration) (Backend, string, uint32, error) diff --git a/pkg/machine/backend_test.go b/pkg/machine/backend_test.go index 806ea2dbb..cd8d73175 100644 --- a/pkg/machine/backend_test.go +++ b/pkg/machine/backend_test.go @@ -50,9 +50,14 @@ func (m *MockBackend) ReceiveCmioRequest(timeout time.Duration) (uint8, uint16, return args.Get(0).(uint8), args.Get(1).(uint16), args.Get(2).([]byte), args.Error(3) } -func (m *MockBackend) GetRootHash(timeout time.Duration) ([]byte, error) { +func (m *MockBackend) GetRootHash(timeout time.Duration) (Hash, error) { args := m.Called(timeout) - return args.Get(0).([]byte), args.Error(1) + return args.Get(0).(Hash), args.Error(1) +} + +func (m *MockBackend) WriteMemory(address uint64, data []byte, timeout time.Duration) error { + args := m.Called(address, data, timeout) + return args.Error(0) } func (m *MockBackend) Delete() { @@ -79,31 +84,38 @@ func (m *MockBackend) CmioRxBufferSize() uint64 { return args.Get(0).(uint64) } +func (m *MockBackend) RunAndCollectRootHashes(mcycleEnd uint64, state *HashCollectorState, timeout time.Duration) (reason BreakReason, err error) { + args := m.Called(mcycleEnd, state, 
timeout) + return args.Get(0).(BreakReason), args.Error(1) +} + // Helper functions for setting up common mock scenarios -func randomFakeHash() []byte { - data := make([]byte, HashSize) - _, _ = rand.Read(data) - return data +func randomFakeHash() Hash { + hash := Hash{} + _, _ = rand.Read(hash[:]) + return hash } // SetupAccepted configures the mock for a successful advance/inspect operation func (m *MockBackend) SetupAccepted(reqType requestType) { + hash := randomFakeHash() m.On("SendCmioResponse", uint16(reqType), mock.Anything, mock.AnythingOfType("time.Duration")).Return(nil) m.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil) m.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(0), nil) m.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonAccepted), hash[:], nil) m.On("CmioRxBufferSize").Return(uint64(1024)) } // SetupRejected configures the mock for a rejected advance/inspect operation func (m *MockBackend) SetupRejected(reqType requestType) { + hash := randomFakeHash() m.On("SendCmioResponse", uint16(reqType), mock.Anything, mock.AnythingOfType("time.Duration")).Return(nil) m.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil) m.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(0), nil) m.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonRejected), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonRejected), hash[:], nil) m.On("CmioRxBufferSize").Return(uint64(1024)) } @@ -119,11 +131,12 @@ func (m *MockBackend) SetupException(reqType requestType) { // SetupForLoad configures the mock for successful machine loading func (m *MockBackend) SetupForLoad() { + hash := randomFakeHash() 
m.On("NewMachineRuntimeConfig").Return(`{"concurrency":{"update_merkle_tree":1}}`, nil).Once() m.On("Load", mock.AnythingOfType("string"), mock.AnythingOfType("string"), mock.AnythingOfType("time.Duration")).Return(nil).Once() m.On("IsAtManualYield", mock.AnythingOfType("time.Duration")).Return(true, nil).Once() m.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), make([]byte, HashSize), nil).Once() + uint8(0), uint16(ManualYieldReasonAccepted), hash[:], nil).Once() } // SetupForCleanup configures the mock for cleanup operations @@ -148,7 +161,7 @@ func (m *MockBackend) SetupNotAtManualYield() { } // SetupForHash configures the mock for successful hash retrieval -func (m *MockBackend) SetupForHash(hash []byte) { +func (m *MockBackend) SetupForHash(hash Hash) { m.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return(hash, nil) } @@ -159,14 +172,14 @@ func NewMockBackend() *MockBackend { // MockBackendFactory creates a backend factory that returns the provided mock func MockBackendFactory(backend *MockBackend) BackendFactory { - return func(address string, timeout time.Duration) (Backend, string, uint32, error) { + return func(_ string, _ time.Duration) (Backend, string, uint32, error) { return backend, "127.0.0.1:12345", 12345, nil } } // FailingMockBackendFactory creates a backend factory that always fails func FailingMockBackendFactory(err error) BackendFactory { - return func(address string, timeout time.Duration) (Backend, string, uint32, error) { + return func(_ string, _ time.Duration) (Backend, string, uint32, error) { return nil, "", 0, err } } diff --git a/pkg/machine/implementation.go b/pkg/machine/implementation.go index 8350d784e..eaa3d079f 100644 --- a/pkg/machine/implementation.go +++ b/pkg/machine/implementation.go @@ -47,6 +47,34 @@ const ( // Constants const maxOutputs = 65536 // 2^16 +const CheckpointAddress uint64 = 0x7ffff000 + +const ( + // log2 value of the maximal 
number of micro instructions that emulates a big instruction + Log2UarchSpanToBarch uint64 = 20 + // log2 value of the maximal number of big instructions that executes an input + Log2BarchSpanToInput uint64 = 48 + // log2 value of the maximal number of inputs that allowed in an epoch + Log2InputSpanToEpoch uint64 = 24 + // gap of each leaf in the commitment tree, should use the same value as ArbitrationConstants.sol:log2step(0) + Log2Stride uint64 = 44 + // log2 value of the maximal number of micro instructions that executes an input + Log2UarchSpanToInput uint64 = Log2BarchSpanToInput + Log2UarchSpanToBarch // 68 + + UarchSpanToBarch uint64 = (1 << Log2UarchSpanToBarch) - 1 // 1_048_575 + BarchSpanToInput uint64 = (1 << Log2BarchSpanToInput) - 1 // 281_474_976_710_655 + InputSpanToEpoch uint64 = (1 << Log2InputSpanToEpoch) - 1 // 16_777_215 + + BigStepsInStride uint64 = 1 << (Log2Stride - Log2UarchSpanToBarch) // 16_777_216 + StrideCountInInput uint64 = 1 << (Log2BarchSpanToInput + Log2UarchSpanToBarch - Log2Stride) // 16_777_216 + + StrideCountInEpoch uint64 = 1 << (Log2InputSpanToEpoch + Log2BarchSpanToInput + Log2UarchSpanToBarch - Log2Stride) + + Log2StridesPerInput uint64 = Log2BarchSpanToInput + Log2UarchSpanToBarch - Log2Stride + + InputsPerEpoch uint64 = 1 << Log2InputSpanToEpoch +) + // machineImpl implements the Machine interface by wrapping an emulator.RemoteMachine type machineImpl struct { backend Backend @@ -83,23 +111,16 @@ func (m *machineImpl) Fork(ctx context.Context) (Machine, error) { // Hash returns the machine's merkle tree root hash func (m *machineImpl) Hash(ctx context.Context) (Hash, error) { - hash := Hash{} if err := checkContext(ctx); err != nil { - return hash, err + return Hash{}, err } - hashSlice, err := m.backend.GetRootHash(m.params.LoadDeadline) + hash, err := m.backend.GetRootHash(m.params.LoadDeadline) if err != nil { err := fmt.Errorf("could not get the machine's root hash: %w", err) return hash, 
errors.Join(ErrMachineInternal, err) } - if len(hashSlice) != HashSize { - err := fmt.Errorf("invalid machine root hash length: expected 32 bytes, got %d bytes", len(hashSlice)) - return hash, errors.Join(ErrMachineInternal, err) - } - - copy(hash[:], hashSlice) return hash, nil } @@ -126,30 +147,42 @@ func (m *machineImpl) OutputsHash(ctx context.Context) (Hash, error) { return outputsHash, nil } +func (m *machineImpl) WriteCheckpointHash(ctx context.Context, hash Hash) error { + if err := checkContext(ctx); err != nil { + return err + } + + err := m.backend.WriteMemory(CheckpointAddress, hash[:], m.params.FastDeadline) + if err != nil { + err := fmt.Errorf("could not write checkpoint hash in to machine memory: %w", err) + return errors.Join(ErrMachineInternal, err) + } + return nil +} + // Advance sends an input to the machine and processes it -func (m *machineImpl) Advance(ctx context.Context, input []byte) (bool, []Output, []Report, Hash, error) { +func (m *machineImpl) Advance(ctx context.Context, input []byte, computeHashes bool) (bool, []Output, []Report, []Hash, uint64, Hash, error) { outputsHash := Hash{} - // TODO: return the exception reason - accepted, outputs, reports, data, err := m.process(ctx, input, AdvanceStateRequest) + accepted, outputs, reports, hashes, remaining, data, err := m.process(ctx, input, AdvanceStateRequest, computeHashes) if err != nil { - return accepted, outputs, reports, outputsHash, err + return accepted, outputs, reports, hashes, remaining, outputsHash, err } if accepted { if length := len(data); length != HashSize { err = fmt.Errorf("%w (it has %d bytes)", ErrHashLength, length) - return accepted, outputs, reports, outputsHash, err + return accepted, outputs, reports, hashes, remaining, outputsHash, err } copy(outputsHash[:], data) } - return accepted, outputs, reports, outputsHash, nil + return accepted, outputs, reports, hashes, remaining, outputsHash, nil } // Inspect sends a query to the machine and returns the results 
func (m *machineImpl) Inspect(ctx context.Context, query []byte) (bool, []Report, error) { // TODO: return the exception reason - accepted, _, reports, _, err := m.process(ctx, query, InspectStateRequest) + accepted, _, reports, _, _, _, err := m.process(ctx, query, InspectStateRequest, false) return accepted, reports, err } @@ -255,41 +288,50 @@ func (m *machineImpl) process( ctx context.Context, request []byte, reqType requestType, -) (bool, []Output, []Report, []byte, error) { + computeHashes bool, +) (bool, []Output, []Report, []Hash, uint64, []byte, error) { if err := checkContext(ctx); err != nil { - return false, nil, nil, nil, err + return false, nil, nil, nil, 0, nil, err } // Check payload length limit if length := uint64(len(request)); length > m.backend.CmioRxBufferSize() { - return false, nil, nil, nil, ErrPayloadLengthLimitExceeded + return false, nil, nil, nil, 0, nil, ErrPayloadLengthLimitExceeded } err := m.backend.SendCmioResponse(uint16(reqType), request, m.params.FastDeadline) if err != nil { - return false, nil, nil, nil, err + return false, nil, nil, nil, 0, nil, err } - outputs, reports, err := m.run(ctx, reqType) + outputs, reports, hashes, remaining, err := m.run(ctx, reqType, computeHashes) if err != nil { - return false, outputs, reports, nil, err + return false, outputs, reports, nil, 0, nil, err } accepted, data, err := m.wasLastRequestAccepted(ctx) - return accepted, outputs, reports, data, err + return accepted, outputs, reports, hashes, remaining, data, err } // run runs the machine until it manually yields. // It returns any collected responses. 
-func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, []Report, error) { +func (m *machineImpl) run(ctx context.Context, reqType requestType, computeHashes bool) ([]Output, []Report, []Hash, uint64, error) { startTime := time.Now() currentCycle, err := m.readMCycle(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, 0, err } limitCycle := currentCycle + m.params.AdvanceMaxCycles + stepTimeout := m.params.AdvanceIncDeadline + runTimeout := m.params.AdvanceMaxDeadline + if reqType == InspectStateRequest { + limitCycle = currentCycle + m.params.InspectMaxCycles + stepTimeout = m.params.InspectIncDeadline + runTimeout = m.params.InspectMaxDeadline + } + m.logger.Debug("run", "startingCycle", currentCycle, "limitCycle", limitCycle, @@ -298,11 +340,27 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ outputs := []Output{} reports := []Report{} - stepTimeout := m.params.AdvanceIncDeadline - runTimeout := m.params.AdvanceMaxDeadline - if reqType == InspectStateRequest { - stepTimeout = m.params.InspectIncDeadline - runTimeout = m.params.InspectMaxDeadline + var hashCollectorState *HashCollectorState + if computeHashes { + hashCollectorState = &HashCollectorState{ + Period: BigStepsInStride, + Phase: 0, + MaxHashes: 0, + BundleLog2: 0, + Hashes: []Hash{}, + } + } + hashes := func() []Hash { + if computeHashes { + return hashCollectorState.Hashes + } + return []Hash{} + } + remainingMetaCycles := func() uint64 { + if computeHashes { + return StrideCountInInput - uint64(len(hashCollectorState.Hashes)) + } + return 0 } for { @@ -312,17 +370,18 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ // Steps the machine as many times as needed until it manually/automatically yields. 
for yt == nil { if time.Since(startTime) > runTimeout { - return outputs, reports, fmt.Errorf("run operation timed out: %w", ErrDeadlineExceeded) + werr := fmt.Errorf("run operation timed out: %w", ErrDeadlineExceeded) + return outputs, reports, hashes(), remainingMetaCycles(), werr } - yt, currentCycle, err = m.step(ctx, currentCycle, limitCycle, stepTimeout) + yt, currentCycle, err = m.runIncrementInterval(ctx, currentCycle, limitCycle, hashCollectorState, stepTimeout) if err != nil && err != ErrReachedTargetMcycle { - return outputs, reports, err + return outputs, reports, hashes(), remainingMetaCycles(), err } } // Returns with the responses when the machine manually yields. if *yt == ManualYield { - return outputs, reports, nil + return outputs, reports, hashes(), remainingMetaCycles(), nil } // Asserts the machine yielded automatically. @@ -333,7 +392,8 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ _, yieldReason, data, err := m.backend.ReceiveCmioRequest(m.params.FastDeadline) if err != nil { - return outputs, reports, fmt.Errorf("could not read output/report: %w", err) + werr := fmt.Errorf("could not read output/report: %w", err) + return outputs, reports, hashes(), remainingMetaCycles(), werr } switch automaticYieldReason(yieldReason) { @@ -342,7 +402,7 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ case AutomaticYieldReasonOutput: // TODO: should we remove this? 
if len(outputs) == maxOutputs { - return outputs, reports, ErrOutputsLimitExceeded + return outputs, reports, hashes(), remainingMetaCycles(), ErrOutputsLimitExceeded } outputs = append(outputs, data) case AutomaticYieldReasonReport: @@ -353,14 +413,15 @@ func (m *machineImpl) run(ctx context.Context, reqType requestType) ([]Output, [ } } -// step runs the machine for at most machine.inc cycles (or the amount of cycles left to reach +// runIncrementInterval runs the machine for at most machine.inc cycles (or the amount of cycles left to reach // limitCycle, whichever is the lowest). -// It returns the yield type and the machine cycle after the step. -// If the machine did not manually/automatically yield, the yield type will be nil (meaning step +// It returns the yield type and the machine cycle after the increment interval. +// If the machine did not manually/automatically yield, the yield type will be nil (meaning runIncrementInterval // must be called again to complete the computation). -func (m *machineImpl) step(ctx context.Context, +func (m *machineImpl) runIncrementInterval(ctx context.Context, currentCycle Cycle, limitCycle Cycle, + hashCollectorState *HashCollectorState, timeout time.Duration, ) (*yieldType, Cycle, error) { startingCycle := currentCycle @@ -376,7 +437,7 @@ func (m *machineImpl) step(ctx context.Context, m.logger.Debug("machine step before run", "currentCycle", currentCycle, "increment", increment) // Runs the machine. 
- breakReason, err := m.backend.Run(currentCycle+increment, timeout) + breakReason, err := m.backend_run(currentCycle+increment, hashCollectorState, timeout) if err != nil { return nil, 0, err } @@ -414,6 +475,14 @@ func (m *machineImpl) step(ctx context.Context, } } +func (m *machineImpl) backend_run(mcycleEnd uint64, hashCollectorState *HashCollectorState, timeout time.Duration) (BreakReason, error) { + if hashCollectorState != nil { + m.logger.Debug("Running with root hash collection") + return m.backend.RunAndCollectRootHashes(mcycleEnd, hashCollectorState, timeout) + } + return m.backend.Run(mcycleEnd, timeout) +} + // Helper functions func checkContext(ctx context.Context) error { diff --git a/pkg/machine/implementation_test.go b/pkg/machine/implementation_test.go index d38a78889..dae8aa1de 100644 --- a/pkg/machine/implementation_test.go +++ b/pkg/machine/implementation_test.go @@ -102,12 +102,12 @@ func (s *ImplementationSuite) TestHash() { hash, err := machine.Hash(ctx) require.NoError(err) - require.Equal(expectedHash, hash[:]) + require.Equal(expectedHash, hash) mockBackend.AssertExpectations(s.T()) // Test hash with backend error mockBackend2 := NewMockBackend() - mockBackend2.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return(([]byte)(nil), errors.New("hash failed")) + mockBackend2.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return((Hash)(Hash{}), errors.New("hash failed")) machine2 := &machineImpl{ backend: mockBackend2, logger: s.logger, @@ -121,27 +121,11 @@ func (s *ImplementationSuite) TestHash() { require.Contains(err.Error(), "could not get the machine's root hash") mockBackend2.AssertExpectations(s.T()) - // Test hash with invalid length - mockBackend3 := NewMockBackend() - mockBackend3.On("GetRootHash", mock.AnythingOfType("time.Duration")).Return(make([]byte, 16), nil) // Invalid length - machine3 := &machineImpl{ - backend: mockBackend3, - logger: s.logger, - params: model.ExecutionParameters{ - LoadDeadline: 
time.Second * 5, - }, - } - _, err = machine3.Hash(ctx) - require.Error(err) - require.ErrorIs(err, ErrMachineInternal) - require.Contains(err.Error(), "invalid machine root hash length") - // Test hash with canceled context canceledCtx, cancel := context.WithCancel(ctx) cancel() _, err = machine.Hash(canceledCtx) require.ErrorIs(err, ErrCanceled) - mockBackend3.AssertExpectations(s.T()) } // Test OutputsHash method @@ -153,7 +137,7 @@ func (s *ImplementationSuite) TestOutputsHash() { mockBackend := NewMockBackend() expectedHash := randomFakeHash() mockBackend.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), expectedHash, nil) + uint8(0), uint16(ManualYieldReasonAccepted), expectedHash[:], nil) machine := &machineImpl{ backend: mockBackend, @@ -165,7 +149,7 @@ func (s *ImplementationSuite) TestOutputsHash() { hash, err := machine.OutputsHash(ctx) require.NoError(err) - require.Equal(expectedHash, hash[:]) + require.Equal(expectedHash, hash) mockBackend.AssertExpectations(s.T()) // Test outputs hash with rejected request @@ -239,7 +223,7 @@ func (s *ImplementationSuite) TestAdvance() { } input := []byte("test input") - accepted, outputs, reports, hash, err := machine.Advance(ctx, input) + accepted, outputs, reports, _, _, hash, err := machine.Advance(ctx, input, false) require.NoError(err) require.True(accepted) require.Empty(outputs) @@ -261,7 +245,7 @@ func (s *ImplementationSuite) TestAdvance() { AdvanceMaxDeadline: time.Second * 10, }, } - accepted, outputs, reports, hash, err = machine2.Advance(ctx, input) + accepted, outputs, reports, _, _, hash, err = machine2.Advance(ctx, input, false) require.NoError(err) require.False(accepted) require.Empty(outputs) @@ -283,7 +267,7 @@ func (s *ImplementationSuite) TestAdvance() { AdvanceMaxDeadline: time.Second * 10, }, } - accepted, outputs, reports, hash, err = machine3.Advance(ctx, input) + accepted, outputs, reports, _, _, hash, err = 
machine3.Advance(ctx, input, false) require.ErrorIs(err, ErrException) require.False(accepted) require.Equal(Hash{}, hash) @@ -304,7 +288,7 @@ func (s *ImplementationSuite) TestAdvance() { }, } largeInput := make([]byte, 10) - _, _, _, _, err = machine4.Advance(ctx, largeInput) + _, _, _, _, _, _, err = machine4.Advance(ctx, largeInput, false) require.ErrorIs(err, ErrPayloadLengthLimitExceeded) mockBackend4.AssertExpectations(s.T()) @@ -327,7 +311,7 @@ func (s *ImplementationSuite) TestAdvance() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine5.Advance(ctx, input) + _, _, _, _, _, _, err = machine5.Advance(ctx, input, false) require.Error(err) require.ErrorIs(err, ErrHashLength) mockBackend5.AssertExpectations(s.T()) @@ -560,8 +544,9 @@ func (s *ImplementationSuite) TestHelperMethods() { // Test wasLastRequestAccepted mockBackend3 := NewMockBackend() + expectedHash3 := randomFakeHash() mockBackend3.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonAccepted), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonAccepted), expectedHash3[:], nil) machine3 := &machineImpl{ backend: mockBackend3, logger: s.logger, @@ -576,8 +561,9 @@ func (s *ImplementationSuite) TestHelperMethods() { mockBackend3.AssertExpectations(s.T()) mockBackend4 := NewMockBackend() + expectedHash4 := randomFakeHash() mockBackend4.On("ReceiveCmioRequest", mock.AnythingOfType("time.Duration")).Return( - uint8(0), uint16(ManualYieldReasonRejected), randomFakeHash(), nil) + uint8(0), uint16(ManualYieldReasonRejected), expectedHash4[:], nil) machine4 := &machineImpl{ backend: mockBackend4, logger: s.logger, @@ -672,7 +658,7 @@ func (s *ImplementationSuite) TestRun() { }, } - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Empty(outputs) require.Empty(reports) @@ -692,7 +678,7 @@ func (s 
*ImplementationSuite) TestRun() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, err = machine2.run(ctx, AdvanceStateRequest) + _, _, _, _, err = machine2.run(ctx, AdvanceStateRequest, false) require.Error(err) require.Contains(err.Error(), "read cycle failed") mockBackend2.AssertExpectations(s.T()) @@ -715,7 +701,7 @@ func (s *ImplementationSuite) TestRun() { }, } - _, _, err = machine3.run(ctx, AdvanceStateRequest) + _, _, _, _, err = machine3.run(ctx, AdvanceStateRequest, false) require.NoError(err) mockBackend3.AssertExpectations(s.T()) @@ -740,87 +726,87 @@ func (s *ImplementationSuite) TestStep() { mockBackend.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(150), nil) machine.backend = mockBackend - yieldType, cycle, err := machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err := machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.NoError(err) require.NotNil(yieldType) require.Equal(ManualYield, *yieldType) require.Equal(uint64(150), cycle) mockBackend.AssertExpectations(s.T()) - // Test step with automatic yield + // Test runIncrementInterval with automatic yield mockBackend2 := NewMockBackend() mockBackend2.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedAutomatically, nil) mockBackend2.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(200), nil) machine.backend = mockBackend2 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.NoError(err) require.NotNil(yieldType) require.Equal(AutomaticYield, *yieldType) require.Equal(uint64(200), cycle) mockBackend2.AssertExpectations(s.T()) - // Test step with soft yield (no yield) + // Test runIncrementInterval with soft yield (no yield) mockBackend3 := NewMockBackend() mockBackend3.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedSoftly, nil) 
mockBackend3.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(150), nil) machine.backend = mockBackend3 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.NoError(err) require.Nil(yieldType) require.Equal(uint64(150), cycle) mockBackend3.AssertExpectations(s.T()) - // Test step with reached target mcycle + // Test runIncrementInterval with reached target mcycle mockBackend4 := NewMockBackend() mockBackend4.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(ReachedTargetMcycle, nil) mockBackend4.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(1000), nil) machine.backend = mockBackend4 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.ErrorIs(err, ErrReachedTargetMcycle) require.Nil(yieldType) require.Equal(uint64(1000), cycle) mockBackend4.AssertExpectations(s.T()) - // Test step with halted + // Test runIncrementInterval with halted mockBackend5 := NewMockBackend() mockBackend5.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(Halted, nil) mockBackend5.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(500), nil) machine.backend = mockBackend5 - yieldType, cycle, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, cycle, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.ErrorIs(err, ErrHalted) require.Nil(yieldType) require.Equal(uint64(500), cycle) - // Test step already at limit cycle - yieldType, cycle, err = machine.step(ctx, 1000, 1000, time.Second) + // Test runIncrementInterval already at limit cycle + yieldType, cycle, err = machine.runIncrementInterval(ctx, 1000, 1000, nil, time.Second) require.ErrorIs(err, ErrReachedLimitMcycle) require.Nil(yieldType) 
require.Equal(uint64(0), cycle) mockBackend5.AssertExpectations(s.T()) - // Test step with backend run error + // Test runIncrementInterval with backend run error mockBackend6 := NewMockBackend() mockBackend6.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration"), ).Return(BreakReason(0), errors.New("run failed")) machine.backend = mockBackend6 - yieldType, _, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, _, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.Error(err) require.Contains(err.Error(), "run failed") require.Nil(yieldType) mockBackend6.AssertExpectations(s.T()) - // Test step with read cycle error + // Test runIncrementInterval with read cycle error mockBackend7 := NewMockBackend() mockBackend7.On("Run", mock.AnythingOfType("uint64"), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil) mockBackend7.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(0), errors.New("read cycle failed")) machine.backend = mockBackend7 - yieldType, _, err = machine.step(ctx, 100, 1000, time.Second) + yieldType, _, err = machine.runIncrementInterval(ctx, 100, 1000, nil, time.Second) require.Error(err) require.Contains(err.Error(), "read cycle failed") require.Nil(yieldType) @@ -854,7 +840,7 @@ func (s *ImplementationSuite) TestProcess() { } input := []byte("test input") - accepted, outputs, reports, data, err := machine.process(ctx, input, AdvanceStateRequest) + accepted, outputs, reports, _, _, data, err := machine.process(ctx, input, AdvanceStateRequest, false) require.NoError(err) require.True(accepted) require.Empty(outputs) @@ -876,7 +862,7 @@ func (s *ImplementationSuite) TestProcess() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine2.process(ctx, input, AdvanceStateRequest) + _, _, _, _, _, _, err = machine2.process(ctx, input, AdvanceStateRequest, false) require.ErrorIs(err, ErrPayloadLengthLimitExceeded) mockBackend2.AssertExpectations(s.T()) @@ 
-899,7 +885,7 @@ func (s *ImplementationSuite) TestProcess() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine3.process(ctx, input, AdvanceStateRequest) + _, _, _, _, _, _, err = machine3.process(ctx, input, AdvanceStateRequest, false) require.Error(err) require.Contains(err.Error(), "send failed") mockBackend3.AssertExpectations(s.T()) @@ -920,7 +906,7 @@ func (s *ImplementationSuite) TestProcess() { AdvanceMaxDeadline: time.Second * 10, }, } - _, _, _, _, err = machine4.process(ctx, input, AdvanceStateRequest) + _, _, _, _, _, _, err = machine4.process(ctx, input, AdvanceStateRequest, false) require.Error(err) require.Contains(err.Error(), "read cycle failed") mockBackend4.AssertExpectations(s.T()) @@ -955,7 +941,7 @@ func (s *ImplementationSuite) TestRunWithAutomaticYields() { mockBackend.On("Run", uint64(150), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil).Once() mockBackend.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(100), nil).Once() - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Len(outputs, 1) require.Equal([]byte("test output"), outputs[0]) @@ -993,7 +979,7 @@ func (s *ImplementationSuite) TestRunWithAutomaticYieldsReports() { mockBackend.On("Run", uint64(150), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil).Once() mockBackend.On("ReadMCycle", mock.AnythingOfType("time.Duration")).Return(uint64(100), nil).Once() - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Empty(outputs) require.Len(reports, 1) @@ -1057,7 +1043,7 @@ func (s *ImplementationSuite) TestMultipleAutomaticYields() { mockBackend.On("Run", uint64(150), mock.AnythingOfType("time.Duration")).Return(YieldedManually, nil).Once() mockBackend.On("ReadMCycle", 
mock.AnythingOfType("time.Duration")).Return(uint64(60), nil).Once() - outputs, reports, err := machine.run(ctx, AdvanceStateRequest) + outputs, reports, _, _, err := machine.run(ctx, AdvanceStateRequest, false) require.NoError(err) require.Len(outputs, 2) diff --git a/pkg/machine/libcartesi.go b/pkg/machine/libcartesi.go index aadee3b43..4416f1b92 100644 --- a/pkg/machine/libcartesi.go +++ b/pkg/machine/libcartesi.go @@ -5,6 +5,7 @@ package machine import ( "encoding/json" + "errors" "fmt" "time" @@ -16,10 +17,11 @@ type RemoteMachineInterface interface { SetTimeout(timeoutMs int64) error Load(dir string, runtimeConfig string) error Run(mcycleEnd uint64) (emulator.BreakReason, error) - GetRootHash() ([]byte, error) + GetRootHash() (emulator.Hash, error) ReadReg(reg emulator.RegID) (uint64, error) SendCmioResponse(reason uint16, data []byte) error ReceiveCmioRequest() (uint8, uint16, []byte, error) + WriteMemory(address uint64, data []byte) error Store(directory string) error Delete() ForkServer() (*emulator.RemoteMachine, string, uint32, error) @@ -54,9 +56,9 @@ func (e *LibCartesiBackend) Run(mcycleEnd uint64, timeout time.Duration) (BreakR return BreakReason(br), err } -func (e *LibCartesiBackend) GetRootHash(timeout time.Duration) ([]byte, error) { +func (e *LibCartesiBackend) GetRootHash(timeout time.Duration) (Hash, error) { if err := e.inner.SetTimeout(timeout.Milliseconds()); err != nil { - return nil, fmt.Errorf("failed to set operation timeout: %w", err) + return Hash{}, fmt.Errorf("failed to set operation timeout: %w", err) } return e.inner.GetRootHash() } @@ -104,6 +106,13 @@ func (e *LibCartesiBackend) Store(directory string, timeout time.Duration) error return e.inner.Store(directory) } +func (e *LibCartesiBackend) WriteMemory(address uint64, data []byte, timeout time.Duration) error { + if err := e.inner.SetTimeout(timeout.Milliseconds()); err != nil { + return fmt.Errorf("failed to set operation timeout: %w", err) + } + return 
e.inner.WriteMemory(address, data) +} + func (e *LibCartesiBackend) Delete() { e.inner.Delete() } @@ -138,3 +147,125 @@ func (e *LibCartesiBackend) NewMachineRuntimeConfig() (string, error) { func (e *LibCartesiBackend) CmioRxBufferSize() uint64 { return 1 << emulator.CmioRxBufferLog2Size } + +func (e *LibCartesiBackend) RunAndCollectRootHashes( + mcycleEnd uint64, + state *HashCollectorState, + timeout time.Duration, +) (reason BreakReason, err error) { + if state == nil { + return Failed, errors.New("nil state") + } + if state.Period == 0 { + return Failed, errors.New("State.Period must be > 0") + } + + // Set up timeout management: calculate absolute deadline if timeout is specified + var deadline time.Time + hasDeadline := timeout > 0 + if hasDeadline { + deadline = time.Now().Add(timeout) + } + remaining := func() time.Duration { + if !hasDeadline { + return 0 + } + d := time.Until(deadline) + if d <= 0 { + return time.Nanosecond + } + return d + } + checkDeadline := func() error { + if hasDeadline && time.Now().After(deadline) { + return errors.New("runWithRootHashes: deadline exceeded") + } + return nil + } + + if err := checkDeadline(); err != nil { + return Failed, err + } + cur, err := e.ReadMCycle(remaining()) + if err != nil { + return Failed, err + } + + collected := (uint64)(0) + + for { + if err := checkDeadline(); err != nil { + return Failed, err + } + if cur >= mcycleEnd { + // No more cycles to execute + return ReachedTargetMcycle, nil + } + + // Calculate the next collection point: distance to the next multiple of the period + // This ensures we collect hashes at regular intervals aligned with the period + var step uint64 + if r := state.Phase % state.Period; r == 0 { + step = state.Period + } else { + step = state.Period - r + } + + nextHashCycle := cur + step + target := min(nextHashCycle, mcycleEnd) + + // Run the machine until target cycle or until it yields/halts + br, err := e.Run(target, remaining()) + if err != nil { + return Failed, err 
+ } + + // Check where we stopped after the run + if err := checkDeadline(); err != nil { + return Failed, err + } + pos, err := e.ReadMCycle(remaining()) + if err != nil { + return Failed, err + } + + advanced := pos - cur + state.Phase = (state.Phase + advanced) % state.Period + cur = pos + + // Only collect hash if we reached the exact boundary (pos == nextHashCycle) + // This ensures "hash after each complete period", matching the C API behavior + // and avoiding duplicate collections if the machine stops early due to yields + if pos == nextHashCycle { + if err := checkDeadline(); err != nil { + return Failed, err + } + h, err := e.GetRootHash(remaining()) + if err != nil { + return Failed, err + } + + state.Hashes = append(state.Hashes, h) + + collected++ + if state.MaxHashes > 0 && collected >= state.MaxHashes { + return YieldedSoftly, nil + } + } + + switch br { + case ReachedTargetMcycle: + if cur >= mcycleEnd { + return ReachedTargetMcycle, nil + } + case YieldedManually: + return br, nil + case YieldedAutomatically, YieldedSoftly, Halted: + return br, nil + case Failed: + return Failed, errors.New("run failed") + default: + return Failed, errors.New("unknown break reason") + } + } +} diff --git a/pkg/machine/libcartesi_test.go b/pkg/machine/libcartesi_test.go index 9b21b6953..4dd008db4 100644 --- a/pkg/machine/libcartesi_test.go +++ b/pkg/machine/libcartesi_test.go @@ -98,10 +98,7 @@ func (s *LibCartesiSuite) TestRun() { func (s *LibCartesiSuite) TestGetRootHash() { require := s.Require() - expectedHash := make([]byte, 32) - for i := range expectedHash { - expectedHash[i] = byte(i) - } + expectedHash := randomFakeHash() // Test successful get root hash s.mockRemoteMachine.On("SetTimeout", int64(5000)).Return(nil) @@ -126,7 +123,7 @@ func (s *LibCartesiSuite) TestGetRootHash() { s.mockRemoteMachine = new(MockRemoteMachine) s.backend = &LibCartesiBackend{inner: s.mockRemoteMachine} s.mockRemoteMachine.On("SetTimeout", int64(5000)).Return(nil) - 
s.mockRemoteMachine.On("GetRootHash").Return([]byte(nil), errors.New("hash error")) + s.mockRemoteMachine.On("GetRootHash").Return(Hash{}, errors.New("hash error")) _, err = s.backend.GetRootHash(5 * time.Second) require.Error(err) @@ -437,9 +434,9 @@ func (m *MockRemoteMachine) Run(mcycleEnd uint64) (emulator.BreakReason, error) return args.Get(0).(emulator.BreakReason), args.Error(1) } -func (m *MockRemoteMachine) GetRootHash() ([]byte, error) { +func (m *MockRemoteMachine) GetRootHash() (emulator.Hash, error) { args := m.Called() - return args.Get(0).([]byte), args.Error(1) + return args.Get(0).(Hash), args.Error(1) } func (m *MockRemoteMachine) ReadReg(reg emulator.RegID) (uint64, error) { @@ -462,6 +459,11 @@ func (m *MockRemoteMachine) Store(directory string) error { return args.Error(0) } +func (m *MockRemoteMachine) WriteMemory(address uint64, data []byte) error { + args := m.Called(address, data) + return args.Error(0) +} + func (m *MockRemoteMachine) Delete() { m.Called() } diff --git a/pkg/machine/machine.go b/pkg/machine/machine.go index 45e6d1735..28625b6ff 100644 --- a/pkg/machine/machine.go +++ b/pkg/machine/machine.go @@ -54,12 +54,14 @@ type Machine interface { Hash(ctx context.Context) (Hash, error) // OutputsHash returns the outputs hash stored in the cmio tx buffer. OutputsHash(ctx context.Context) (Hash, error) + // WriteCheckpointHash writes the given checkpoint hash to the machine memory. + WriteCheckpointHash(ctx context.Context, hash Hash) error // Advance sends an input to the machine. // It returns a boolean indicating whether or not the request was accepted. // It also returns the corresponding outputs, reports, and the hash of the outputs. // In case the request is not accepted, the function does not return outputs. 
- Advance(ctx context.Context, input []byte) (bool, []Output, []Report, Hash, error) + Advance(ctx context.Context, input []byte, computeHashes bool) (bool, []Output, []Report, []Hash, uint64, Hash, error) // Inspect sends a query to the machine. // It returns a boolean indicating whether or not the request was accepted diff --git a/pkg/machine/machine_test.go b/pkg/machine/machine_test.go index 5325e2ec2..3da297b78 100644 --- a/pkg/machine/machine_test.go +++ b/pkg/machine/machine_test.go @@ -214,7 +214,7 @@ func (s *MachineSuite) TestMachineInterface() { require.Equal(Hash{6, 7, 8, 9, 10}, outputsHash) // Test Advance - accepted, outputs, reports, advanceHash, err := machine.Advance(ctx, []byte("input")) + accepted, outputs, reports, _, _, advanceHash, err := machine.Advance(ctx, []byte("input"), false) require.NoError(err) require.True(accepted) require.Len(outputs, 2) @@ -279,7 +279,7 @@ func (s *MachineSuite) TestMachineInterfaceErrors() { require.Contains(err.Error(), "outputs hash error") // Test Advance error - _, _, _, _, err = machine.Advance(ctx, []byte("input")) + _, _, _, _, _, _, err = machine.Advance(ctx, []byte("input"), false) require.Error(err) require.Contains(err.Error(), "advance error") @@ -310,11 +310,15 @@ type MockMachine struct { OutputsHashReturn Hash OutputsHashError error - AdvanceAcceptedReturn bool - AdvanceOutputsReturn []Output - AdvanceReportsReturn []Report - AdvanceHashReturn Hash - AdvanceError error + CheckpointHashError error + + AdvanceAcceptedReturn bool + AdvanceOutputsReturn []Output + AdvanceReportsReturn []Report + AdvanceHashesReturn []Hash + AdvanceRemainingReturn uint64 + AdvanceHashReturn Hash + AdvanceError error InspectAcceptedReturn bool InspectReportsReturn []Report @@ -339,12 +343,18 @@ func (m *MockMachine) OutputsHash(_ context.Context) (Hash, error) { return m.OutputsHashReturn, m.OutputsHashError } -func (m *MockMachine) Advance(_ context.Context, _ []byte) ( - bool, []Output, []Report, Hash, error, +func (m 
*MockMachine) WriteCheckpointHash(_ context.Context, _ Hash) error { + return m.CheckpointHashError +} + +func (m *MockMachine) Advance(_ context.Context, _ []byte, leafs bool) ( + bool, []Output, []Report, []Hash, uint64, Hash, error, ) { return m.AdvanceAcceptedReturn, m.AdvanceOutputsReturn, m.AdvanceReportsReturn, + m.AdvanceHashesReturn, + m.AdvanceRemainingReturn, m.AdvanceHashReturn, m.AdvanceError } diff --git a/test/validator/validator_test.go b/test/validator/validator_test.go index d32661b8f..61f113623 100644 --- a/test/validator/validator_test.go +++ b/test/validator/validator_test.go @@ -124,7 +124,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPristineClaim() { InputIndex: input.Index, Status: model.InputCompletionStatus_Accepted, OutputsHash: pristineRootHash, - MachineHash: &machinehash1, + MachineHash: machinehash1, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -215,7 +215,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { InputIndex: firstEpochInput.Index, Status: model.InputCompletionStatus_Accepted, OutputsHash: firstEpochClaim, - MachineHash: &machinehash1, + MachineHash: machinehash1, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -231,7 +231,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsPreviousClaim() { // since there are no new outputs in the second epoch, // the machine OutputsHash will remain the same OutputsHash: firstEpochClaim, - MachineHash: &machinehash2, + MachineHash: machinehash2, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -311,7 +311,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() Status: model.InputCompletionStatus_Accepted, OutputsHash: expectedClaim, Outputs: [][]byte{outputRawData}, - MachineHash: &machinehash1, + MachineHash: machinehash1, } err = s.repository.StoreAdvanceResult(s.ctx, 1, 
&advanceResult) s.Require().Nil(err) @@ -401,7 +401,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() Status: model.InputCompletionStatus_Accepted, OutputsHash: firstEpochClaim, Outputs: [][]byte{firstOutputData}, - MachineHash: &machinehash1, + MachineHash: machinehash1, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) @@ -452,7 +452,7 @@ func (s *ValidatorRepositoryIntegrationSuite) TestItReturnsANewClaimAndProofs() Status: model.InputCompletionStatus_Accepted, OutputsHash: expectedEpochClaim, Outputs: [][]byte{secondOutputData}, - MachineHash: &machinehash2, + MachineHash: machinehash2, } err = s.repository.StoreAdvanceResult(s.ctx, 1, &advanceResult) s.Require().Nil(err) From 1942f9d9e3bbb67b119261e46859c1c4017e1e7b Mon Sep 17 00:00:00 2001 From: Marcelo Politzer <251334+mpolitzer@users.noreply.github.com> Date: Wed, 17 Dec 2025 12:24:28 -0300 Subject: [PATCH 6/8] fix: type of time values is Duration --- internal/config/generate/Config.toml | 2 +- internal/config/generated.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/config/generate/Config.toml b/internal/config/generate/Config.toml index 7d81c8f5f..3bbedcfdd 100644 --- a/internal/config/generate/Config.toml +++ b/internal/config/generate/Config.toml @@ -152,7 +152,7 @@ used-by = ["evmreader", "claimer", "node", "prt"] [blockchain.CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT] default = "60" -go-type = "uint64" +go-type = "Duration" description = """ Block subscription timeout in seconds.""" used-by = ["evmreader", "node"] diff --git a/internal/config/generated.go b/internal/config/generated.go index 7d0030ba8..19fee96a0 100644 --- a/internal/config/generated.go +++ b/internal/config/generated.go @@ -500,7 +500,7 @@ type EvmreaderConfig struct { BlockchainId uint64 `mapstructure:"CARTESI_BLOCKCHAIN_ID"` // Block subscription timeout in seconds. 
- BlockchainSubscriptionTimeout uint64 `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` + BlockchainSubscriptionTimeout Duration `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` // WebSocket endpoint for the blockchain RPC provider. BlockchainWsEndpoint URL `mapstructure:"CARTESI_BLOCKCHAIN_WS_ENDPOINT"` @@ -775,7 +775,7 @@ type NodeConfig struct { BlockchainLegacyEnabled bool `mapstructure:"CARTESI_BLOCKCHAIN_LEGACY_ENABLED"` // Block subscription timeout in seconds. - BlockchainSubscriptionTimeout uint64 `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` + BlockchainSubscriptionTimeout Duration `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` // WebSocket endpoint for the blockchain RPC provider. BlockchainWsEndpoint URL `mapstructure:"CARTESI_BLOCKCHAIN_WS_ENDPOINT"` @@ -1634,16 +1634,16 @@ func GetBlockchainLegacyEnabled() (bool, error) { } // GetBlockchainSubscriptionTimeout returns the value for the environment variable CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT. -func GetBlockchainSubscriptionTimeout() (uint64, error) { +func GetBlockchainSubscriptionTimeout() (Duration, error) { s := viper.GetString(BLOCKCHAIN_SUBSCRIPTION_TIMEOUT) if s != "" { - v, err := toUint64(s) + v, err := toDuration(s) if err != nil { return v, fmt.Errorf("failed to parse %s: %w", BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, err) } return v, nil } - return notDefineduint64(), fmt.Errorf("%s: %w", BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, ErrNotDefined) + return notDefinedDuration(), fmt.Errorf("%s: %w", BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, ErrNotDefined) } // GetBlockchainWsEndpoint returns the value for the environment variable CARTESI_BLOCKCHAIN_WS_ENDPOINT. From cf77fafee1e958b46b310a54e3a24b379527e2f1 Mon Sep 17 00:00:00 2001 From: Marcelo Politzer <251334+mpolitzer@users.noreply.github.com> Date: Wed, 17 Dec 2025 13:10:39 -0300 Subject: [PATCH 7/8] fix: respect max_retries and subscription_timeout - fixed evm-reader `Run`. 
It was not respecting CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT and CARTESI_BLOCKCHAIN_HTTP_MAX_RETRIES options. - Propagate `node` Context and Cancel function to services. --- internal/config/generate/Config.toml | 7 --- internal/config/generated.go | 65 ++++++---------------------- internal/evmreader/evmreader.go | 25 ++++++++--- internal/evmreader/evmreader_test.go | 16 ++++--- internal/evmreader/output_test.go | 16 ++++--- internal/evmreader/service.go | 26 +++++++---- internal/node/node.go | 12 +++++ pkg/service/service.go | 7 ++- 8 files changed, 86 insertions(+), 88 deletions(-) diff --git a/internal/config/generate/Config.toml b/internal/config/generate/Config.toml index 3bbedcfdd..c19a3386e 100644 --- a/internal/config/generate/Config.toml +++ b/internal/config/generate/Config.toml @@ -150,13 +150,6 @@ The default block to be used by EVM Reader and Claimer when requesting new block One of 'latest', 'pending', 'safe', 'finalized'""" used-by = ["evmreader", "claimer", "node", "prt"] -[blockchain.CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT] -default = "60" -go-type = "Duration" -description = """ -Block subscription timeout in seconds.""" -used-by = ["evmreader", "node"] - [rollups.CARTESI_BLOCKCHAIN_HTTP_MAX_RETRIES] default = "4" go-type = "uint64" diff --git a/internal/config/generated.go b/internal/config/generated.go index 19fee96a0..443217d51 100644 --- a/internal/config/generated.go +++ b/internal/config/generated.go @@ -33,7 +33,6 @@ const ( BLOCKCHAIN_HTTP_ENDPOINT = "CARTESI_BLOCKCHAIN_HTTP_ENDPOINT" BLOCKCHAIN_ID = "CARTESI_BLOCKCHAIN_ID" BLOCKCHAIN_LEGACY_ENABLED = "CARTESI_BLOCKCHAIN_LEGACY_ENABLED" - BLOCKCHAIN_SUBSCRIPTION_TIMEOUT = "CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT" BLOCKCHAIN_WS_ENDPOINT = "CARTESI_BLOCKCHAIN_WS_ENDPOINT" CONTRACTS_APPLICATION_FACTORY_ADDRESS = "CARTESI_CONTRACTS_APPLICATION_FACTORY_ADDRESS" CONTRACTS_AUTHORITY_FACTORY_ADDRESS = "CARTESI_CONTRACTS_AUTHORITY_FACTORY_ADDRESS" @@ -102,8 +101,6 @@ func SetDefaults() { 
viper.SetDefault(BLOCKCHAIN_LEGACY_ENABLED, "false") - viper.SetDefault(BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, "60") - // no default for CARTESI_BLOCKCHAIN_WS_ENDPOINT // no default for CARTESI_CONTRACTS_APPLICATION_FACTORY_ADDRESS @@ -499,9 +496,6 @@ type EvmreaderConfig struct { // An unique identifier representing a blockchain network. BlockchainId uint64 `mapstructure:"CARTESI_BLOCKCHAIN_ID"` - // Block subscription timeout in seconds. - BlockchainSubscriptionTimeout Duration `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` - // WebSocket endpoint for the blockchain RPC provider. BlockchainWsEndpoint URL `mapstructure:"CARTESI_BLOCKCHAIN_WS_ENDPOINT"` @@ -580,13 +574,6 @@ func LoadEvmreaderConfig() (*EvmreaderConfig, error) { return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_ID is required for the evmreader service: %w", err) } - cfg.BlockchainSubscriptionTimeout, err = GetBlockchainSubscriptionTimeout() - if err != nil && err != ErrNotDefined { - return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT: %w", err) - } else if err == ErrNotDefined { - return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT is required for the evmreader service: %w", err) - } - cfg.BlockchainWsEndpoint, err = GetBlockchainWsEndpoint() if err != nil && err != ErrNotDefined { return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_WS_ENDPOINT: %w", err) @@ -774,9 +761,6 @@ type NodeConfig struct { // (instead of EIP-1559). BlockchainLegacyEnabled bool `mapstructure:"CARTESI_BLOCKCHAIN_LEGACY_ENABLED"` - // Block subscription timeout in seconds. - BlockchainSubscriptionTimeout Duration `mapstructure:"CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT"` - // WebSocket endpoint for the blockchain RPC provider. 
BlockchainWsEndpoint URL `mapstructure:"CARTESI_BLOCKCHAIN_WS_ENDPOINT"` @@ -900,13 +884,6 @@ func LoadNodeConfig() (*NodeConfig, error) { return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_LEGACY_ENABLED is required for the node service: %w", err) } - cfg.BlockchainSubscriptionTimeout, err = GetBlockchainSubscriptionTimeout() - if err != nil && err != ErrNotDefined { - return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT: %w", err) - } else if err == ErrNotDefined { - return nil, fmt.Errorf("CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT is required for the node service: %w", err) - } - cfg.BlockchainWsEndpoint, err = GetBlockchainWsEndpoint() if err != nil && err != ErrNotDefined { return nil, fmt.Errorf("failed to get CARTESI_BLOCKCHAIN_WS_ENDPOINT: %w", err) @@ -1394,21 +1371,20 @@ func (c *NodeConfig) ToClaimerConfig() *ClaimerConfig { // ToEvmreaderConfig converts a NodeConfig to a EvmreaderConfig. func (c *NodeConfig) ToEvmreaderConfig() *EvmreaderConfig { return &EvmreaderConfig{ - BlockchainDefaultBlock: c.BlockchainDefaultBlock, - BlockchainHttpEndpoint: c.BlockchainHttpEndpoint, - BlockchainId: c.BlockchainId, - BlockchainSubscriptionTimeout: c.BlockchainSubscriptionTimeout, - BlockchainWsEndpoint: c.BlockchainWsEndpoint, - DatabaseConnection: c.DatabaseConnection, - FeatureInputReaderEnabled: c.FeatureInputReaderEnabled, - TelemetryAddress: c.TelemetryAddress, - LogColor: c.LogColor, - LogLevel: c.LogLevel, - BlockchainHttpMaxRetries: c.BlockchainHttpMaxRetries, - BlockchainHttpRetryMaxWait: c.BlockchainHttpRetryMaxWait, - BlockchainHttpRetryMinWait: c.BlockchainHttpRetryMinWait, - BlockchainMaxBlockRange: c.BlockchainMaxBlockRange, - MaxStartupTime: c.MaxStartupTime, + BlockchainDefaultBlock: c.BlockchainDefaultBlock, + BlockchainHttpEndpoint: c.BlockchainHttpEndpoint, + BlockchainId: c.BlockchainId, + BlockchainWsEndpoint: c.BlockchainWsEndpoint, + DatabaseConnection: c.DatabaseConnection, + FeatureInputReaderEnabled: 
c.FeatureInputReaderEnabled, + TelemetryAddress: c.TelemetryAddress, + LogColor: c.LogColor, + LogLevel: c.LogLevel, + BlockchainHttpMaxRetries: c.BlockchainHttpMaxRetries, + BlockchainHttpRetryMaxWait: c.BlockchainHttpRetryMaxWait, + BlockchainHttpRetryMinWait: c.BlockchainHttpRetryMinWait, + BlockchainMaxBlockRange: c.BlockchainMaxBlockRange, + MaxStartupTime: c.MaxStartupTime, } } @@ -1633,19 +1609,6 @@ func GetBlockchainLegacyEnabled() (bool, error) { return notDefinedbool(), fmt.Errorf("%s: %w", BLOCKCHAIN_LEGACY_ENABLED, ErrNotDefined) } -// GetBlockchainSubscriptionTimeout returns the value for the environment variable CARTESI_BLOCKCHAIN_SUBSCRIPTION_TIMEOUT. -func GetBlockchainSubscriptionTimeout() (Duration, error) { - s := viper.GetString(BLOCKCHAIN_SUBSCRIPTION_TIMEOUT) - if s != "" { - v, err := toDuration(s) - if err != nil { - return v, fmt.Errorf("failed to parse %s: %w", BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, err) - } - return v, nil - } - return notDefinedDuration(), fmt.Errorf("%s: %w", BLOCKCHAIN_SUBSCRIPTION_TIMEOUT, ErrNotDefined) -} - // GetBlockchainWsEndpoint returns the value for the environment variable CARTESI_BLOCKCHAIN_WS_ENDPOINT. func GetBlockchainWsEndpoint() (URL, error) { s := viper.GetString(BLOCKCHAIN_WS_ENDPOINT) diff --git a/internal/evmreader/evmreader.go b/internal/evmreader/evmreader.go index 5f152bfb9..e2a1a1543 100644 --- a/internal/evmreader/evmreader.go +++ b/internal/evmreader/evmreader.go @@ -72,15 +72,28 @@ type appContracts struct { } func (r *Service) Run(ctx context.Context, ready chan struct{}) error { - for { + for attempt := uint64(1); ; attempt++ { err := r.watchForNewBlocks(ctx, ready) - // If the error is a SubscriptionError, re run watchForNewBlocks - // that it will restart the websocket subscription - if _, ok := err.(*SubscriptionError); !ok { + r.Logger.Error(err.Error()) + + if attempt > r.blockchainMaxRetries { + r.Logger.Error("Max attempts reached for subscription restart. 
Exiting", + "max_retries", r.blockchainMaxRetries, + ) return err } - r.Logger.Error(err.Error()) - r.Logger.Info("Restarting subscription") + + r.Logger.Info("Restarting subscription", + "attempt", attempt, + "remaining", r.blockchainMaxRetries - attempt, + "time_between_attempts", r.blockchainSubscriptionRetryInterval, + ) + + // sleep or cancel + select { + case <-ctx.Done(): + case <-time.After(r.blockchainSubscriptionRetryInterval): + } } } diff --git a/internal/evmreader/evmreader_test.go b/internal/evmreader/evmreader_test.go index 014c3f8b9..3e91f678e 100644 --- a/internal/evmreader/evmreader_test.go +++ b/internal/evmreader/evmreader_test.go @@ -145,13 +145,15 @@ func (s *EvmReaderSuite) SetupTest() { s.contractFactory = newMockAdapterFactory().SetupDefaultBehavior(s.applicationContract1, s.applicationContract2, s.inputBox) s.evmReader = &Service{ - client: s.client, - wsClient: s.wsClient, - repository: s.repository, - defaultBlock: DefaultBlock_Latest, - adapterFactory: s.contractFactory, - hasEnabledApps: true, - inputReaderEnabled: true, + client: s.client, + wsClient: s.wsClient, + repository: s.repository, + defaultBlock: DefaultBlock_Latest, + adapterFactory: s.contractFactory, + hasEnabledApps: true, + inputReaderEnabled: true, + blockchainMaxRetries: 0, + blockchainSubscriptionRetryInterval: time.Second, } logLevel, err := config.GetLogLevel() diff --git a/internal/evmreader/output_test.go b/internal/evmreader/output_test.go index 49a8b7970..99de38847 100644 --- a/internal/evmreader/output_test.go +++ b/internal/evmreader/output_test.go @@ -527,13 +527,15 @@ func (s *EvmReaderSuite) setupOutputMismatchTest() { s.contractFactory = newMockAdapterFactory() s.evmReader = &Service{ - client: s.client, - wsClient: s.wsClient, - repository: s.repository, -
defaultBlock: DefaultBlock_Latest, + adapterFactory: s.contractFactory, + hasEnabledApps: true, + inputReaderEnabled: true, + blockchainMaxRetries: 0, + blockchainSubscriptionRetryInterval: time.Second, } logLevel, err := config.GetLogLevel() diff --git a/internal/evmreader/service.go b/internal/evmreader/service.go index 24e3525e9..19e3c894f 100644 --- a/internal/evmreader/service.go +++ b/internal/evmreader/service.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math/big" + "time" "github.com/cartesi/rollups-node/internal/config" . "github.com/cartesi/rollups-node/internal/model" @@ -30,14 +31,16 @@ type CreateInfo struct { type Service struct { service.Service - client EthClientInterface - wsClient EthClientInterface - adapterFactory AdapterFactory - repository EvmReaderRepository - chainId uint64 - defaultBlock DefaultBlock - hasEnabledApps bool - inputReaderEnabled bool + client EthClientInterface + wsClient EthClientInterface + adapterFactory AdapterFactory + repository EvmReaderRepository + chainId uint64 + defaultBlock DefaultBlock + hasEnabledApps bool + inputReaderEnabled bool + blockchainMaxRetries uint64 + blockchainSubscriptionRetryInterval time.Duration } const EvmReaderConfigKey = "evm-reader" @@ -99,6 +102,8 @@ func Create(ctx context.Context, c *CreateInfo) (*Service, error) { return nil, fmt.Errorf("NodeConfig chainId mismatch: network %d != config %d", chainId.Uint64(), nodeConfig.ChainID) } + s.blockchainMaxRetries = c.Config.BlockchainHttpMaxRetries + s.blockchainSubscriptionRetryInterval = c.Config.BlockchainHttpRetryMinWait s.client = c.EthClient s.wsClient = c.EthWsClient @@ -140,7 +145,10 @@ func (s *Service) Tick() []error { func (s *Service) Serve() error { ready := make(chan struct{}, 1) - go s.Run(s.Context, ready) + go func() { + s.Run(s.Context, ready) + s.Service.Stop(false) + }() return s.Service.Serve() } diff --git a/internal/node/node.go b/internal/node/node.go index ef7e8ef23..e6c56a184 100644 --- a/internal/node/node.go +++ 
b/internal/node/node.go @@ -155,6 +155,8 @@ func newEVMReader(ctx context.Context, c *CreateInfo, s *Service) service.IServi readerArgs := evmreader.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "evm-reader", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -179,6 +181,8 @@ func newAdvancer(ctx context.Context, c *CreateInfo, s *Service) service.IServic advancerArgs := advancer.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "advancer", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -202,6 +206,8 @@ func newValidator(ctx context.Context, c *CreateInfo, s *Service) service.IServi validatorArgs := validator.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "validator", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -225,6 +231,8 @@ func newClaimer(ctx context.Context, c *CreateInfo, s *Service) service.IService claimerArgs := claimer.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "claimer", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -249,6 +257,8 @@ func newJsonrpc(ctx context.Context, c *CreateInfo, s *Service) service.IService jsonrpcArgs := jsonrpc.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "jsonrpc", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, @@ -271,6 +281,8 @@ func newPrt(ctx context.Context, c *CreateInfo, s *Service) service.IService { prtArgs := prt.CreateInfo{ CreateInfo: service.CreateInfo{ Name: "prt", + Context: s.Context, + Cancel: s.Cancel, LogLevel: c.Config.LogLevel, LogColor: c.Config.LogColor, EnableSignalHandling: false, diff --git a/pkg/service/service.go b/pkg/service/service.go index c689580c4..97660c492 
100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -103,6 +103,7 @@ type CreateInfo struct { Impl ServiceImpl ServeMux *http.ServeMux Context context.Context + Cancel context.CancelFunc } // Service stores runtime information. @@ -151,7 +152,10 @@ func Create(ctx context.Context, c *CreateInfo, s *Service) error { s.Context = c.Context } if s.Cancel == nil { - s.Context, s.Cancel = context.WithCancel(c.Context) + if c.Cancel == nil { + s.Context, c.Cancel = context.WithCancel(c.Context) + } + s.Cancel = c.Cancel } // ticker @@ -246,6 +250,7 @@ func (s *Service) Stop(force bool) []error { elapsed := time.Since(start) s.Running.Store(false) + s.Cancel() if len(errs) > 0 { s.Logger.Error("Stop", "force", force, From 4fb5b7a95279cd9dc5bbb65817ea395d1012dedd Mon Sep 17 00:00:00 2001 From: Marcelo Politzer <251334+mpolitzer@users.noreply.github.com> Date: Wed, 17 Dec 2025 17:36:22 -0300 Subject: [PATCH 8/8] feature: add a method to run the prototype PRT in the reference context --- Makefile | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/Makefile b/Makefile index 125f3bcdf..6d9a18e7c 100644 --- a/Makefile +++ b/Makefile @@ -343,6 +343,41 @@ start-postgres: ## Run the PostgreSQL 16 docker container @docker run --rm --name postgres -p 5432:5432 -d -e POSTGRES_PASSWORD=password -e POSTGRES_DB=rollupsdb -v $(CURDIR)/test/postgres/init-test-db.sh:/docker-entrypoint-initdb.d/init-test-db.sh postgres:17-alpine @$(MAKE) migrate +################################################################################ +# Prototype PRT (WIP) +DAVE_ROOT=$(PWD)/dave +APP_NAME?=honeypot +DEPLOYMENTS_DIR := "$(DAVE_ROOT)/cartesi-rollups/contracts/deployments/31337" +ANVIL_KEY_0=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 +ANVIL_KEY_7=0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356 + +# retrieve application variables from the database based on APP_NAME +APP_ADDRESS=$(shell psql
$${CARTESI_DATABASE_CONNECTION} -qtc "select iapplication_address from application WHERE name = '$(APP_NAME)';" | lua5.4 -e 'print((io.read("*a"):gsub("\\x", "0x"):gsub("%s+", "")))') +APP_URI=$(shell psql $${CARTESI_DATABASE_CONNECTION} -qtc "select template_uri from application WHERE name = '$(APP_NAME)';" | lua5.4 -e 'print((io.read("*a"):gsub("%s+", "")))') +APP_STATE=$(APP_URI)/_state/ + +APP_KEY=$(ANVIL_KEY_7) + +# prototype node expected environment variables +export WEB3_PRIVATE_KEY=$(ANVIL_KEY_0) +export DAVE_APP_FACTORY=$(shell jq -j ".DaveAppFactory" deployment.json) +export INPUT_BOX=$(shell jq -j .InputBox deployment.json) +export ERC20_PORTAL=$(shell jq -j .ERC20Portal deployment.json) +export ERC20_TOKEN=$(shell jq -j .TestFungibleToken deployment.json) +start-prt: deployment.json ## Run the prototype PRT node + rm -rf $(APP_STATE) && mkdir -p $(APP_STATE) + docker container create --name cp cartesi/rollups-node-devnet:devel + docker cp cp:/opt/cartesi/rollups-contracts/deployments/31337/ \ + $(DAVE_ROOT)/cartesi-rollups/contracts/deployments/; + docker container rm cp + env \ + MACHINE_PATH=$(APP_URI) \ + APP_ADDRESS=$(APP_ADDRESS) \ + STATE_DIR=$(APP_STATE) \ + RUST_BACKTRACE=full \ + $(DAVE_ROOT)/target/debug/cartesi-rollups-prt-node --sleep-duration-seconds 1 pk --web3-private-key $(APP_KEY) +################################################################################ + start: start-postgres start-devnet ## Start the anvil devnet and PostgreSQL 16 docker containers stop-devnet: ## Stop the anvil devnet docker container