From 3cffe8202f7fafe69cd96a88f807b4e6a50f11dc Mon Sep 17 00:00:00 2001 From: pavel-raykov Date: Tue, 24 Feb 2026 18:56:54 +0100 Subject: [PATCH 1/2] Minor. --- pkg/mercury/config_digest.go | 65 ++ pkg/mercury/config_digest_test.go | 201 ++++++ pkg/mercury/config_poller.go | 176 +++++ pkg/mercury/config_poller_test.go | 122 ++++ pkg/mercury/helpers_test.go | 188 ++++++ pkg/mercury/mocks/async_deleter.go | 68 ++ pkg/mercury/offchain_config_digester.go | 74 +++ pkg/mercury/offchain_config_digester_test.go | 57 ++ pkg/mercury/orm.go | 191 ++++++ pkg/mercury/orm_test.go | 378 +++++++++++ pkg/mercury/persistence_manager.go | 149 +++++ pkg/mercury/persistence_manager_test.go | 179 +++++ pkg/mercury/queue.go | 260 ++++++++ pkg/mercury/queue_test.go | 144 +++++ pkg/mercury/test_helpers.go | 17 + pkg/mercury/transmitter.go | 611 ++++++++++++++++++ pkg/mercury/transmitter_test.go | 579 +++++++++++++++++ pkg/mercury/types/types.go | 33 + pkg/mercury/utils/feeds.go | 117 ++++ pkg/mercury/utils/feeds_test.go | 58 ++ pkg/mercury/v2/reportcodec/report_codec.go | 79 +++ .../v2/reportcodec/report_codec_test.go | 162 +++++ pkg/mercury/v2/types/types.go | 52 ++ pkg/mercury/v3/reportcodec/report_codec.go | 85 +++ .../v3/reportcodec/report_codec_test.go | 168 +++++ pkg/mercury/v3/types/types.go | 56 ++ pkg/mercury/v4/reportcodec/report_codec.go | 78 +++ .../v4/reportcodec/report_codec_test.go | 164 +++++ pkg/mercury/v4/types/types.go | 54 ++ pkg/mercury/verifier/verifier.go | 111 ++++ pkg/mercury/verifier/verifier_test.go | 80 +++ pkg/mercury/wsrpc/cache/cache.go | 395 +++++++++++ pkg/mercury/wsrpc/cache/cache_set.go | 117 ++++ pkg/mercury/wsrpc/cache/cache_set_test.go | 58 ++ pkg/mercury/wsrpc/cache/cache_test.go | 202 ++++++ pkg/mercury/wsrpc/cache/helpers_test.go | 38 ++ pkg/mercury/wsrpc/client.go | 420 ++++++++++++ pkg/mercury/wsrpc/client_test.go | 188 ++++++ pkg/mercury/wsrpc/metrics.go | 49 ++ pkg/mercury/wsrpc/mocks/mocks.go | 49 ++ pkg/mercury/wsrpc/pb/generate.go | 2 + 
pkg/mercury/wsrpc/pb/mercury.pb.go | 529 +++++++++++++++ pkg/mercury/wsrpc/pb/mercury.proto | 60 ++ pkg/mercury/wsrpc/pb/mercury_wsrpc.pb.go | 87 +++ pkg/mercury/wsrpc/pool.go | 309 +++++++++ pkg/mercury/wsrpc/pool_test.go | 265 ++++++++ 46 files changed, 7524 insertions(+) create mode 100644 pkg/mercury/config_digest.go create mode 100644 pkg/mercury/config_digest_test.go create mode 100644 pkg/mercury/config_poller.go create mode 100644 pkg/mercury/config_poller_test.go create mode 100644 pkg/mercury/helpers_test.go create mode 100644 pkg/mercury/mocks/async_deleter.go create mode 100644 pkg/mercury/offchain_config_digester.go create mode 100644 pkg/mercury/offchain_config_digester_test.go create mode 100644 pkg/mercury/orm.go create mode 100644 pkg/mercury/orm_test.go create mode 100644 pkg/mercury/persistence_manager.go create mode 100644 pkg/mercury/persistence_manager_test.go create mode 100644 pkg/mercury/queue.go create mode 100644 pkg/mercury/queue_test.go create mode 100644 pkg/mercury/test_helpers.go create mode 100644 pkg/mercury/transmitter.go create mode 100644 pkg/mercury/transmitter_test.go create mode 100644 pkg/mercury/types/types.go create mode 100644 pkg/mercury/utils/feeds.go create mode 100644 pkg/mercury/utils/feeds_test.go create mode 100644 pkg/mercury/v2/reportcodec/report_codec.go create mode 100644 pkg/mercury/v2/reportcodec/report_codec_test.go create mode 100644 pkg/mercury/v2/types/types.go create mode 100644 pkg/mercury/v3/reportcodec/report_codec.go create mode 100644 pkg/mercury/v3/reportcodec/report_codec_test.go create mode 100644 pkg/mercury/v3/types/types.go create mode 100644 pkg/mercury/v4/reportcodec/report_codec.go create mode 100644 pkg/mercury/v4/reportcodec/report_codec_test.go create mode 100644 pkg/mercury/v4/types/types.go create mode 100644 pkg/mercury/verifier/verifier.go create mode 100644 pkg/mercury/verifier/verifier_test.go create mode 100644 pkg/mercury/wsrpc/cache/cache.go create mode 100644 
pkg/mercury/wsrpc/cache/cache_set.go create mode 100644 pkg/mercury/wsrpc/cache/cache_set_test.go create mode 100644 pkg/mercury/wsrpc/cache/cache_test.go create mode 100644 pkg/mercury/wsrpc/cache/helpers_test.go create mode 100644 pkg/mercury/wsrpc/client.go create mode 100644 pkg/mercury/wsrpc/client_test.go create mode 100644 pkg/mercury/wsrpc/metrics.go create mode 100644 pkg/mercury/wsrpc/mocks/mocks.go create mode 100644 pkg/mercury/wsrpc/pb/generate.go create mode 100644 pkg/mercury/wsrpc/pb/mercury.pb.go create mode 100644 pkg/mercury/wsrpc/pb/mercury.proto create mode 100644 pkg/mercury/wsrpc/pb/mercury_wsrpc.pb.go create mode 100644 pkg/mercury/wsrpc/pool.go create mode 100644 pkg/mercury/wsrpc/pool_test.go diff --git a/pkg/mercury/config_digest.go b/pkg/mercury/config_digest.go new file mode 100644 index 0000000000..d17cd30aaf --- /dev/null +++ b/pkg/mercury/config_digest.go @@ -0,0 +1,65 @@ +package mercury + +import ( + "encoding/binary" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/wsrpc/credentials" + + "github.com/smartcontractkit/chainlink-evm/gethwrappers/llo-feeds/generated/exposed_verifier" +) + +func makeConfigDigestArgs() abi.Arguments { + abi, err := abi.JSON(strings.NewReader(exposed_verifier.ExposedVerifierABI)) + if err != nil { + // assertion + panic("could not parse aggregator ABI: " + err.Error()) + } + return abi.Methods["exposedConfigDigestFromConfigData"].Inputs +} + +var configDigestArgs = makeConfigDigestArgs() + +func configDigest( + feedID common.Hash, + chainID *big.Int, + contractAddress common.Address, + configCount uint64, + oracles []common.Address, + transmitters []credentials.StaticSizedPublicKey, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, + prefix 
types.ConfigDigestPrefix, +) types.ConfigDigest { + msg, err := configDigestArgs.Pack( + feedID, + chainID, + contractAddress, + configCount, + oracles, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + if err != nil { + // assertion + panic(err) + } + rawHash := crypto.Keccak256(msg) + configDigest := types.ConfigDigest{} + if n := copy(configDigest[:], rawHash); n != len(configDigest) { + // assertion + panic("copy too little data") + } + binary.BigEndian.PutUint16(configDigest[:2], uint16(prefix)) + return configDigest +} diff --git a/pkg/mercury/config_digest_test.go b/pkg/mercury/config_digest_test.go new file mode 100644 index 0000000000..274717c28c --- /dev/null +++ b/pkg/mercury/config_digest_test.go @@ -0,0 +1,201 @@ +package mercury + +import ( + "math/big" + "reflect" + "testing" + "unsafe" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/wsrpc/credentials" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-evm/gethwrappers/llo-feeds/generated/exposed_verifier" +) + +// Adapted from: https://github.com/smartcontractkit/offchain-reporting/blob/991ebe1462fd56826a1ddfb34287d542acb2baee/lib/offchainreporting2/chains/evmutil/config_digest_test.go + +func TestConfigCalculationMatches(t *testing.T) { + key, err := crypto.GenerateKey() + require.NoError(t, err, "could not make private key for EOA owner") + owner, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + backend := simulated.NewBackend( + 
types.GenesisAlloc{owner.From: {Balance: new(big.Int).Lsh(big.NewInt(1), 60)}}, + simulated.WithBlockGasLimit(ethconfig.Defaults.Miner.GasCeil), + ) + _, _, eoa, err := exposed_verifier.DeployExposedVerifier( + owner, backend.Client(), + ) + backend.Commit() + require.NoError(t, err, "could not deploy test EOA") + p := gopter.NewProperties(nil) + p.Property("onchain/offchain config digests match", prop.ForAll( + func( + feedID [32]byte, + chainID uint64, + contractAddress common.Address, + configCount uint64, + oracles []common.Address, + transmitters [][32]byte, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, + ) bool { + chainIDBig := new(big.Int).SetUint64(chainID) + golangDigest := configDigest( + feedID, + chainIDBig, + contractAddress, + configCount, + oracles, + *(*[]credentials.StaticSizedPublicKey)(unsafe.Pointer(&transmitters)), + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ocrtypes.ConfigDigestPrefixMercuryV02, + ) + + bigChainID := new(big.Int) + bigChainID.SetUint64(chainID) + + solidityDigest, err := eoa.ExposedConfigDigestFromConfigData(nil, + feedID, + bigChainID, + contractAddress, + configCount, + oracles, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + require.NoError(t, err, "could not compute solidity version of config digest") + return golangDigest == solidityDigest + }, + GenHash(t), + gen.UInt64(), + GenAddress(t), + gen.UInt64(), + GenAddressArray(t), + GenClientPubKeyArray(t), + gen.UInt8(), + GenBytes(t), + gen.UInt64(), + GenBytes(t), + )) + p.TestingRun(t) +} + +func GenHash(t *testing.T) gopter.Gen { + var byteGens []gopter.Gen + for range 32 { + byteGens = append(byteGens, gen.UInt8()) + } + return gopter.CombineGens(byteGens...).Map( + func(byteArray any) (rv common.Hash) { + array, ok := byteArray.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve gen result") + for i, byteVal := range array.([]any) { + rv[i] 
= byteVal.(uint8) + } + return rv + }, + ) +} + +func GenHashArray(t *testing.T) gopter.Gen { + return gen.UInt8Range(0, 31).FlatMap( + func(length any) gopter.Gen { + var hashGens []gopter.Gen + for i := uint8(0); i < length.(uint8); i++ { + hashGens = append(hashGens, GenHash(t)) + } + return gopter.CombineGens(hashGens...).Map( + func(hashArray any) (rv []common.Hash) { + array, ok := hashArray.(*gopter.GenResult).Retrieve() + require.True(t, ok, "could not extract hash array") + for _, hashVal := range array.([]any) { + rv = append(rv, hashVal.(common.Hash)) + } + return rv + }, + ) + }, + reflect.ValueOf([]common.Hash{}).Type(), + ) +} + +func GenAddress(t *testing.T) gopter.Gen { + return GenHash(t).Map( + func(hash any) common.Address { + iHash, ok := hash.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve hash") + return common.BytesToAddress(iHash.(common.Hash).Bytes()) + }, + ) +} + +func GenAddressArray(t *testing.T) gopter.Gen { + return GenHashArray(t).Map( + func(hashes any) (rv []common.Address) { + hashArray, ok := hashes.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve hashes") + for _, hash := range hashArray.([]common.Hash) { + rv = append(rv, common.BytesToAddress(hash.Bytes())) + } + return rv + }, + ) +} + +func GenClientPubKeyArray(t *testing.T) gopter.Gen { + return GenHashArray(t).Map( + func(hashes any) (rv [][32]byte) { + hashArray, ok := hashes.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve hashes") + for _, hash := range hashArray.([]common.Hash) { + pk := [32]byte{} + copy(pk[:], hash.Bytes()) + rv = append(rv, pk) + } + return rv + }, + ) +} + +func GenBytes(t *testing.T) gopter.Gen { + return gen.UInt16Range(0, 2000).FlatMap( + func(length any) gopter.Gen { + var byteGens []gopter.Gen + for i := uint16(0); i < length.(uint16); i++ { + byteGens = append(byteGens, gen.UInt8()) + } + return gopter.CombineGens(byteGens...).Map( + func(byteArray any) []byte { + array, 
ok := byteArray.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve gen result") + iArray := array.([]any) + rv := make([]byte, len(iArray)) + for i, byteVal := range iArray { + rv[i] = byteVal.(uint8) + } + return rv + }, + ) + }, + reflect.ValueOf([]byte{}).Type(), + ) +} diff --git a/pkg/mercury/config_poller.go b/pkg/mercury/config_poller.go new file mode 100644 index 0000000000..5141e8ef11 --- /dev/null +++ b/pkg/mercury/config_poller.go @@ -0,0 +1,176 @@ +package mercury + +import ( + "context" + "database/sql" + "encoding/hex" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink-evm/gethwrappers/llo-feeds/generated/verifier" + "github.com/smartcontractkit/chainlink-evm/pkg/logpoller" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" +) + +// FeedScopedConfigSet ConfigSet with FeedID for use with mercury (and multi-config DON) +var FeedScopedConfigSet common.Hash + +var verifierABI abi.ABI + +const ( + configSetEventName = "ConfigSet" + feedIdTopicIndex = 1 +) + +func init() { + var err error + verifierABI, err = abi.JSON(strings.NewReader(verifier.VerifierABI)) + if err != nil { + panic(err) + } + FeedScopedConfigSet = verifierABI.Events[configSetEventName].ID +} + +// FullConfigFromLog defines the contract config with the feedID +type FullConfigFromLog struct { + ocrtypes.ContractConfig + feedID utils.FeedID +} + +func unpackLogData(d []byte) (*verifier.VerifierConfigSet, error) { + unpacked := new(verifier.VerifierConfigSet) + + err := verifierABI.UnpackIntoInterface(unpacked, configSetEventName, d) + if err != nil { + return nil, errors.Wrap(err, "failed to unpack log data") + } + + return unpacked, nil +} + +func ConfigFromLog(logData []byte) (FullConfigFromLog, 
error) { + unpacked, err := unpackLogData(logData) + if err != nil { + return FullConfigFromLog{}, err + } + + var transmitAccounts []ocrtypes.Account + for _, addr := range unpacked.OffchainTransmitters { + transmitAccounts = append(transmitAccounts, ocrtypes.Account(hex.EncodeToString(addr[:]))) + } + var signers []ocrtypes.OnchainPublicKey + for _, addr := range unpacked.Signers { + signers = append(signers, addr[:]) + } + + return FullConfigFromLog{ + feedID: unpacked.FeedId, + ContractConfig: ocrtypes.ContractConfig{ + ConfigDigest: unpacked.ConfigDigest, + ConfigCount: unpacked.ConfigCount, + Signers: signers, + Transmitters: transmitAccounts, + F: unpacked.F, + OnchainConfig: unpacked.OnchainConfig, + OffchainConfigVersion: unpacked.OffchainConfigVersion, + OffchainConfig: unpacked.OffchainConfig, + }, + }, nil +} + +// ConfigPoller defines the Mercury Config Poller +type ConfigPoller struct { + lggr logger.Logger + destChainLogPoller logpoller.LogPoller + addr common.Address + feedId common.Hash +} + +func FilterName(addr common.Address, feedID common.Hash) string { + return logpoller.FilterName("OCR3 Mercury ConfigPoller", addr.String(), feedID.Hex()) +} + +// NewConfigPoller creates a new Mercury ConfigPoller +func NewConfigPoller(ctx context.Context, lggr logger.Logger, destChainPoller logpoller.LogPoller, addr common.Address, feedId common.Hash) (*ConfigPoller, error) { + err := destChainPoller.RegisterFilter(ctx, logpoller.Filter{Name: FilterName(addr, feedId), EventSigs: []common.Hash{FeedScopedConfigSet}, Addresses: []common.Address{addr}}) + if err != nil { + return nil, err + } + + cp := &ConfigPoller{ + lggr: lggr, + destChainLogPoller: destChainPoller, + addr: addr, + feedId: feedId, + } + + return cp, nil +} + +func (cp *ConfigPoller) Start() {} + +func (cp *ConfigPoller) Close() error { + return nil +} + +func (cp *ConfigPoller) Notify() <-chan struct{} { + return nil // rely on libocr's builtin config polling +} + +// Replay abstracts the 
logpoller.LogPoller Replay() implementation +func (cp *ConfigPoller) Replay(ctx context.Context, fromBlock int64) error { + return cp.destChainLogPoller.Replay(ctx, fromBlock) +} + +// LatestConfigDetails returns the latest config details from the logs +func (cp *ConfigPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + cp.lggr.Debugw("LatestConfigDetails", "eventSig", FeedScopedConfigSet, "addr", cp.addr, "topicIndex", feedIdTopicIndex, "feedID", cp.feedId) + logs, err := cp.destChainLogPoller.IndexedLogs(ctx, FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, 1) + if err != nil { + return 0, ocrtypes.ConfigDigest{}, err + } + if len(logs) == 0 { + return 0, ocrtypes.ConfigDigest{}, nil + } + latest := logs[len(logs)-1] + latestConfigSet, err := ConfigFromLog(latest.Data) + if err != nil { + return 0, ocrtypes.ConfigDigest{}, err + } + return uint64(latest.BlockNumber), latestConfigSet.ConfigDigest, nil +} + +// LatestConfig returns the latest config from the logs on a certain block +func (cp *ConfigPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { + lgs, err := cp.destChainLogPoller.IndexedLogsByBlockRange(ctx, int64(changedInBlock), int64(changedInBlock), FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + if len(lgs) == 0 { + return ocrtypes.ContractConfig{}, nil + } + latestConfigSet, err := ConfigFromLog(lgs[len(lgs)-1].Data) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + cp.lggr.Infow("LatestConfig", "latestConfig", latestConfigSet) + return latestConfigSet.ContractConfig, nil +} + +// LatestBlockHeight returns the latest block height from the logs +func (cp *ConfigPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { + latest, err := cp.destChainLogPoller.LatestBlock(ctx) + if err != nil 
{ + if errors.Is(err, sql.ErrNoRows) { + return 0, nil + } + return 0, err + } + return uint64(latest.BlockNumber), nil +} diff --git a/pkg/mercury/config_poller_test.go b/pkg/mercury/config_poller_test.go new file mode 100644 index 0000000000..1645f972fe --- /dev/null +++ b/pkg/mercury/config_poller_test.go @@ -0,0 +1,122 @@ +package mercury + +import ( + "encoding/hex" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/pkg/errors" + confighelper2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + ocrtypes2 "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/umbracle/ethgo/abi" + + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" + evmutils "github.com/smartcontractkit/chainlink-evm/pkg/utils" +) + +func TestMercuryConfigPoller(t *testing.T) { + feedID := evmutils.NewHash() + feedIDBytes := [32]byte(feedID) + + th := SetupTH(t, feedID) + + notify := th.configPoller.Notify() + assert.Empty(t, notify) + + // Should have no config to begin with. + _, config, err := th.configPoller.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, ocrtypes2.ConfigDigest{}, config) + + // Create minimum number of nodes. 
+ n := 4 + var oracles []confighelper2.OracleIdentityExtra + for range n { + oracles = append(oracles, confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: evmutils.RandomAddress().Bytes(), + TransmitAccount: ocrtypes2.Account(evmutils.RandomAddress().String()), + OffchainPublicKey: evmutils.RandomBytes32(), + }, + ConfigEncryptionPublicKey: evmutils.RandomBytes32(), + }) + } + f := uint8(1) + // Setup config on contract + configType := abi.MustNewType("tuple()") + onchainConfigVal, err := abi.Encode(map[string]any{}, configType) + require.NoError(t, err) + signers, _, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( + 2*time.Second, // DeltaProgress + 20*time.Second, // DeltaResend + 100*time.Millisecond, // DeltaRound + 0, // DeltaGrace + 1*time.Minute, // DeltaStage + 100, // rMax + []int{len(oracles)}, // S + oracles, + []byte{}, // reportingPluginConfig []byte, + nil, + 0, // Max duration query + 250*time.Millisecond, // Max duration observation + 250*time.Millisecond, // MaxDurationReport + 250*time.Millisecond, // MaxDurationShouldAcceptFinalizedReport + 250*time.Millisecond, // MaxDurationShouldTransmitAcceptedReport + int(f), // f + onchainConfigVal, + ) + require.NoError(t, err) + signerAddresses, err := onchainPublicKeyToAddress(signers) + require.NoError(t, err) + offchainTransmitters := make([][32]byte, n) + encodedTransmitter := make([]ocrtypes2.Account, n) + for i := range n { + offchainTransmitters[i] = oracles[i].OffchainPublicKey + encodedTransmitter[i] = ocrtypes2.Account(hex.EncodeToString(oracles[i].OffchainPublicKey[:])) + } + + _, err = th.verifierContract.SetConfig(th.user, feedIDBytes, signerAddresses, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, nil) + require.NoError(t, err, "failed to setConfig with feed ID") + th.backend.Commit() + + latest, err := 
th.backend.Client().BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + // Ensure we capture this config set log. + require.NoError(t, th.logPoller.Replay(testutils.Context(t), latest.Number().Int64()-1)) + + // Send blocks until we see the config updated. + var configBlock uint64 + var digest [32]byte + gomega.NewGomegaWithT(t).Eventually(func() bool { + th.backend.Commit() + configBlock, digest, err = th.configPoller.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + return ocrtypes2.ConfigDigest{} != digest + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + + // Assert the config returned is the one we configured. + newConfig, err := th.configPoller.LatestConfig(testutils.Context(t), configBlock) + require.NoError(t, err) + // Note we don't check onchainConfig, as that is populated in the contract itself. + assert.Equal(t, digest, [32]byte(newConfig.ConfigDigest)) + assert.Equal(t, signers, newConfig.Signers) + assert.Equal(t, threshold, newConfig.F) + assert.Equal(t, encodedTransmitter, newConfig.Transmitters) + assert.Equal(t, offchainConfigVersion, newConfig.OffchainConfigVersion) + assert.Equal(t, offchainConfig, newConfig.OffchainConfig) +} + +func onchainPublicKeyToAddress(publicKeys []types.OnchainPublicKey) (addresses []common.Address, err error) { + for _, signer := range publicKeys { + if len(signer) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(signer)) + } + return addresses, nil +} diff --git a/pkg/mercury/helpers_test.go b/pkg/mercury/helpers_test.go new file mode 100644 index 0000000000..e3d2be4712 --- /dev/null +++ b/pkg/mercury/helpers_test.go @@ -0,0 +1,188 @@ +package mercury + +import ( + "encoding/base64" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + + evmclient "github.com/smartcontractkit/chainlink-evm/pkg/client" + "github.com/smartcontractkit/chainlink-evm/pkg/heads/headstest" + "github.com/smartcontractkit/chainlink-evm/pkg/logpoller" + + "github.com/smartcontractkit/chainlink-evm/gethwrappers/llo-feeds/generated/verifier" + "github.com/smartcontractkit/chainlink-evm/gethwrappers/llo-feeds/generated/verifier_proxy" + reportcodecv2 "github.com/smartcontractkit/chainlink-evm/pkg/mercury/v2/reportcodec" + reportcodecv3 "github.com/smartcontractkit/chainlink-evm/pkg/mercury/v3/reportcodec" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" +) + +const sampleClientPubKey = "0x724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + +var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + +var sampleReports [][]byte + +var ( + sampleV2Report = buildSampleV2Report(242) + sampleV3Report = buildSampleV3Report(242) + sig2 = ocrtypes.AttributedOnchainSignature{Signature: mustDecodeBase64("kbeuRczizOJCxBzj7MUAFpz3yl2WRM6K/f0ieEBvA+oTFUaKslbQey10krumVjzAvlvKxMfyZo0WkOgNyfF6xwE="), Signer: 2} + sig3 = ocrtypes.AttributedOnchainSignature{Signature: mustDecodeBase64("9jz4b6Dh2WhXxQ97a6/S9UNjSfrEi9016XKTrfN0mLQFDiNuws23x7Z4n+6g0sqKH/hnxx1VukWUH/ohtw83/wE="), Signer: 3} + sampleSigs = 
[]ocrtypes.AttributedOnchainSignature{sig2, sig3} + sampleReportContext = ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: MustHexToConfigDigest("0x0006fc30092226b37f6924b464e16a54a7978a9a524519a73403af64d487dc45"), + Epoch: 6, + Round: 28, + }, + ExtraHash: [32]uint8{27, 144, 106, 73, 166, 228, 123, 166, 179, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}, + } +) + +func init() { + sampleReports = make([][]byte, 4) + for i := 0; i < len(sampleReports); i++ { + sampleReports[i] = buildSampleV2Report(int64(i)) + } +} + +func buildSampleV2Report(ts int64) []byte { + feedID := sampleFeedID + timestamp := uint32(ts) + bp := big.NewInt(242) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := reportcodecv2.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp) + if err != nil { + panic(err) + } + return b +} + +func buildSampleV3Report(ts int64) []byte { + feedID := sampleFeedID + timestamp := uint32(ts) + bp := big.NewInt(242) + bid := big.NewInt(243) + ask := big.NewInt(244) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := reportcodecv3.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask) + if err != nil { + panic(err) + } + return b +} + +func buildSamplePayload(report []byte) []byte { + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + for i, as := range sampleSigs { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + panic("eventTransmit(ev): error in SplitSignature") + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + rawReportCtx := evmutil.RawReportContext(sampleReportContext) + payload, err := PayloadTypes.Pack(rawReportCtx, report, rs, ss, vs) + if err != nil 
{ + panic(err) + } + return payload +} + +type TestHarness struct { + configPoller *ConfigPoller + user *bind.TransactOpts + backend *simulated.Backend + verifierAddress common.Address + verifierContract *verifier.Verifier + logPoller logpoller.LogPoller +} + +func SetupTH(t *testing.T, feedID common.Hash) TestHarness { + key, err := crypto.GenerateKey() + require.NoError(t, err) + user, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + b := simulated.NewBackend(types.GenesisAlloc{ + user.From: {Balance: big.NewInt(1000000000000000000)}}, + simulated.WithBlockGasLimit(5*ethconfig.Defaults.Miner.GasCeil)) + + proxyAddress, _, verifierProxy, err := verifier_proxy.DeployVerifierProxy(user, b.Client(), common.Address{}) + require.NoError(t, err, "failed to deploy test mercury verifier proxy contract") + b.Commit() + verifierAddress, _, verifierContract, err := verifier.DeployVerifier(user, b.Client(), proxyAddress) + require.NoError(t, err, "failed to deploy test mercury verifier contract") + b.Commit() + _, err = verifierProxy.InitializeVerifier(user, verifierAddress) + require.NoError(t, err) + b.Commit() + + db := testutils.NewSqlxDB(t) + ethClient := evmclient.NewSimulatedBackendClient(t, b, big.NewInt(1337)) + lggr := logger.Test(t) + lorm := logpoller.NewORM(big.NewInt(1337), db, lggr) + + lpOpts := logpoller.Opts{ + PollPeriod: 100 * time.Millisecond, + FinalityDepth: 1, + BackfillBatchSize: 2, + RPCBatchSize: 2, + KeepFinalizedBlocksDepth: 1000, + } + ht := headstest.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, ht, lpOpts) + servicetest.Run(t, lp) + + configPoller, err := NewConfigPoller(testutils.Context(t), lggr, lp, verifierAddress, feedID) + require.NoError(t, err) + + configPoller.Start() + t.Cleanup(func() { + assert.NoError(t, configPoller.Close()) + }) + + return TestHarness{ + configPoller: configPoller, + user: user, + 
backend: b, + verifierAddress: verifierAddress, + verifierContract: verifierContract, + logPoller: lp, + } +} + +func mustDecodeBase64(s string) (b []byte) { + var err error + b, err = base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return +} diff --git a/pkg/mercury/mocks/async_deleter.go b/pkg/mercury/mocks/async_deleter.go new file mode 100644 index 0000000000..98e14799f5 --- /dev/null +++ b/pkg/mercury/mocks/async_deleter.go @@ -0,0 +1,68 @@ +// Code generated by mockery v2.53.0. DO NOT EDIT. + +package mocks + +import ( + pb "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" + mock "github.com/stretchr/testify/mock" +) + +// AsyncDeleter is an autogenerated mock type for the asyncDeleter type +type AsyncDeleter struct { + mock.Mock +} + +type AsyncDeleter_Expecter struct { + mock *mock.Mock +} + +func (_m *AsyncDeleter) EXPECT() *AsyncDeleter_Expecter { + return &AsyncDeleter_Expecter{mock: &_m.Mock} +} + +// AsyncDelete provides a mock function with given fields: req +func (_m *AsyncDeleter) AsyncDelete(req *pb.TransmitRequest) { + _m.Called(req) +} + +// AsyncDeleter_AsyncDelete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsyncDelete' +type AsyncDeleter_AsyncDelete_Call struct { + *mock.Call +} + +// AsyncDelete is a helper method to define mock.On call +// - req *pb.TransmitRequest +func (_e *AsyncDeleter_Expecter) AsyncDelete(req interface{}) *AsyncDeleter_AsyncDelete_Call { + return &AsyncDeleter_AsyncDelete_Call{Call: _e.mock.On("AsyncDelete", req)} +} + +func (_c *AsyncDeleter_AsyncDelete_Call) Run(run func(req *pb.TransmitRequest)) *AsyncDeleter_AsyncDelete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*pb.TransmitRequest)) + }) + return _c +} + +func (_c *AsyncDeleter_AsyncDelete_Call) Return() *AsyncDeleter_AsyncDelete_Call { + _c.Call.Return() + return _c +} + +func (_c *AsyncDeleter_AsyncDelete_Call) RunAndReturn(run func(*pb.TransmitRequest)) 
*AsyncDeleter_AsyncDelete_Call { + _c.Run(run) + return _c +} + +// NewAsyncDeleter creates a new instance of AsyncDeleter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAsyncDeleter(t interface { + mock.TestingT + Cleanup(func()) +}) *AsyncDeleter { + mock := &AsyncDeleter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/mercury/offchain_config_digester.go b/pkg/mercury/offchain_config_digester.go new file mode 100644 index 0000000000..6e9c2089be --- /dev/null +++ b/pkg/mercury/offchain_config_digester.go @@ -0,0 +1,74 @@ +package mercury + +import ( + "context" + "crypto/ed25519" + "encoding/hex" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/wsrpc/credentials" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" +) + +// Originally sourced from: https://github.com/smartcontractkit/offchain-reporting/blob/991ebe1462fd56826a1ddfb34287d542acb2baee/lib/offchainreporting2/chains/evmutil/offchain_config_digester.go + +var _ ocrtypes.OffchainConfigDigester = OffchainConfigDigester{} + +func NewOffchainConfigDigester(feedID [32]byte, chainID *big.Int, contractAddress common.Address, prefix ocrtypes.ConfigDigestPrefix) OffchainConfigDigester { + return OffchainConfigDigester{feedID, chainID, contractAddress, prefix} +} + +type OffchainConfigDigester struct { + FeedID utils.FeedID + ChainID *big.Int + ContractAddress common.Address + Prefix ocrtypes.ConfigDigestPrefix +} + +func (d OffchainConfigDigester) ConfigDigest(ctx context.Context, cc ocrtypes.ContractConfig) (ocrtypes.ConfigDigest, error) { + signers := []common.Address{} + for i, signer := range cc.Signers { + if len(signer) != 20 { + return ocrtypes.ConfigDigest{}, 
errors.Errorf("%v-th evm signer should be a 20 byte address, but got %x", i, signer) + } + a := common.BytesToAddress(signer) + signers = append(signers, a) + } + transmitters := []credentials.StaticSizedPublicKey{} + for i, transmitter := range cc.Transmitters { + if len(transmitter) != 2*ed25519.PublicKeySize { + return ocrtypes.ConfigDigest{}, errors.Errorf("%v-th evm transmitter should be a 64 character hex-encoded ed25519 public key, but got '%v' (%d chars)", i, transmitter, len(transmitter)) + } + var t credentials.StaticSizedPublicKey + b, err := hex.DecodeString(string(transmitter)) + if err != nil { + return ocrtypes.ConfigDigest{}, errors.Wrapf(err, "%v-th evm transmitter is not valid hex, got: %q", i, transmitter) + } + copy(t[:], b) + + transmitters = append(transmitters, t) + } + + return configDigest( + common.Hash(d.FeedID), + d.ChainID, + d.ContractAddress, + cc.ConfigCount, + signers, + transmitters, + cc.F, + cc.OnchainConfig, + cc.OffchainConfigVersion, + cc.OffchainConfig, + d.Prefix, + ), nil +} + +func (d OffchainConfigDigester) ConfigDigestPrefix(ctx context.Context) (ocrtypes.ConfigDigestPrefix, error) { + return d.Prefix, nil +} diff --git a/pkg/mercury/offchain_config_digester_test.go b/pkg/mercury/offchain_config_digester_test.go new file mode 100644 index 0000000000..2f6d19b8b3 --- /dev/null +++ b/pkg/mercury/offchain_config_digester_test.go @@ -0,0 +1,57 @@ +package mercury + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" +) + +func Test_OffchainConfigDigester_ConfigDigest(t *testing.T) { + ctx := t.Context() + // ChainID and ContractAddress are taken into account for computation + cd1, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(ctx, types.ContractConfig{}) + require.NoError(t, err) + cd2, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(ctx, 
types.ContractConfig{}) + require.NoError(t, err) + cd3, err := OffchainConfigDigester{ChainID: big.NewInt(1)}.ConfigDigest(ctx, types.ContractConfig{}) + require.NoError(t, err) + cd4, err := OffchainConfigDigester{ChainID: big.NewInt(1), ContractAddress: common.Address{1}}.ConfigDigest(ctx, types.ContractConfig{}) + require.NoError(t, err) + + require.Equal(t, cd1, cd2) + require.NotEqual(t, cd2, cd3) + require.NotEqual(t, cd2, cd4) + require.NotEqual(t, cd3, cd4) + + // malformed signers + _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{ + Signers: []types.OnchainPublicKey{{1, 2}}, + }) + require.Error(t, err) + + // malformed transmitters + _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{ + Transmitters: []types.Account{"0x"}, + }) + require.Error(t, err) + + _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353"}, + }) + require.Error(t, err) + + _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaz"}, + }) + require.Error(t, err) + + // well-formed transmitters + _, err = OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(ctx, types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaa"}, + }) + require.NoError(t, err) +} diff --git a/pkg/mercury/orm.go b/pkg/mercury/orm.go new file mode 100644 index 0000000000..f7f1772c4b --- /dev/null +++ b/pkg/mercury/orm.go @@ -0,0 +1,191 @@ +package mercury + +import ( + "context" + "crypto/sha256" + "database/sql" + "errors" + "fmt" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + 
"github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" +) + +type ORM interface { + InsertTransmitRequest(ctx context.Context, serverURLs []string, req *pb.TransmitRequest, jobID int32, reportCtx ocrtypes.ReportContext) error + DeleteTransmitRequests(ctx context.Context, serverURL string, reqs []*pb.TransmitRequest) error + GetTransmitRequests(ctx context.Context, serverURL string, jobID int32) ([]*Transmission, error) + PruneTransmitRequests(ctx context.Context, serverURL string, jobID int32, maxSize int) error + LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) +} + +func FeedIDFromReport(report ocrtypes.Report) (feedID utils.FeedID, err error) { + if n := copy(feedID[:], report); n != 32 { + return feedID, pkgerrors.Errorf("invalid length for report: %d", len(report)) + } + return feedID, nil +} + +type orm struct { + ds sqlutil.DataSource +} + +func NewORM(ds sqlutil.DataSource) ORM { + return &orm{ds: ds} +} + +// InsertTransmitRequest inserts one transmit request if the payload does not exist already. 
+func (o *orm) InsertTransmitRequest(ctx context.Context, serverURLs []string, req *pb.TransmitRequest, jobID int32, reportCtx ocrtypes.ReportContext) error { + feedID, err := FeedIDFromReport(req.Payload) + if err != nil { + return err + } + if len(serverURLs) == 0 { + return errors.New("no server URLs provided") + } + + var wg sync.WaitGroup + wg.Add(2) + var err1, err2 error + + go func() { + defer wg.Done() + + values := make([]string, len(serverURLs)) + args := []any{ + req.Payload, + hashPayload(req.Payload), + reportCtx.ConfigDigest[:], + reportCtx.Epoch, + reportCtx.Round, + reportCtx.ExtraHash[:], + jobID, + feedID[:], + } + for i, serverURL := range serverURLs { + // server url is the only thing that changes, might as well re-use + // the same parameters for each insert + values[i] = fmt.Sprintf("($1, $2, $3, $4, $5, $6, $7, $8, $%d)", i+9) + args = append(args, serverURL) + } + + _, err1 = o.ds.ExecContext(ctx, fmt.Sprintf(` + INSERT INTO mercury_transmit_requests (payload, payload_hash, config_digest, epoch, round, extra_hash, job_id, feed_id, server_url) + VALUES %s + ON CONFLICT (server_url, payload_hash) DO NOTHING + `, strings.Join(values, ",")), args...) + }() + + go func() { + defer wg.Done() + _, err2 = o.ds.ExecContext(ctx, ` + INSERT INTO feed_latest_reports (feed_id, report, epoch, round, updated_at, job_id) + VALUES ($1, $2, $3, $4, NOW(), $5) + ON CONFLICT (feed_id) DO UPDATE + SET feed_id=$1, report=$2, epoch=$3, round=$4, updated_at=NOW() + WHERE excluded.epoch > feed_latest_reports.epoch OR (excluded.epoch = feed_latest_reports.epoch AND excluded.round > feed_latest_reports.round) + `, feedID[:], req.Payload, reportCtx.Epoch, reportCtx.Round, jobID) + }() + wg.Wait() + return errors.Join(err1, err2) +} + +// DeleteTransmitRequests deletes the given transmit requests if they exist.
+func (o *orm) DeleteTransmitRequests(ctx context.Context, serverURL string, reqs []*pb.TransmitRequest) error { + if len(reqs) == 0 { + return nil + } + + var hashes pq.ByteaArray + for _, req := range reqs { + hashes = append(hashes, hashPayload(req.Payload)) + } + + _, err := o.ds.ExecContext(ctx, ` + DELETE FROM mercury_transmit_requests + WHERE server_url = $1 AND payload_hash = ANY($2) + `, serverURL, hashes) + return err +} + +// GetTransmitRequests returns all transmit requests in chronologically descending order. +func (o *orm) GetTransmitRequests(ctx context.Context, serverURL string, jobID int32) ([]*Transmission, error) { + // The priority queue uses epoch and round to sort transmissions so order by + // the same fields here for optimal insertion into the pq. + rows, err := o.ds.QueryContext(ctx, ` + SELECT payload, config_digest, epoch, round, extra_hash + FROM mercury_transmit_requests + WHERE job_id = $1 AND server_url = $2 + ORDER BY epoch DESC, round DESC + `, jobID, serverURL) + if err != nil { + return nil, err + } + defer rows.Close() + + var transmissions []*Transmission + for rows.Next() { + transmission := &Transmission{Req: &pb.TransmitRequest{}} + var digest, extraHash common.Hash + + err := rows.Scan( + &transmission.Req.Payload, + &digest, + &transmission.ReportCtx.Epoch, + &transmission.ReportCtx.Round, + &extraHash, + ) + if err != nil { + return nil, err + } + transmission.ReportCtx.ConfigDigest = ocrtypes.ConfigDigest(digest) + transmission.ReportCtx.ExtraHash = extraHash + + transmissions = append(transmissions, transmission) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return transmissions, nil +} + +// PruneTransmitRequests keeps at most maxSize rows for the given job ID, +// deleting the oldest transmit requests. +func (o *orm) PruneTransmitRequests(ctx context.Context, serverURL string, jobID int32, maxSize int) error { + // Prune the oldest requests by epoch and round.
+ _, err := o.ds.ExecContext(ctx, ` + DELETE FROM mercury_transmit_requests + WHERE job_id = $1 AND server_url = $2 AND + payload_hash NOT IN ( + SELECT payload_hash + FROM mercury_transmit_requests + WHERE job_id = $1 AND server_url = $2 + ORDER BY epoch DESC, round DESC + LIMIT $3 + ) + `, jobID, serverURL, maxSize) + return err +} + +func (o *orm) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) { + err = o.ds.GetContext(ctx, &report, `SELECT report FROM feed_latest_reports WHERE feed_id = $1`, feedID[:]) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return report, err +} + +func hashPayload(payload []byte) []byte { + checksum := sha256.Sum256(payload) + return checksum[:] +} diff --git a/pkg/mercury/orm_test.go b/pkg/mercury/orm_test.go new file mode 100644 index 0000000000..2efecdc42d --- /dev/null +++ b/pkg/mercury/orm_test.go @@ -0,0 +1,378 @@ +package mercury + +import ( + "math/rand/v2" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" +) + +var ( + sURL = "wss://example.com/mercury" + sURL2 = "wss://mercuryserver.test" + sURL3 = "wss://mercuryserver.example/foo" +) + +func TestORM(t *testing.T) { + ctx := testutils.Context(t) + db := testutils.NewSqlxDB(t) + + jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + orm := NewORM(db) + feedID := sampleFeedID + + reports := sampleReports + reportContexts := make([]ocrtypes.ReportContext, 4) + for i := range reportContexts { + reportContexts[i] = ocrtypes.ReportContext{ + ReportTimestamp: 
ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{'1'}, + Epoch: 10, + Round: uint8(i), + }, + ExtraHash: [32]byte{'2'}, + } + } + + l, err := orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Nil(t, l) + + // Test insert and get requests. + // s1 + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, reportContexts[0]) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[1]}, jobID, reportContexts[1]) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[2]}, jobID, reportContexts[2]) + require.NoError(t, err) + + // s2 + err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[0]) + require.NoError(t, err) + + transmissions, err := orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]}, + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: reportContexts[1]}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, + }, transmissions) + transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[0]}, + }, transmissions) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.NotEqual(t, reports[0], l) + assert.Equal(t, reports[2], l) + + // Test requests can be deleted. 
+ err = orm.DeleteTransmitRequests(ctx, sURL, []*pb.TransmitRequest{{Payload: reports[1]}}) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, + }, transmissions) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[2], l) + + // Test deleting non-existent requests does not error. + err = orm.DeleteTransmitRequests(ctx, sURL, []*pb.TransmitRequest{{Payload: []byte("does-not-exist")}}) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, + }, transmissions) + + // Test deleting multiple requests. + err = orm.DeleteTransmitRequests(ctx, sURL, []*pb.TransmitRequest{ + {Payload: reports[0]}, + {Payload: reports[2]}, + }) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[2], l) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Empty(t, transmissions) + + // More inserts. + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3]) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[3]}, + }, transmissions) + + // Duplicate requests are ignored. 
+ err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3]) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3]) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[3]}, + }, transmissions) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[3], l) + + // s2 not affected by deletion + transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID) + require.NoError(t, err) + require.Len(t, transmissions, 1) +} + +func TestORM_InsertTransmitRequest_MultipleServerURLs(t *testing.T) { + ctx := testutils.Context(t) + db := testutils.NewSqlxDB(t) + + jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + orm := NewORM(db) + feedID := sampleFeedID + + reports := sampleReports + reportContexts := make([]ocrtypes.ReportContext, 4) + for i := range reportContexts { + reportContexts[i] = ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{'1'}, + Epoch: 10, + Round: uint8(i), + }, + ExtraHash: [32]byte{'2'}, + } + } + err := orm.InsertTransmitRequest(ctx, []string{sURL, sURL2, sURL3}, &pb.TransmitRequest{Payload: reports[0]}, jobID, reportContexts[0]) + require.NoError(t, err) + + transmissions, err := orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Len(t, transmissions, 1) + assert.Equal(t, &Transmission{Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, 
transmissions[0]) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID) + require.NoError(t, err) + require.Len(t, transmissions, 1) + assert.Equal(t, &Transmission{Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, transmissions[0]) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL3, jobID) + require.NoError(t, err) + require.Len(t, transmissions, 1) + assert.Equal(t, &Transmission{Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, transmissions[0]) + + l, err := orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[0], l) +} + +func TestORM_PruneTransmitRequests(t *testing.T) { + ctx := testutils.Context(t) + db := testutils.NewSqlxDB(t) + jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + + orm := NewORM(db) + + reports := sampleReports + + makeReportContext := func(epoch uint32, round uint8) ocrtypes.ReportContext { + return ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{'1'}, + Epoch: epoch, + Round: round, + }, + ExtraHash: [32]byte{'2'}, + } + } + + // s1 + err := orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(1, 1)) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 2)) + require.NoError(t, err) + // s2 - should not be touched + err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(1, 0)) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(1, 
1)) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 2)) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[2]}, jobID, makeReportContext(1, 3)) + require.NoError(t, err) + + // Max size greater than number of records, expect no-op + err = orm.PruneTransmitRequests(ctx, sURL, jobID, 5) + require.NoError(t, err) + + transmissions, err := orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)}, + }, transmissions) + + // Max size equal to number of records, expect no-op + err = orm.PruneTransmitRequests(ctx, sURL, jobID, 2) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)}, + }, transmissions) + + // Max size is number of records + 1, but jobID differs, expect no-op + err = orm.PruneTransmitRequests(ctx, sURL, -1, 2) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)}, + }, transmissions) + + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[2]}, jobID, makeReportContext(2, 1)) + require.NoError(t, err) + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, 
jobID, makeReportContext(2, 2)) + require.NoError(t, err) + + // Max size is table size - 1, expect the oldest row to be pruned. + err = orm.PruneTransmitRequests(ctx, sURL, jobID, 3) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: makeReportContext(2, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: makeReportContext(2, 1)}, + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + }, transmissions) + + // s2 not touched + transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID) + require.NoError(t, err) + assert.Len(t, transmissions, 3) +} + +func TestORM_InsertTransmitRequest_LatestReport(t *testing.T) { + ctx := testutils.Context(t) + db := testutils.NewSqlxDB(t) + jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + + orm := NewORM(db) + feedID := sampleFeedID + + reports := sampleReports + + makeReportContext := func(epoch uint32, round uint8) ocrtypes.ReportContext { + return ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{'1'}, + Epoch: epoch, + Round: round, + }, + ExtraHash: [32]byte{'2'}, + } + } + + err := orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext( + 0, 0, + )) + require.NoError(t, err) + + // this should be ignored, because report context is the same + err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext( + 0, 0, + )) + require.NoError(t, err) + + l, err := orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) 
+ assert.Equal(t, reports[0], l) + + t.Run("replaces if epoch and round are larger", func(t *testing.T) { + err = orm.InsertTransmitRequest(ctx, []string{"foo"}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 1)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[1], l) + }) + t.Run("replaces if epoch is the same but round is greater", func(t *testing.T) { + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[2]}, jobID, makeReportContext(1, 2)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[2], l) + }) + t.Run("replaces if epoch is larger but round is smaller", func(t *testing.T) { + err = orm.InsertTransmitRequest(ctx, []string{"bar"}, &pb.TransmitRequest{Payload: reports[3]}, jobID, makeReportContext(2, 1)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[3], l) + }) + t.Run("does not overwrite if epoch/round is the same", func(t *testing.T) { + err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(2, 1)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[3], l) + }) +} + +func Test_ReportCodec_FeedIDFromReport(t *testing.T) { + t.Run("FeedIDFromReport extracts the current block number from a valid report", func(t *testing.T) { + report := buildSampleV2Report(42) + + f, err := FeedIDFromReport(report) + require.NoError(t, err) + + assert.Equal(t, sampleFeedID[:], f[:]) + }) + t.Run("FeedIDFromReport returns error if report is invalid", func(t *testing.T) { + report := []byte{1} + + _, err := FeedIDFromReport(report) + assert.EqualError(t, err, "invalid length for report: 1") + }) +} diff 
--git a/pkg/mercury/persistence_manager.go b/pkg/mercury/persistence_manager.go new file mode 100644 index 0000000000..06a24b7434 --- /dev/null +++ b/pkg/mercury/persistence_manager.go @@ -0,0 +1,149 @@ +package mercury + +import ( + "context" + "sync" + "time" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" +) + +var ( + flushDeletesFrequency = time.Second + pruneFrequency = time.Hour +) + +type PersistenceManager struct { + lggr logger.Logger + orm ORM + serverURL string + + once services.StateMachine + stopCh services.StopChan + wg sync.WaitGroup + + deleteMu sync.Mutex + deleteQueue []*pb.TransmitRequest + + jobID int32 + + maxTransmitQueueSize int + flushDeletesFrequency time.Duration + pruneFrequency time.Duration +} + +func NewPersistenceManager(lggr logger.Logger, serverURL string, orm ORM, jobID int32, maxTransmitQueueSize int, flushDeletesFrequency, pruneFrequency time.Duration) *PersistenceManager { + return &PersistenceManager{ + lggr: logger.Sugared(lggr).Named("MercuryPersistenceManager").With("serverURL", serverURL), + orm: orm, + serverURL: serverURL, + stopCh: make(services.StopChan), + jobID: jobID, + maxTransmitQueueSize: maxTransmitQueueSize, + flushDeletesFrequency: flushDeletesFrequency, + pruneFrequency: pruneFrequency, + } +} + +func (pm *PersistenceManager) Start(ctx context.Context) error { + return pm.once.StartOnce("MercuryPersistenceManager", func() error { + pm.wg.Add(2) + go pm.runFlushDeletesLoop() + go pm.runPruneLoop() + return nil + }) +} + +func (pm *PersistenceManager) Close() error { + return pm.once.StopOnce("MercuryPersistenceManager", func() error { + close(pm.stopCh) + pm.wg.Wait() + return nil + }) +} + +func (pm *PersistenceManager) 
Insert(ctx context.Context, req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) error { + return pm.orm.InsertTransmitRequest(ctx, []string{pm.serverURL}, req, pm.jobID, reportCtx) +} + +func (pm *PersistenceManager) Delete(ctx context.Context, req *pb.TransmitRequest) error { + return pm.orm.DeleteTransmitRequests(ctx, pm.serverURL, []*pb.TransmitRequest{req}) +} + +func (pm *PersistenceManager) AsyncDelete(req *pb.TransmitRequest) { + pm.addToDeleteQueue(req) +} + +func (pm *PersistenceManager) Load(ctx context.Context) ([]*Transmission, error) { + return pm.orm.GetTransmitRequests(ctx, pm.serverURL, pm.jobID) +} + +func (pm *PersistenceManager) runFlushDeletesLoop() { + defer pm.wg.Done() + + ctx, cancel := pm.stopCh.NewCtx() + defer cancel() + + ticker := services.NewTicker(pm.flushDeletesFrequency) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + queuedReqs := pm.resetDeleteQueue() + if err := pm.orm.DeleteTransmitRequests(ctx, pm.serverURL, queuedReqs); err != nil { + pm.lggr.Errorw("Failed to delete queued transmit requests", "err", err) + pm.addToDeleteQueue(queuedReqs...) 
+ } else { + pm.lggr.Debugw("Deleted queued transmit requests") + } + } + } +} + +func (pm *PersistenceManager) runPruneLoop() { + defer pm.wg.Done() + + ctx, cancel := pm.stopCh.NewCtx() + defer cancel() + + ticker := services.NewTicker(pm.pruneFrequency) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + func(ctx context.Context) { + ctx, cancelPrune := context.WithTimeout(sqlutil.WithoutDefaultTimeout(ctx), time.Minute) + defer cancelPrune() + if err := pm.orm.PruneTransmitRequests(ctx, pm.serverURL, pm.jobID, pm.maxTransmitQueueSize); err != nil { + pm.lggr.Errorw("Failed to prune transmit requests table", "err", err) + } else { + pm.lggr.Debugw("Pruned transmit requests table") + } + }(ctx) + } + } +} + +func (pm *PersistenceManager) addToDeleteQueue(reqs ...*pb.TransmitRequest) { + pm.deleteMu.Lock() + defer pm.deleteMu.Unlock() + pm.deleteQueue = append(pm.deleteQueue, reqs...) +} + +func (pm *PersistenceManager) resetDeleteQueue() []*pb.TransmitRequest { + pm.deleteMu.Lock() + defer pm.deleteMu.Unlock() + queue := pm.deleteQueue + pm.deleteQueue = nil + return queue +} diff --git a/pkg/mercury/persistence_manager_test.go b/pkg/mercury/persistence_manager_test.go new file mode 100644 index 0000000000..f2d3f67296 --- /dev/null +++ b/pkg/mercury/persistence_manager_test.go @@ -0,0 +1,179 @@ +package mercury + +import ( + "math/rand/v2" + "testing" + "time" + + "github.com/jmoiron/sqlx" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" +) + +func bootstrapPersistenceManager(t *testing.T, jobID int32, db *sqlx.DB) *PersistenceManager { + t.Helper() + orm := NewORM(db) + return NewPersistenceManager(logger.Test(t), 
"mercuryserver.example", orm, jobID, 2, 5*time.Millisecond, 5*time.Millisecond) +} + +func TestPersistenceManager(t *testing.T) { + jobID1 := rand.Int32() + jobID2 := jobID1 + 1 + + ctx := testutils.Context(t) + db := testutils.NewSqlxDB(t) + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + pm := bootstrapPersistenceManager(t, jobID1, db) + + reports := sampleReports + + err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[0]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[1]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + + transmissions, err := pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[0]}}, + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) + + err = pm.Delete(ctx, &pb.TransmitRequest{Payload: reports[0]}) + require.NoError(t, err) + + transmissions, err = pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) + + t.Run("scopes load to only transmissions with matching job ID", func(t *testing.T) { + pm2 := bootstrapPersistenceManager(t, jobID2, db) + transmissions, err = pm2.Load(ctx) + require.NoError(t, err) + + assert.Empty(t, transmissions) + }) +} + +func TestPersistenceManagerAsyncDelete(t *testing.T) { + ctx := testutils.Context(t) + jobID := rand.Int32() + db := testutils.NewSqlxDB(t) + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + pm := bootstrapPersistenceManager(t, jobID, db) + + reports := sampleReports + + err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[0]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + 
err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[1]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + + err = pm.Start(ctx) + require.NoError(t, err) + + pm.AsyncDelete(&pb.TransmitRequest{Payload: reports[0]}) + + // Wait for next poll. + testutils.RequireEventually(t, func() bool { + pm.deleteMu.Lock() + defer pm.deleteMu.Unlock() + return len(pm.deleteQueue) == 0 + }) + + transmissions, err := pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) + + // Test AsyncDelete is a no-op after Close. + err = pm.Close() + require.NoError(t, err) + + pm.AsyncDelete(&pb.TransmitRequest{Payload: reports[1]}) + + time.Sleep(15 * time.Millisecond) + + transmissions, err = pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) +} + +func TestPersistenceManagerPrune(t *testing.T) { + jobID1 := rand.Int32() + jobID2 := jobID1 + 1 + db := testutils.NewSqlxDB(t) + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + + ctx := testutils.Context(t) + + reports := make([][]byte, 25) + for i := range 25 { + reports[i] = buildSampleV2Report(int64(i)) + } + + pm2 := bootstrapPersistenceManager(t, jobID2, db) + for i := range 20 { + err := pm2.Insert(ctx, &pb.TransmitRequest{Payload: reports[i]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: uint32(i)}}) //nolint:gosec // G115 + require.NoError(t, err) + } + + pm := bootstrapPersistenceManager(t, jobID1, db) + + err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[21]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 21}}) + require.NoError(t, err) + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[22]}, ocrtypes.ReportContext{ReportTimestamp: 
ocrtypes.ReportTimestamp{Epoch: 22}}) + require.NoError(t, err) + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[23]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}) + require.NoError(t, err) + + err = pm.Start(ctx) + require.NoError(t, err) + + testutils.RequireEventually(t, func() bool { + requests, err2 := pm.Load(testutils.Context(t)) + require.NoError(t, err2) + return len(requests) == pm.maxTransmitQueueSize + }) + + transmissions, err := pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[23]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}}, + {Req: &pb.TransmitRequest{Payload: reports[22]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}}}, + }, transmissions) + + // Test pruning stops after Close. + err = pm.Close() + require.NoError(t, err) + + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[24]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 24}}) + require.NoError(t, err) + + transmissions, err = pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[24]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 24}}}, + {Req: &pb.TransmitRequest{Payload: reports[23]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}}, + {Req: &pb.TransmitRequest{Payload: reports[22]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}}}, + }, transmissions) + + t.Run("prune was scoped to job ID", func(t *testing.T) { + transmissions, err = pm2.Load(ctx) + require.NoError(t, err) + assert.Len(t, transmissions, 20) + }) +} diff --git a/pkg/mercury/queue.go b/pkg/mercury/queue.go new file mode 100644 index 0000000000..6a9ba00541 --- /dev/null +++ b/pkg/mercury/queue.go @@ -0,0 +1,260 @@ 
+package mercury + +import ( + "context" + "errors" + "fmt" + "strconv" + "sync" + "time" + + heap "github.com/esote/minmaxheap" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" +) + +type asyncDeleter interface { + AsyncDelete(req *pb.TransmitRequest) +} + +var _ services.Service = (*transmitQueue)(nil) + +var transmitQueueLoad = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "mercury_transmit_queue_load", + Help: "Current count of items in the transmit queue", +}, + []string{"feedID", "serverURL", "capacity"}, +) + +// Prometheus' default interval is 15s, set this to under 7.5s to avoid +// aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) +const promInterval = 6500 * time.Millisecond + +// TransmitQueue is the high-level package that everything outside of this file should be using +// It stores pending transmissions, yielding the latest (highest priority) first to the caller +type transmitQueue struct { + services.StateMachine + + cond sync.Cond + lggr logger.SugaredLogger + asyncDeleter asyncDeleter + mu *sync.RWMutex + + pq *priorityQueue + maxlen int + closed bool + + // monitor loop + stopMonitor func() + transmitQueueLoad prometheus.Gauge +} + +type Transmission struct { + Req *pb.TransmitRequest // the payload to transmit + ReportCtx ocrtypes.ReportContext // contains priority information (latest epoch/round wins) +} + +type TransmitQueue interface { + services.Service + + BlockingPop() (t *Transmission) + Push(req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) (ok bool) + Init(transmissions []*Transmission) + IsEmpty() bool +} + +// maxlen controls how many items will be stored in the queue +// 0 means 
unlimited - be careful, this can cause memory leaks +func NewTransmitQueue(lggr logger.Logger, serverURL, feedID string, maxlen int, asyncDeleter asyncDeleter) TransmitQueue { + mu := new(sync.RWMutex) + return &transmitQueue{ + services.StateMachine{}, + sync.Cond{L: mu}, + logger.Sugared(lggr).Named("TransmitQueue"), + asyncDeleter, + mu, + nil, // pq needs to be initialized by calling tq.Init before use + maxlen, + false, + nil, + transmitQueueLoad.WithLabelValues(feedID, serverURL, strconv.Itoa(maxlen)), + } +} + +func (tq *transmitQueue) Init(transmissions []*Transmission) { + pq := priorityQueue(transmissions) + heap.Init(&pq) // ensure the heap is ordered + tq.pq = &pq +} + +func (tq *transmitQueue) Push(req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) (ok bool) { + tq.cond.L.Lock() + defer tq.cond.L.Unlock() + + if tq.closed { + return false + } + + if tq.maxlen != 0 && tq.pq.Len() == tq.maxlen { + // evict oldest entry to make room + tq.lggr.Criticalf("Transmit queue is full; dropping oldest transmission (reached max length of %d)", tq.maxlen) + removed := heap.PopMax(tq.pq) + if transmission, ok := removed.(*Transmission); ok { + tq.asyncDeleter.AsyncDelete(transmission.Req) + } + } + + heap.Push(tq.pq, &Transmission{req, reportCtx}) + tq.cond.Signal() + + return true +} + +// BlockingPop will block until at least one item is in the heap, and then return it +// If the queue is closed, it will immediately return nil +func (tq *transmitQueue) BlockingPop() (t *Transmission) { + tq.cond.L.Lock() + defer tq.cond.L.Unlock() + if tq.closed { + return nil + } + for t = tq.pop(); t == nil; t = tq.pop() { + tq.cond.Wait() + if tq.closed { + return nil + } + } + return t +} + +func (tq *transmitQueue) IsEmpty() bool { + tq.mu.RLock() + defer tq.mu.RUnlock() + return tq.pq.Len() == 0 +} + +func (tq *transmitQueue) Start(context.Context) error { + return tq.StartOnce("TransmitQueue", func() error { + t := services.NewTicker(promInterval) + wg := 
new(sync.WaitGroup) + chStop := make(chan struct{}) + tq.stopMonitor = func() { + t.Stop() + close(chStop) + wg.Wait() + } + wg.Add(1) + go tq.monitorLoop(t.C, chStop, wg) + return nil + }) +} + +func (tq *transmitQueue) Close() error { + return tq.StopOnce("TransmitQueue", func() error { + tq.cond.L.Lock() + tq.closed = true + tq.cond.L.Unlock() + tq.cond.Broadcast() + tq.stopMonitor() + return nil + }) +} + +func (tq *transmitQueue) monitorLoop(c <-chan time.Time, chStop <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + + for { + select { + case <-c: + tq.report() + case <-chStop: + return + } + } +} + +func (tq *transmitQueue) report() { + tq.mu.RLock() + length := tq.pq.Len() + tq.mu.RUnlock() + tq.transmitQueueLoad.Set(float64(length)) +} + +func (tq *transmitQueue) Ready() error { + return nil +} +func (tq *transmitQueue) Name() string { return tq.lggr.Name() } +func (tq *transmitQueue) HealthReport() map[string]error { + report := map[string]error{tq.Name(): errors.Join( + tq.status(), + )} + return report +} + +func (tq *transmitQueue) status() (merr error) { + tq.mu.RLock() + length := tq.pq.Len() + closed := tq.closed + tq.mu.RUnlock() + if tq.maxlen != 0 && length > (tq.maxlen/2) { + merr = errors.Join(merr, fmt.Errorf("transmit priority queue is greater than 50%% full (%d/%d)", length, tq.maxlen)) + } + if closed { + merr = errors.New("transmit queue is closed") + } + return merr +} + +// pop latest Transmission from the heap +// Not thread-safe +func (tq *transmitQueue) pop() *Transmission { + if tq.pq.Len() == 0 { + return nil + } + return heap.Pop(tq.pq).(*Transmission) +} + +// HEAP +// Adapted from https://pkg.go.dev/container/heap#example-package-PriorityQueue + +// WARNING: None of these methods are thread-safe, caller must synchronize + +var _ heap.Interface = &priorityQueue{} + +type priorityQueue []*Transmission + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + // We want Pop to 
give us the latest round, so we use greater than here + // i.e. a later epoch/round is "less" than an earlier one + return pq[i].ReportCtx.Epoch > pq[j].ReportCtx.Epoch && + pq[i].ReportCtx.Round > pq[j].ReportCtx.Round +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *priorityQueue) Pop() any { + n := len(*pq) + if n == 0 { + return nil + } + old := *pq + item := old[n-1] + old[n-1] = nil // avoid memory leak + *pq = old[0 : n-1] + return item +} + +func (pq *priorityQueue) Push(x any) { + *pq = append(*pq, x.(*Transmission)) +} diff --git a/pkg/mercury/queue_test.go b/pkg/mercury/queue_test.go new file mode 100644 index 0000000000..cb8bc10a9d --- /dev/null +++ b/pkg/mercury/queue_test.go @@ -0,0 +1,144 @@ +package mercury + +import ( + "sync" + "testing" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/mocks" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" +) + +type TestTransmissionWithReport struct { + tr *pb.TransmitRequest + ctx ocrtypes.ReportContext +} + +func createTestTransmissions(t *testing.T) []TestTransmissionWithReport { + t.Helper() + return []TestTransmissionWithReport{ + { + tr: &pb.TransmitRequest{ + Payload: []byte("test1"), + }, + ctx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 1, + Round: 1, + ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + { + tr: &pb.TransmitRequest{ + Payload: []byte("test2"), + }, + ctx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 2, + Round: 2, + ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + { + tr: &pb.TransmitRequest{ + Payload: []byte("test3"), + }, + ctx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 3, + Round: 3, + 
ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + } +} + +func Test_Queue(t *testing.T) { + t.Parallel() + testTransmissions := createTestTransmissions(t) + deleter := mocks.NewAsyncDeleter(t) + transmitQueue := NewTransmitQueue(logger.Test(t), sURL, "foo feed ID", 7, deleter) + transmitQueue.Init([]*Transmission{}) + + t.Run("successfully add transmissions to transmit queue", func(t *testing.T) { + for _, tt := range testTransmissions { + ok := transmitQueue.Push(tt.tr, tt.ctx) + require.True(t, ok) + } + report := transmitQueue.HealthReport() + assert.NoError(t, report[transmitQueue.Name()]) + }) + + t.Run("transmit queue is more than 50% full", func(t *testing.T) { + transmitQueue.Push(testTransmissions[2].tr, testTransmissions[2].ctx) + report := transmitQueue.HealthReport() + assert.Equal(t, "transmit priority queue is greater than 50% full (4/7)", report[transmitQueue.Name()].Error()) + }) + + t.Run("transmit queue pops the highest priority transmission", func(t *testing.T) { + tr := transmitQueue.BlockingPop() + assert.Equal(t, testTransmissions[2].tr, tr.Req) + }) + + t.Run("transmit queue is full and evicts the oldest transmission", func(t *testing.T) { + deleter.On("AsyncDelete", testTransmissions[0].tr).Once() + + // add 5 more transmissions to overflow the queue by 1 + for range 5 { + transmitQueue.Push(testTransmissions[1].tr, testTransmissions[1].ctx) + } + + // expecting testTransmissions[0] to get evicted, processed by deleter and not present in the queue anymore + for range 7 { + tr := transmitQueue.BlockingPop() + assert.NotEqual(t, tr.Req, testTransmissions[0].tr) + } + }) + + t.Run("transmit queue blocks when empty and resumes when transmission available", func(t *testing.T) { + assert.True(t, transmitQueue.IsEmpty()) + + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tr := transmitQueue.BlockingPop() + assert.Equal(t, tr.Req, testTransmissions[0].tr) + }() + go func() { + defer wg.Done() + 
transmitQueue.Push(testTransmissions[0].tr, testTransmissions[0].ctx) + }() + wg.Wait() + }) + + t.Run("initializes transmissions", func(t *testing.T) { + transmissions := []*Transmission{ + { + Req: &pb.TransmitRequest{ + Payload: []byte("new1"), + }, + ReportCtx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 1, + Round: 1, + ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + } + transmitQueue := NewTransmitQueue(logger.Test(t), sURL, "foo feed ID", 7, deleter) + transmitQueue.Init(transmissions) + + transmission := transmitQueue.BlockingPop() + assert.Equal(t, transmission.Req.Payload, []byte("new1")) + assert.True(t, transmitQueue.IsEmpty()) + }) +} diff --git a/pkg/mercury/test_helpers.go b/pkg/mercury/test_helpers.go new file mode 100644 index 0000000000..a18379499d --- /dev/null +++ b/pkg/mercury/test_helpers.go @@ -0,0 +1,17 @@ +package mercury + +import ( + "github.com/ethereum/go-ethereum/common/hexutil" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" +) + +func MustHexToConfigDigest(s string) (cd ocrtypes.ConfigDigest) { + b := hexutil.MustDecode(s) + var err error + cd, err = ocrtypes.BytesToConfigDigest(b) + if err != nil { + panic(err) + } + return +} diff --git a/pkg/mercury/transmitter.go b/pkg/mercury/transmitter.go new file mode 100644 index 0000000000..9a96371c63 --- /dev/null +++ b/pkg/mercury/transmitter.go @@ -0,0 +1,611 @@ +package mercury + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math/big" + "sort" + "strconv" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/jpillora/backoff" + pkgerrors "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes 
"github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + capStreams "github.com/smartcontractkit/chainlink-common/pkg/capabilities/datastreams" + "github.com/smartcontractkit/chainlink-common/pkg/capabilities/triggers" + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/types/mercury" + "github.com/smartcontractkit/chainlink-common/pkg/utils" + + mercuryutils "github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" +) + +const ( + // Mercury server error codes + DuplicateReport = 2 +) + +var ( + transmitSuccessCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_success_count", + Help: "Number of successful transmissions (duplicates are counted as success)", + }, + []string{"feedID", "serverURL"}, + ) + transmitDuplicateCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_duplicate_count", + Help: "Number of transmissions where the server told us it was a duplicate", + }, + []string{"feedID", "serverURL"}, + ) + transmitConnectionErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_connection_error_count", + Help: "Number of errored transmissions that failed due to problem with the connection", + }, + []string{"feedID", "serverURL"}, + ) + transmitQueueDeleteErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_queue_delete_error_count", + Help: "Running count of DB errors when trying to delete an item from the queue DB", + }, + []string{"feedID", "serverURL"}, + ) + transmitQueueInsertErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_queue_insert_error_count", + Help: "Running count of DB errors when trying to insert an item into the queue DB", 
+ }, + []string{"feedID", "serverURL"}, + ) + transmitQueuePushErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_queue_push_error_count", + Help: "Running count of DB errors when trying to push an item onto the queue", + }, + []string{"feedID", "serverURL"}, + ) + transmitServerErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_server_error_count", + Help: "Number of errored transmissions that failed due to an error returned by the mercury server", + }, + []string{"feedID", "serverURL", "code"}, + ) +) + +type Transmitter interface { + mercury.Transmitter + services.Service +} + +type ConfigTracker interface { + LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) +} + +type TransmitterReportDecoder interface { + BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) + ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) +} + +type BenchmarkPriceDecoder func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) + +var _ Transmitter = (*mercuryTransmitter)(nil) + +type TransmitterConfig interface { + TransmitTimeout() time.Duration +} + +type mercuryTransmitter struct { + services.StateMachine + lggr logger.SugaredLogger + cfg TransmitterConfig + + orm ORM + servers map[string]*server + + codec TransmitterReportDecoder + benchmarkPriceDecoder BenchmarkPriceDecoder + triggerCapability *triggers.MercuryTriggerService + + feedID mercuryutils.FeedID + jobID int32 + fromAccount string + + stopCh services.StopChan + wg *sync.WaitGroup +} + +var PayloadTypes = getPayloadTypes() + +func getPayloadTypes() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + 
return abi.Arguments([]abi.Argument{ + {Name: "reportContext", Type: mustNewType("bytes32[3]")}, + {Name: "report", Type: mustNewType("bytes")}, + {Name: "rawRs", Type: mustNewType("bytes32[]")}, + {Name: "rawSs", Type: mustNewType("bytes32[]")}, + {Name: "rawVs", Type: mustNewType("bytes32")}, + }) +} + +type server struct { + lggr logger.SugaredLogger + + transmitTimeout time.Duration + + c wsrpc.Client + pm *PersistenceManager + q TransmitQueue + + deleteQueue chan *pb.TransmitRequest + + url string + + transmitSuccessCount prometheus.Counter + transmitDuplicateCount prometheus.Counter + transmitConnectionErrorCount prometheus.Counter + transmitQueueDeleteErrorCount prometheus.Counter + transmitQueueInsertErrorCount prometheus.Counter + transmitQueuePushErrorCount prometheus.Counter +} + +func (s *server) HealthReport() map[string]error { + report := map[string]error{} + services.CopyHealth(report, s.c.HealthReport()) + services.CopyHealth(report, s.q.HealthReport()) + return report +} + +func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup) { + defer wg.Done() + ctx, cancel := stopCh.NewCtx() + defer cancel() + + // Exponential backoff for very rarely occurring errors (DB disconnect etc) + b := backoff.Backoff{ + Min: 1 * time.Second, + Max: 120 * time.Second, + Factor: 2, + Jitter: true, + } + + for { + select { + case req := <-s.deleteQueue: + for { + if err := s.pm.Delete(ctx, req); err != nil { + s.lggr.Errorw("Failed to delete transmit request record", "err", err, "req.Payload", req.Payload) + s.transmitQueueDeleteErrorCount.Inc() + select { + case <-time.After(b.Duration()): + // Wait a backoff duration before trying to delete again + continue + case <-stopCh: + // abort and return immediately on stop even if items remain in queue + return + } + } + break + } + // success + b.Reset() + case <-stopCh: + // abort and return immediately on stop even if items remain in queue + return + } + } +} + +func (s *server) 
runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, feedIDHex string) { + defer wg.Done() + // Exponential backoff with very short retry interval (since latency is a priority) + // 5ms, 10ms, 20ms, 40ms etc + b := backoff.Backoff{ + Min: 5 * time.Millisecond, + Max: 1 * time.Second, + Factor: 2, + Jitter: true, + } + ctx, cancel := stopCh.NewCtx() + defer cancel() + for { + t := s.q.BlockingPop() + if t == nil { + // queue was closed + return + } + res, err := func(ctx context.Context) (*pb.TransmitResponse, error) { + ctx, cancel := context.WithTimeout(ctx, utils.WithJitter(s.transmitTimeout)) + defer cancel() + return s.c.Transmit(ctx, t.Req) + }(ctx) + if ctx.Err() != nil { + // only canceled on transmitter close so we can exit + return + } else if err != nil { + s.transmitConnectionErrorCount.Inc() + s.lggr.Errorw("Transmit report failed", "err", err, "reportCtx", t.ReportCtx) + if ok := s.q.Push(t.Req, t.ReportCtx); !ok { + s.lggr.Error("Failed to push report to transmit queue; queue is closed") + return + } + // Wait a backoff duration before pulling the most recent transmission + // the heap + select { + case <-time.After(b.Duration()): + continue + case <-stopCh: + return + } + } + + b.Reset() + if res.Error == "" { + s.transmitSuccessCount.Inc() + s.lggr.Debugw("Transmit report success", "payload", hexutil.Encode(t.Req.Payload), "response", res, "repts", t.ReportCtx.ReportTimestamp) + } else { + // We don't need to retry here because the mercury server + // has confirmed it received the report. 
We only need to retry + // on networking/unknown errors + switch res.Code { + case DuplicateReport: + s.transmitSuccessCount.Inc() + s.transmitDuplicateCount.Inc() + s.lggr.Debugw("Transmit report success; duplicate report", "payload", hexutil.Encode(t.Req.Payload), "response", res, "repts", t.ReportCtx.ReportTimestamp) + default: + transmitServerErrorCount.WithLabelValues(feedIDHex, s.url, strconv.Itoa(int(res.Code))).Inc() + s.lggr.Errorw("Transmit report failed; mercury server returned error", "response", res, "reportCtx", t.ReportCtx, "err", res.Error, "code", res.Code) + } + } + + select { + case s.deleteQueue <- t.Req: + default: + s.lggr.Criticalw("Delete queue is full", "reportCtx", t.ReportCtx) + } + } +} + +const TransmitQueueMaxSize = 10_000 // hardcode this for legacy transmitter since we want the config var to apply only to LLO + +func newServer(lggr logger.Logger, cfg TransmitterConfig, client wsrpc.Client, pm *PersistenceManager, serverURL, feedIDHex string) *server { + return &server{ + logger.Sugared(lggr), + cfg.TransmitTimeout(), + client, + pm, + NewTransmitQueue(lggr, serverURL, feedIDHex, TransmitQueueMaxSize, pm), + make(chan *pb.TransmitRequest, TransmitQueueMaxSize), + serverURL, + transmitSuccessCount.WithLabelValues(feedIDHex, serverURL), + transmitDuplicateCount.WithLabelValues(feedIDHex, serverURL), + transmitConnectionErrorCount.WithLabelValues(feedIDHex, serverURL), + transmitQueueDeleteErrorCount.WithLabelValues(feedIDHex, serverURL), + transmitQueueInsertErrorCount.WithLabelValues(feedIDHex, serverURL), + transmitQueuePushErrorCount.WithLabelValues(feedIDHex, serverURL), + } +} + +func NewTransmitter(lggr logger.Logger, cfg TransmitterConfig, clients map[string]wsrpc.Client, fromAccountHex string, jobID int32, feedID [32]byte, orm ORM, codec TransmitterReportDecoder, benchmarkPriceDecoder BenchmarkPriceDecoder, triggerCapability *triggers.MercuryTriggerService) *mercuryTransmitter { + sugared := logger.Sugared(lggr) + feedIDHex := 
fmt.Sprintf("0x%x", feedID[:]) + servers := make(map[string]*server, len(clients)) + for serverURL, client := range clients { + cLggr := sugared.Named(serverURL).With("serverURL", serverURL) + pm := NewPersistenceManager(cLggr, serverURL, orm, jobID, TransmitQueueMaxSize, flushDeletesFrequency, pruneFrequency) + servers[serverURL] = newServer(cLggr, cfg, client, pm, serverURL, feedIDHex) + } + return &mercuryTransmitter{ + services.StateMachine{}, + sugared.Named("MercuryTransmitter").With("feedID", feedIDHex), + cfg, + orm, + servers, + codec, + benchmarkPriceDecoder, + triggerCapability, + feedID, + jobID, + fromAccountHex, + make(services.StopChan), + &sync.WaitGroup{}, + } +} + +func (mt *mercuryTransmitter) Start(ctx context.Context) (err error) { + return mt.StartOnce("MercuryTransmitter", func() error { + mt.lggr.Debugw("Loading transmit requests from database") + + { + var startClosers []services.StartClose + for _, s := range mt.servers { + transmissions, err := s.pm.Load(ctx) + if err != nil { + return err + } + s.q.Init(transmissions) + // starting pm after loading from it is fine because it simply spawns some garbage collection/prune goroutines + startClosers = append(startClosers, s.c, s.q, s.pm) + + mt.wg.Add(2) + go s.runDeleteQueueLoop(mt.stopCh, mt.wg) + go s.runQueueLoop(mt.stopCh, mt.wg, mt.feedID.Hex()) + } + if err := (&services.MultiStart{}).Start(ctx, startClosers...); err != nil { + return err + } + } + + return nil + }) +} + +func (mt *mercuryTransmitter) Close() error { + return mt.StopOnce("MercuryTransmitter", func() error { + // Drain all the queues first + var qs []io.Closer + for _, s := range mt.servers { + qs = append(qs, s.q) + } + if err := services.CloseAll(qs...); err != nil { + return err + } + + close(mt.stopCh) + mt.wg.Wait() + + // Close all the persistence managers + // Close all the clients + var closers []io.Closer + for _, s := range mt.servers { + closers = append(closers, s.pm) + closers = append(closers, s.c) + } + 
return services.CloseAll(closers...) + }) +} + +func (mt *mercuryTransmitter) Name() string { return mt.lggr.Name() } + +func (mt *mercuryTransmitter) HealthReport() map[string]error { + report := map[string]error{mt.Name(): mt.Healthy()} + for _, s := range mt.servers { + services.CopyHealth(report, s.HealthReport()) + } + return report +} + +func (mt *mercuryTransmitter) sendToTrigger(report ocrtypes.Report, rawReportCtx [3][32]byte, signatures []ocrtypes.AttributedOnchainSignature) error { + rawSignatures := [][]byte{} + for _, sig := range signatures { + rawSignatures = append(rawSignatures, sig.Signature) + } + + reportContextFlat := []byte{} + reportContextFlat = append(reportContextFlat, rawReportCtx[0][:]...) + reportContextFlat = append(reportContextFlat, rawReportCtx[1][:]...) + reportContextFlat = append(reportContextFlat, rawReportCtx[2][:]...) + + converted := capStreams.FeedReport{ + FeedID: mt.feedID.Hex(), + FullReport: report, + ReportContext: reportContextFlat, + Signatures: rawSignatures, + // NOTE: Skipping fields derived from FullReport, they will be filled out at a later stage + // after decoding and validating signatures. + } + return mt.triggerCapability.ProcessReport([]capStreams.FeedReport{converted}) +} + +// Transmit sends the report to the on-chain smart contract's Transmit method. +func (mt *mercuryTransmitter) Transmit(ctx context.Context, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signatures []ocrtypes.AttributedOnchainSignature) error { + rawReportCtx := evmutil.RawReportContext(reportCtx) + if mt.triggerCapability != nil { + // Acting as a Capability - send report to trigger service and exit. 
+ return mt.sendToTrigger(report, rawReportCtx, signatures) + } + + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + for i, as := range signatures { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + panic("eventTransmit(ev): error in SplitSignature") + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + + payload, err := PayloadTypes.Pack(rawReportCtx, []byte(report), rs, ss, vs) + if err != nil { + return pkgerrors.Wrap(err, "abi.Pack failed") + } + + req := &pb.TransmitRequest{ + Payload: payload, + } + + ts, err := mt.codec.ObservationTimestampFromReport(ctx, report) + if err != nil { + mt.lggr.Warnw("Failed to get observation timestamp from report", "err", err) + } + mt.lggr.Debugw("Transmit enqueue", "req.Payload", hexutil.Encode(req.Payload), "report", report, "repts", reportCtx.ReportTimestamp, "signatures", signatures, "observationsTimestamp", ts) + + if err := mt.orm.InsertTransmitRequest(ctx, maps.Keys(mt.servers), req, mt.jobID, reportCtx); err != nil { + return err + } + + g := new(errgroup.Group) + for _, s := range mt.servers { + // https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + if ok := s.q.Push(req, reportCtx); !ok { + s.transmitQueuePushErrorCount.Inc() + return errors.New("transmit queue is closed") + } + return nil + }) + } + + return g.Wait() +} + +// FromAccount returns the stringified (hex) CSA public key +func (mt *mercuryTransmitter) FromAccount(ctx context.Context) (ocrtypes.Account, error) { + return ocrtypes.Account(mt.fromAccount), nil +} + +// LatestConfigDigestAndEpoch retrieves the latest config digest and epoch from the OCR2 contract. 
+func (mt *mercuryTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (cd ocrtypes.ConfigDigest, epoch uint32, err error) { + panic("not needed for OCR3") +} + +func (mt *mercuryTransmitter) FetchInitialMaxFinalizedBlockNumber(ctx context.Context) (*int64, error) { + mt.lggr.Trace("FetchInitialMaxFinalizedBlockNumber") + + report, err := mt.latestReport(ctx, mt.feedID) + if err != nil { + return nil, err + } + + if report == nil { + mt.lggr.Debugw("FetchInitialMaxFinalizedBlockNumber success; got nil report") + return nil, nil + } + + mt.lggr.Debugw("FetchInitialMaxFinalizedBlockNumber success", "currentBlockNum", report.CurrentBlockNumber) + + return &report.CurrentBlockNumber, nil +} + +func (mt *mercuryTransmitter) LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error) { + mt.lggr.Trace("LatestPrice") + + fullReport, err := mt.latestReport(ctx, feedID) + if err != nil { + return nil, err + } + if fullReport == nil { + return nil, nil + } + payload := fullReport.Payload + m := make(map[string]any) + if err := PayloadTypes.UnpackIntoMap(m, payload); err != nil { + return nil, err + } + report, is := m["report"].([]byte) + if !is { + return nil, fmt.Errorf("expected report to be []byte, but it was %T", m["report"]) + } + return mt.benchmarkPriceDecoder(ctx, feedID, report) +} + +// LatestTimestamp will return -1, nil if the feed is missing +func (mt *mercuryTransmitter) LatestTimestamp(ctx context.Context) (int64, error) { + mt.lggr.Trace("LatestTimestamp") + + report, err := mt.latestReport(ctx, mt.feedID) + if err != nil { + return 0, err + } + + if report == nil { + mt.lggr.Debugw("LatestTimestamp success; got nil report") + return -1, nil + } + + mt.lggr.Debugw("LatestTimestamp success", "timestamp", report.ObservationsTimestamp) + + return report.ObservationsTimestamp, nil +} + +func (mt *mercuryTransmitter) latestReport(ctx context.Context, feedID [32]byte) (*pb.Report, error) { + mt.lggr.Trace("latestReport") + + req := 
&pb.LatestReportRequest{ + FeedId: feedID[:], + } + + var reports []*pb.Report + mu := sync.Mutex{} + var g errgroup.Group + for _, s := range mt.servers { + g.Go(func() error { + resp, err := s.c.LatestReport(ctx, req) + if err != nil { + s.lggr.Warnw("latestReport failed", "err", err) + return err + } + if resp == nil { + err = errors.New("latestReport expected non-nil response from server") + s.lggr.Warn(err.Error()) + return err + } + if resp.Error != "" { + err = errors.New(resp.Error) + s.lggr.Warnw("latestReport failed; mercury server returned error", "err", err) + return fmt.Errorf("latestReport failed; mercury server returned error: %s", resp.Error) + } + if resp.Report == nil { + s.lggr.Tracew("latestReport success: returned nil") + } else if !bytes.Equal(resp.Report.FeedId, feedID[:]) { + err = fmt.Errorf("latestReport failed; mismatched feed IDs, expected: 0x%x, got: 0x%x", mt.feedID[:], resp.Report.FeedId) + s.lggr.Errorw("latestReport failed", "err", err) + return err + } else { + s.lggr.Tracew("latestReport success", "observationsTimestamp", resp.Report.ObservationsTimestamp, "currentBlockNum", resp.Report.CurrentBlockNumber) + } + mu.Lock() + defer mu.Unlock() + reports = append(reports, resp.Report) + return nil + }) + } + err := g.Wait() + + if len(reports) == 0 { + return nil, fmt.Errorf("latestReport failed; all servers returned an error: %w", err) + } + + sortReportsLatestFirst(reports) + + return reports[0], nil +} + +func sortReportsLatestFirst(reports []*pb.Report) { + sort.Slice(reports, func(i, j int) bool { + // nils are "earliest" so they go to the end + if reports[i] == nil { + return false + } else if reports[j] == nil { + return true + } + // Handle block number case + if reports[i].ObservationsTimestamp == reports[j].ObservationsTimestamp { + return reports[i].CurrentBlockNumber > reports[j].CurrentBlockNumber + } + // Timestamp case + return reports[i].ObservationsTimestamp > reports[j].ObservationsTimestamp + }) +} diff --git 
a/pkg/mercury/transmitter_test.go b/pkg/mercury/transmitter_test.go new file mode 100644 index 0000000000..4f5e0c13da --- /dev/null +++ b/pkg/mercury/transmitter_test.go @@ -0,0 +1,579 @@ +package mercury + +import ( + "context" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/capabilities/triggers" + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + mercurytypes "github.com/smartcontractkit/chainlink-evm/pkg/mercury/types" + mercuryutils "github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/mocks" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" + "github.com/smartcontractkit/chainlink-evm/pkg/utils" +) + +type mockCfg struct{} + +func (m mockCfg) TransmitQueueMaxSize() uint32 { + return 100_000 +} + +func (m mockCfg) TransmitTimeout() time.Duration { + return 1 * time.Hour +} + +func Test_MercuryTransmitter_Transmit(t *testing.T) { + lggr := logger.Test(t) + db := testutils.NewSqlxDB(t) + var jobID int32 + testutils.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + testutils.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + codec := new(mockCodec) + benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) { + return codec.BenchmarkPriceFromReport(ctx, report) + } + orm := NewORM(db) + clients := map[string]wsrpc.Client{} + + t.Run("with one mercury server", func(t *testing.T) { + t.Run("v2 report transmission successfully enqueued", func(t 
*testing.T) { + report := sampleV2Report + c := &mocks.MockWSRPCClient{} + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + // init the queue since we skipped starting transmitter + mt.servers[sURL].q.Init([]*Transmission{}) + err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) + require.NoError(t, err) + + // ensure it was added to the queue + require.Equal(t, 1, mt.servers[sURL].q.(*transmitQueue).pq.Len()) + assert.Subset(t, mt.servers[sURL].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report) + }) + t.Run("v3 report transmission successfully enqueued", func(t *testing.T) { + report := sampleV3Report + c := &mocks.MockWSRPCClient{} + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + // init the queue since we skipped starting transmitter + mt.servers[sURL].q.Init([]*Transmission{}) + err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) + require.NoError(t, err) + + // ensure it was added to the queue + require.Equal(t, 1, mt.servers[sURL].q.(*transmitQueue).pq.Len()) + assert.Subset(t, mt.servers[sURL].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report) + }) + t.Run("v3 report transmission sent only to trigger service", func(t *testing.T) { + report := sampleV3Report + c := &mocks.MockWSRPCClient{} + clients[sURL] = c + triggerService, err := triggers.NewMercuryTriggerService(0, "", "", lggr) + require.NoError(t, err) + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, triggerService) + // init the queue since we skipped starting transmitter + mt.servers[sURL].q.Init([]*Transmission{}) + err = mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) + require.NoError(t, err) + // queue is 
empty + require.Equal(t, 0, mt.servers[sURL].q.(*transmitQueue).pq.Len()) + }) + }) + + t.Run("with multiple mercury servers", func(t *testing.T) { + report := sampleV3Report + c := &mocks.MockWSRPCClient{} + clients[sURL] = c + clients[sURL2] = c + clients[sURL3] = c + + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + // init the queue since we skipped starting transmitter + mt.servers[sURL].q.Init([]*Transmission{}) + mt.servers[sURL2].q.Init([]*Transmission{}) + mt.servers[sURL3].q.Init([]*Transmission{}) + + err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) + require.NoError(t, err) + + // ensure it was added to the queue + require.Equal(t, 1, mt.servers[sURL].q.(*transmitQueue).pq.Len()) + assert.Subset(t, mt.servers[sURL].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report) + require.Equal(t, 1, mt.servers[sURL2].q.(*transmitQueue).pq.Len()) + assert.Subset(t, mt.servers[sURL2].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report) + require.Equal(t, 1, mt.servers[sURL3].q.(*transmitQueue).pq.Len()) + assert.Subset(t, mt.servers[sURL3].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report) + }) +} + +func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + db := testutils.NewSqlxDB(t) + var jobID int32 + codec := new(mockCodec) + benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) { + return codec.BenchmarkPriceFromReport(ctx, report) + } + + orm := NewORM(db) + clients := map[string]wsrpc.Client{} + + t.Run("successful query", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = 
new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = sampleFeedID[:] + out.Report.ObservationsTimestamp = 42 + return out, nil + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + ts, err := mt.LatestTimestamp(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, int64(42), ts) + }) + + t.Run("successful query returning nil report (new feed) gives latest timestamp = -1", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + out.Report = nil + return out, nil + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + ts, err := mt.LatestTimestamp(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, int64(-1), ts) + }) + + t.Run("failing query", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + return nil, errors.New("something exploded") + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + _, err := mt.LatestTimestamp(testutils.Context(t)) + require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + }) + + t.Run("with multiple servers, uses latest", func(t *testing.T) { + clients[sURL] = &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + return nil, errors.New("something exploded") + }, + } + clients[sURL2] = &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out 
*pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = sampleFeedID[:] + out.Report.ObservationsTimestamp = 42 + return out, nil + }, + } + clients[sURL3] = &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = sampleFeedID[:] + out.Report.ObservationsTimestamp = 41 + return out, nil + }, + } + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + ts, err := mt.LatestTimestamp(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, int64(42), ts) + }) +} + +type mockCodec struct { + val *big.Int + err error +} + +var _ mercurytypes.ReportCodec = &mockCodec{} + +func (m *mockCodec) BenchmarkPriceFromReport(ctx context.Context, _ ocrtypes.Report) (*big.Int, error) { + return m.val, m.err +} + +func (m *mockCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) { + return 0, nil +} + +func Test_MercuryTransmitter_LatestPrice(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + db := testutils.NewSqlxDB(t) + var jobID int32 + + codec := new(mockCodec) + benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) { + return codec.BenchmarkPriceFromReport(ctx, report) + } + orm := NewORM(db) + clients := map[string]wsrpc.Client{} + + t.Run("successful query", func(t *testing.T) { + originalPrice := big.NewInt(123456789) + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + 
out.Report.FeedId = sampleFeedID[:] + out.Report.Payload = buildSamplePayload([]byte("doesn't matter")) + return out, nil + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + + t.Run("BenchmarkPriceFromReport succeeds", func(t *testing.T) { + codec.val = originalPrice + codec.err = nil + + price, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) + require.NoError(t, err) + + assert.Equal(t, originalPrice, price) + }) + t.Run("BenchmarkPriceFromReport fails", func(t *testing.T) { + codec.val = nil + codec.err = errors.New("something exploded") + + _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) + require.Error(t, err) + + assert.EqualError(t, err, "something exploded") + }) + }) + + t.Run("successful query returning nil report (new feed)", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + out.Report = nil + return out, nil + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + price, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) + require.NoError(t, err) + + assert.Nil(t, price) + }) + + t.Run("failing query", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + return nil, errors.New("something exploded") + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) + require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + }) +} + +func 
Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { + t.Parallel() + + lggr := logger.Test(t) + db := testutils.NewSqlxDB(t) + var jobID int32 + codec := new(mockCodec) + benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) { + return codec.BenchmarkPriceFromReport(ctx, report) + } + orm := NewORM(db) + clients := map[string]wsrpc.Client{} + + t.Run("successful query", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = sampleFeedID[:] + out.Report.CurrentBlockNumber = 42 + return out, nil + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.NoError(t, err) + + require.NotNil(t, bn) + assert.Equal(t, 42, int(*bn)) + }) + t.Run("successful query returning nil report (new feed)", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + out.Report = nil + return out, nil + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.NoError(t, err) + + assert.Nil(t, bn) + }) + t.Run("failing query", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + 
return nil, errors.New("something exploded") + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + }) + t.Run("return feed ID is wrong", func(t *testing.T) { + c := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.CurrentBlockNumber = 42 + out.Report.FeedId = []byte{1, 2} + return out, nil + }, + } + clients[sURL] = c + mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil) + _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.Error(t, err) + assert.Contains(t, err.Error(), "latestReport failed; mismatched feed IDs, expected: 0x1c916b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472, got: 0x") + }) +} + +func Test_sortReportsLatestFirst(t *testing.T) { + reports := []*pb.Report{ + nil, + {ObservationsTimestamp: 1}, + {ObservationsTimestamp: 1}, + {ObservationsTimestamp: 2}, + {CurrentBlockNumber: 1}, + nil, + {CurrentBlockNumber: 2}, + {}, + } + + sortReportsLatestFirst(reports) + + assert.Equal(t, int64(2), reports[0].ObservationsTimestamp) + assert.Equal(t, int64(1), reports[1].ObservationsTimestamp) + assert.Equal(t, int64(1), reports[2].ObservationsTimestamp) + assert.Equal(t, int64(0), reports[3].ObservationsTimestamp) + assert.Equal(t, int64(2), reports[3].CurrentBlockNumber) + assert.Equal(t, int64(0), reports[4].ObservationsTimestamp) + assert.Equal(t, int64(1), reports[4].CurrentBlockNumber) + assert.Equal(t, int64(0), 
reports[5].ObservationsTimestamp) + assert.Equal(t, int64(0), reports[5].CurrentBlockNumber) + assert.Nil(t, reports[6]) + assert.Nil(t, reports[7]) +} + +type mockQ struct { + ch chan *Transmission +} + +func newMockQ() *mockQ { + return &mockQ{make(chan *Transmission, 100)} +} + +func (m *mockQ) Start(context.Context) error { return nil } +func (m *mockQ) Close() error { + m.ch <- nil + return nil +} +func (m *mockQ) Ready() error { return nil } +func (m *mockQ) HealthReport() map[string]error { return nil } +func (m *mockQ) Name() string { return "" } +func (m *mockQ) BlockingPop() (t *Transmission) { + val := <-m.ch + return val +} +func (m *mockQ) Push(req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) (ok bool) { + m.ch <- &Transmission{Req: req, ReportCtx: reportCtx} + return true +} +func (m *mockQ) Init(transmissions []*Transmission) {} +func (m *mockQ) IsEmpty() bool { return false } + +func Test_MercuryTransmitter_runQueueLoop(t *testing.T) { + feedIDHex := utils.NewHash().Hex() + lggr := logger.Test(t) + c := &mocks.MockWSRPCClient{} + db := testutils.NewSqlxDB(t) + orm := NewORM(db) + pm := NewPersistenceManager(lggr, sURL, orm, 0, 0, 0, 0) + cfg := mockCfg{} + + s := newServer(lggr, cfg, c, pm, sURL, feedIDHex) + + req := &pb.TransmitRequest{ + Payload: []byte{1, 2, 3}, + ReportFormat: 32, + } + + t.Run("pulls from queue and transmits successfully", func(t *testing.T) { + transmit := make(chan *pb.TransmitRequest, 1) + c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + transmit <- in + return &pb.TransmitResponse{Code: 0, Error: ""}, nil + } + q := newMockQ() + s.q = q + wg := &sync.WaitGroup{} + wg.Add(1) + + go s.runQueueLoop(nil, wg, feedIDHex) + + q.Push(req, sampleReportContext) + + select { + case tr := <-transmit: + assert.Equal(t, []byte{1, 2, 3}, tr.Payload) + assert.Equal(t, 32, int(tr.ReportFormat)) + // case <-time.After(testutils.WaitTimeout(t)): + case <-time.After(1 * time.Second): 
+ t.Fatal("expected a transmit request to be sent") + } + + q.Close() + wg.Wait() + }) + + t.Run("on duplicate, success", func(t *testing.T) { + transmit := make(chan *pb.TransmitRequest, 1) + c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + transmit <- in + return &pb.TransmitResponse{Code: DuplicateReport, Error: ""}, nil + } + q := newMockQ() + s.q = q + wg := &sync.WaitGroup{} + wg.Add(1) + + go s.runQueueLoop(nil, wg, feedIDHex) + + q.Push(req, sampleReportContext) + + select { + case tr := <-transmit: + assert.Equal(t, []byte{1, 2, 3}, tr.Payload) + assert.Equal(t, 32, int(tr.ReportFormat)) + // case <-time.After(testutils.WaitTimeout(t)): + case <-time.After(1 * time.Second): + t.Fatal("expected a transmit request to be sent") + } + + q.Close() + wg.Wait() + }) + t.Run("on server-side error, does not retry", func(t *testing.T) { + transmit := make(chan *pb.TransmitRequest, 1) + c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + transmit <- in + return &pb.TransmitResponse{Code: DuplicateReport, Error: ""}, nil + } + q := newMockQ() + s.q = q + wg := &sync.WaitGroup{} + wg.Add(1) + + go s.runQueueLoop(nil, wg, feedIDHex) + + q.Push(req, sampleReportContext) + + select { + case tr := <-transmit: + assert.Equal(t, []byte{1, 2, 3}, tr.Payload) + assert.Equal(t, 32, int(tr.ReportFormat)) + // case <-time.After(testutils.WaitTimeout(t)): + case <-time.After(1 * time.Second): + t.Fatal("expected a transmit request to be sent") + } + + q.Close() + wg.Wait() + }) + t.Run("on transmit error, retries", func(t *testing.T) { + transmit := make(chan *pb.TransmitRequest, 1) + c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + transmit <- in + return &pb.TransmitResponse{}, errors.New("transmission error") + } + q := newMockQ() + s.q = q + wg := &sync.WaitGroup{} + wg.Add(1) + stopCh := make(chan struct{}, 1) + + go 
s.runQueueLoop(stopCh, wg, feedIDHex) + + q.Push(req, sampleReportContext) + + cnt := 0 + Loop: + for { + select { + case tr := <-transmit: + assert.Equal(t, []byte{1, 2, 3}, tr.Payload) + assert.Equal(t, 32, int(tr.ReportFormat)) + if cnt > 2 { + break Loop + } + cnt++ + // case <-time.After(testutils.WaitTimeout(t)): + case <-time.After(1 * time.Second): + t.Fatal("expected 3 transmit requests to be sent") + } + } + + close(stopCh) + wg.Wait() + }) +} diff --git a/pkg/mercury/types/types.go b/pkg/mercury/types/types.go new file mode 100644 index 0000000000..9891088711 --- /dev/null +++ b/pkg/mercury/types/types.go @@ -0,0 +1,33 @@ +package types + +import ( + "context" + "math/big" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" +) + +type DataSourceORM interface { + LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) +} + +type ReportCodec interface { + BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) +} + +var ( + PriceFeedMissingCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_price_feed_missing", + Help: "Running count of times mercury tried to query a price feed for billing from mercury server, but it was missing", + }, + []string{"queriedFeedID"}, + ) + PriceFeedErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_price_feed_errors", + Help: "Running count of times mercury tried to query a price feed for billing from mercury server, but got an error", + }, + []string{"queriedFeedID"}, + ) +) diff --git a/pkg/mercury/utils/feeds.go b/pkg/mercury/utils/feeds.go new file mode 100644 index 0000000000..51b500b593 --- /dev/null +++ b/pkg/mercury/utils/feeds.go @@ -0,0 +1,117 @@ +package utils + +import ( + "encoding/binary" + + "github.com/ethereum/go-ethereum/common" +) + +var legacyV1FeedIDs = []FeedID{ + // 
Arbitrum mainnet (prod) + mustHexToFeedID("0xb43dc495134fa357725f93539511c5a4febeadf56e7c29c96566c825094f0b20"), + mustHexToFeedID("0xe65b31c6d5b9bdff43a8194dc5b2edc6914ddbc5e9f9e9521f605fc3738fabf5"), + mustHexToFeedID("0x30f9926cdef3de98995fb38a100d5c582ae025ebbb8f9a931500596ce080280a"), + mustHexToFeedID("0x0f49a4533a64c7f53bfdf5e86d791620d93afdec00cfe1896548397b0f4ec81c"), + mustHexToFeedID("0x2cdd4aea8298f5d2e7f8505b91e3313e3aa04376a81f401b4a48c5aab78ee5cf"), + mustHexToFeedID("0x5f82d154119f4251d83b2a58bf61c9483c84241053038a2883abf16ed4926433"), + mustHexToFeedID("0x74aca63821bf7ead199e924d261d277cbec96d1026ab65267d655c51b4536914"), + mustHexToFeedID("0x64ee16b94fdd72d0b3769955445cc82d6804573c22f0f49b67cd02edd07461e7"), + mustHexToFeedID("0x95241f154d34539741b19ce4bae815473fd1b2a90ac3b4b023a692f31edfe90e"), + mustHexToFeedID("0x297cc1e1ee5fc2f45dff1dd11a46694567904f4dbc596c7cc216d6c688605a1b"), + // // Arbitrum mainnet (staging) + mustHexToFeedID("0x62ce6a99c4bebb150191d7b72f7a0c0206af00baca480ab007caa4b5bf4bf02a"), + mustHexToFeedID("0x984126712e6a8b5b4fe138c49b29483a12e77b5cb3213a0769252380c57480e4"), + mustHexToFeedID("0xb74f650d9cae6259ab4212f76abe746600be3a4926947725ed107943915346c1"), + mustHexToFeedID("0xa0098c4c06cbab05b2598aecad0cbf49d44780c56d40514e09fd7a9e76a2db00"), + mustHexToFeedID("0x2206b467d04656a8a83af43a428d6b66f787162db629f9caed0c12b54a32998e"), + mustHexToFeedID("0x55488e61b59ea629df66698c8eea1390f0aedc24942e074a6d565569fb90afde"), + mustHexToFeedID("0x98d66aab30d62d044cc55ffccb79ae35151348f40ff06a98c92001ed6ec8e886"), + mustHexToFeedID("0x2e768c0eca65d0449ee825b8a921349501339a2487c02146f77611ae01c31a50"), + mustHexToFeedID("0xb29931d9fe1e9fc023b4d2f0f1789c8b5e21aabf389f86f9702241a0178345dd"), + mustHexToFeedID("0xd8b8cfc1e2dd75116e5792d11810d830ef48843fd44e1633385e81157f8da6b5"), + mustHexToFeedID("0x09f8d0caff8cecb7f5e493d4de2ab98b4392f6d07923cd19b2cb524779301b85"), + 
mustHexToFeedID("0xe645924bbf507304dc4bd37f02c8dac73da3b7eb67378de98cfc59f17ba6774a"), + // Arbitrum testnet (production) + mustHexToFeedID("0x695be66b6a7979f2b3ed33a3d718eabebaf0a881f1f6598b5530875b7e8150ab"), + mustHexToFeedID("0x259b566b9d3c64d1e4a8656e2d6fd4c08e19f9fa9637ae76d52e428d07cca8e9"), + mustHexToFeedID("0x26c16f2054b7a1d77ae83a0429dace9f3000ba4dbf1690236e8f575742e98f66"), + mustHexToFeedID("0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"), + mustHexToFeedID("0xbf1febc8c335cb236c1995c1007a928a3f7ae8307a1a20cb31334e6d316c62d1"), + mustHexToFeedID("0x4ce52cf28e49f4673198074968aeea280f13b5f897c687eb713bcfc1eeab89ba"), + mustHexToFeedID("0xb21d58dccab05dcea22ab780ca010c4bec34e61ce7310e30f4ad0ff8c1621d27"), + mustHexToFeedID("0x5ad0d18436dd95672e69903efe95bdfb43a05cb55e8965c5af93db8170c8820c"), + mustHexToFeedID("0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"), + mustHexToFeedID("0x14e044f932bb959cc2aa8dc1ba110c09224e639aae00264c1ffc2a0830904a3c"), + mustHexToFeedID("0x555344432d5553442d415242495452554d2d544553544e455400000000000000"), + mustHexToFeedID("0x12be1859ee43f46bab53750915f20855f54e891f88ddd524f26a72d6f4deed1d"), + // // Arbitrum testnet (staging) + mustHexToFeedID("0x8837f28f5172f18071f164b8540fe8c95162dc0051e31005023fadc1cd9c4b50"), + mustHexToFeedID("0xd130b5acd88b47eb7c372611205d5a9ca474829a2719e396ab1eb4f956674e4e"), + mustHexToFeedID("0x6d2f5a4b3ba6c1953b4bb636f6ad03aec01b6222274f8ca1e39e53ee12a8cdf3"), + mustHexToFeedID("0x6962e629c3a0f5b7e3e9294b0c283c9b20f94f1c89c8ba8c1ee4650738f20fb2"), + mustHexToFeedID("0x557b817c6be7392364cef0dd11007c43caea1de78ce42e4f1eadc383e7cb209c"), + mustHexToFeedID("0x3250b5dd9491cb11138048d070b8636c35d96fff29671dc68b0723ad41f53433"), + mustHexToFeedID("0x3781c2691f6980dc66a72c03a32edb769fe05a9c9cb729cd7e96ecfd89450a0a"), + mustHexToFeedID("0xbbbf52c5797cc86d6bd9413d59ec624f07baf5045290ecd5ac6541d5a7ffd234"), + 
mustHexToFeedID("0xf753e1201d54ac94dfd9334c542562ff7e42993419a661261d010af0cbfd4e34"), + mustHexToFeedID("0x2489ce4577e814d6794218a13ef3c04cac976f991305400a4c0a1ddcffb90357"), + mustHexToFeedID("0xa5b07943b89e2c278fc8a2754e2854316e03cb959f6d323c2d5da218fb6b0ff8"), + mustHexToFeedID("0x1c2c0dfac0eb2aae2c05613f0d677daae164cdd406bd3dd6153d743302ce56e8"), +} + +var legacyV1FeedIDM map[FeedID]struct{} + +func init() { + legacyV1FeedIDM = make(map[FeedID]struct{}) + for _, feedID := range legacyV1FeedIDs { + legacyV1FeedIDM[feedID] = struct{}{} + } +} + +func mustHexToFeedID(s string) FeedID { + f := new(FeedID) + if err := f.UnmarshalText([]byte(s)); err != nil { + panic(err) + } + return *f +} + +type FeedVersion uint16 + +const ( + _ FeedVersion = iota + REPORT_V1 + REPORT_V2 + REPORT_V3 + REPORT_V4 + _ +) + +type FeedID [32]byte + +func BytesToFeedID(b []byte) FeedID { + return (FeedID)(common.BytesToHash(b)) +} + +func (f FeedID) Hex() string { return (common.Hash)(f).Hex() } + +func (f FeedID) String() string { return (common.Hash)(f).String() } + +func (f *FeedID) UnmarshalText(input []byte) error { + return (*common.Hash)(f).UnmarshalText(input) +} + +func (f FeedID) Version() FeedVersion { + if _, exists := legacyV1FeedIDM[f]; exists { + return REPORT_V1 + } else if f[0] == 0x01 { // Keystone Feed IDs + return FeedVersion(binary.BigEndian.Uint16(f[5:7])) + } + + return FeedVersion(binary.BigEndian.Uint16(f[:2])) +} + +func (f FeedID) IsV1() bool { return f.Version() == REPORT_V1 } +func (f FeedID) IsV2() bool { return f.Version() == REPORT_V2 } +func (f FeedID) IsV3() bool { return f.Version() == REPORT_V3 } +func (f FeedID) IsV4() bool { return f.Version() == REPORT_V4 } diff --git a/pkg/mercury/utils/feeds_test.go b/pkg/mercury/utils/feeds_test.go new file mode 100644 index 0000000000..d6db7a4a8c --- /dev/null +++ b/pkg/mercury/utils/feeds_test.go @@ -0,0 +1,58 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + 
v1FeedID = (FeedID)([32]uint8{00, 01, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}) + v2FeedID = (FeedID)([32]uint8{00, 02, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}) + v3FeedID = (FeedID)([32]uint8{00, 03, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}) + keystonev2Feed = (FeedID)([32]uint8{01, 12, 34, 56, 78, 00, 02, 04, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00}) + keystonev3Feed = (FeedID)([32]uint8{01, 12, 34, 56, 78, 00, 03, 04, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00}) + keystonev4Feed = (FeedID)([32]uint8{01, 12, 34, 56, 78, 00, 04, 04, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00}) +) + +func Test_FeedID_Version(t *testing.T) { + t.Run("versioned feed ID", func(t *testing.T) { + assert.Equal(t, REPORT_V1, v1FeedID.Version()) + assert.True(t, v1FeedID.IsV1()) + assert.False(t, v1FeedID.IsV2()) + assert.False(t, v1FeedID.IsV3()) + + assert.Equal(t, REPORT_V2, v2FeedID.Version()) + assert.False(t, v2FeedID.IsV1()) + assert.True(t, v2FeedID.IsV2()) + assert.False(t, v2FeedID.IsV3()) + + assert.Equal(t, REPORT_V3, v3FeedID.Version()) + assert.False(t, v3FeedID.IsV1()) + assert.False(t, v3FeedID.IsV2()) + assert.True(t, v3FeedID.IsV3()) + + assert.Equal(t, REPORT_V2, keystonev2Feed.Version()) + assert.False(t, keystonev2Feed.IsV1()) + assert.True(t, keystonev2Feed.IsV2()) + assert.False(t, keystonev2Feed.IsV3()) + assert.False(t, keystonev2Feed.IsV4()) + + assert.Equal(t, REPORT_V3, keystonev3Feed.Version()) + assert.False(t, keystonev3Feed.IsV1()) + assert.False(t, keystonev3Feed.IsV2()) + assert.True(t, 
keystonev3Feed.IsV3()) + assert.False(t, keystonev3Feed.IsV4()) + + assert.Equal(t, REPORT_V4, keystonev4Feed.Version()) + assert.False(t, keystonev4Feed.IsV1()) + assert.False(t, keystonev4Feed.IsV2()) + assert.False(t, keystonev4Feed.IsV3()) + assert.True(t, keystonev4Feed.IsV4()) + }) + t.Run("legacy special cases", func(t *testing.T) { + for _, feedID := range legacyV1FeedIDs { + assert.Equal(t, REPORT_V1, feedID.Version()) + } + }) +} diff --git a/pkg/mercury/v2/reportcodec/report_codec.go b/pkg/mercury/v2/reportcodec/report_codec.go new file mode 100644 index 0000000000..0fd8563615 --- /dev/null +++ b/pkg/mercury/v2/reportcodec/report_codec.go @@ -0,0 +1,79 @@ +package reportcodec + +import ( + "context" + "errors" + "fmt" + "math/big" + + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" + reporttypes "github.com/smartcontractkit/chainlink-evm/pkg/mercury/v2/types" +) + +var ReportTypes = reporttypes.GetSchema() +var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word +var zero = big.NewInt(0) + +var _ v2.ReportCodec = &ReportCodec{} + +type ReportCodec struct { + logger logger.Logger + feedID utils.FeedID +} + +func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec { + return &ReportCodec{lggr, feedID} +} + +func (r *ReportCodec) BuildReport(ctx context.Context, rf v2.ReportFields) (ocrtypes.Report, error) { + var merr error + if rf.BenchmarkPrice == nil { + merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil")) + } + if rf.LinkFee == nil { + merr = errors.Join(merr, errors.New("linkFee may not be nil")) + } else if rf.LinkFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee)) + } + if 
rf.NativeFee == nil { + merr = errors.Join(merr, errors.New("nativeFee may not be nil")) + } else if rf.NativeFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee)) + } + if merr != nil { + return nil, merr + } + reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice) + return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob") +} + +func (r *ReportCodec) MaxReportLength(ctx context.Context, n int) (int, error) { + return maxReportLength, nil +} + +func (r *ReportCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) { + decoded, err := r.Decode(ctx, report) + if err != nil { + return 0, err + } + return decoded.ObservationsTimestamp, nil +} + +func (r *ReportCodec) Decode(ctx context.Context, report ocrtypes.Report) (*reporttypes.Report, error) { + return reporttypes.Decode(report) +} + +func (r *ReportCodec) BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) { + decoded, err := r.Decode(ctx, report) + if err != nil { + return nil, err + } + return decoded.BenchmarkPrice, nil +} diff --git a/pkg/mercury/v2/reportcodec/report_codec_test.go b/pkg/mercury/v2/reportcodec/report_codec_test.go new file mode 100644 index 0000000000..e1bb07a236 --- /dev/null +++ b/pkg/mercury/v2/reportcodec/report_codec_test.go @@ -0,0 +1,162 @@ +package reportcodec + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" +) + +func newValidReportFields() v2.ReportFields { + return v2.ReportFields{ + Timestamp: 242, + BenchmarkPrice: big.NewInt(243), + ValidFromTimestamp: 
123, + ExpiresAt: 20, + LinkFee: big.NewInt(456), + NativeFee: big.NewInt(457), + } +} + +func Test_ReportCodec_BuildReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BuildReport errors on zero values", func(t *testing.T) { + ctx := testutils.Context(t) + _, err := r.BuildReport(ctx, v2.ReportFields{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "benchmarkPrice may not be nil") + assert.Contains(t, err.Error(), "linkFee may not be nil") + assert.Contains(t, err.Error(), "nativeFee may not be nil") + }) + + t.Run("BuildReport constructs a report from observations", func(t *testing.T) { + ctx := testutils.Context(t) + rf := newValidReportFields() + // only need to test happy path since validations are done in relaymercury + + report, err := r.BuildReport(ctx, rf) + require.NoError(t, err) + + reportElems := make(map[string]any) + err = ReportTypes.UnpackIntoMap(reportElems, report) + require.NoError(t, err) + + assert.Equal(t, 242, int(reportElems["observationsTimestamp"].(uint32))) + assert.Equal(t, int64(243), reportElems["benchmarkPrice"].(*big.Int).Int64()) + assert.Equal(t, uint32(123), reportElems["validFromTimestamp"].(uint32)) + assert.Equal(t, uint32(20), reportElems["expiresAt"].(uint32)) + assert.Equal(t, int64(456), reportElems["linkFee"].(*big.Int).Int64()) + assert.Equal(t, int64(457), reportElems["nativeFee"].(*big.Int).Int64()) + + assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3}, report) + max, err := r.MaxReportLength(ctx, 4) + require.NoError(t, err) + assert.LessOrEqual(t, len(report), max) + + t.Run("Decode decodes the report", func(t *testing.T) { + ctx := testutils.Context(t) + decoded, err := r.Decode(ctx, report) + require.NoError(t, err) + + require.NotNil(t, decoded) + + assert.Equal(t, uint32(242), decoded.ObservationsTimestamp) + assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice) + assert.Equal(t, uint32(123), decoded.ValidFromTimestamp) + assert.Equal(t, uint32(20), decoded.ExpiresAt) + assert.Equal(t, big.NewInt(456), decoded.LinkFee) + assert.Equal(t, big.NewInt(457), decoded.NativeFee) + }) + }) + + t.Run("errors on negative fee", func(t *testing.T) { + rf := newValidReportFields() + rf.LinkFee = big.NewInt(-1) + rf.NativeFee = big.NewInt(-1) + ctx := testutils.Context(t) + _, err := r.BuildReport(ctx, rf) + require.Error(t, err) + + assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)") + assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)") + }) + + t.Run("Decode errors on invalid report", func(t *testing.T) { + ctx := testutils.Context(t) + _, err := r.Decode(ctx, []byte{1, 2, 3}) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + + longBad := make([]byte, 64) + for i := range longBad { + longBad[i] = byte(i) + } + _, err = r.Decode(ctx, longBad) + assert.EqualError(t, 
err, "failed to decode report: abi: improperly encoded uint32 value") + }) +} + +func buildSampleReport(ts int64) []byte { + feedID := [32]byte{'f', 'o', 'o'} + timestamp := uint32(ts) + bp := big.NewInt(242) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp) + if err != nil { + panic(err) + } + return b +} + +func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) { + report := buildSampleReport(123) + + ctx := testutils.Context(t) + ts, err := r.ObservationTimestampFromReport(ctx, report) + require.NoError(t, err) + + assert.Equal(t, uint32(123), ts) + }) + t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) { + report := []byte{1, 2, 3} + + ctx := testutils.Context(t) + _, err := r.ObservationTimestampFromReport(ctx, report) + require.Error(t, err) + + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} + +func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) { + ctx := testutils.Context(t) + report := buildSampleReport(123) + + bp, err := r.BenchmarkPriceFromReport(ctx, report) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(242), bp) + }) + t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) { + ctx := testutils.Context(t) + _, err := r.BenchmarkPriceFromReport(ctx, []byte{1, 2, 3}) + require.Error(t, err) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} diff --git 
a/pkg/mercury/v2/types/types.go b/pkg/mercury/v2/types/types.go new file mode 100644 index 0000000000..3c1df286d1 --- /dev/null +++ b/pkg/mercury/v2/types/types.go @@ -0,0 +1,52 @@ +package reporttypes + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +var schema = GetSchema() + +func GetSchema() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "feedId", Type: mustNewType("bytes32")}, + {Name: "validFromTimestamp", Type: mustNewType("uint32")}, + {Name: "observationsTimestamp", Type: mustNewType("uint32")}, + {Name: "nativeFee", Type: mustNewType("uint192")}, + {Name: "linkFee", Type: mustNewType("uint192")}, + {Name: "expiresAt", Type: mustNewType("uint32")}, + {Name: "benchmarkPrice", Type: mustNewType("int192")}, + }) +} + +type Report struct { + FeedId [32]byte + ObservationsTimestamp uint32 + BenchmarkPrice *big.Int + ValidFromTimestamp uint32 + ExpiresAt uint32 + LinkFee *big.Int + NativeFee *big.Int +} + +// Decode is made available to external users (i.e. 
mercury server) +func Decode(report []byte) (*Report, error) { + values, err := schema.Unpack(report) + if err != nil { + return nil, fmt.Errorf("failed to decode report: %w", err) + } + decoded := new(Report) + if err = schema.Copy(decoded, values); err != nil { + return nil, fmt.Errorf("failed to copy report values to struct: %w", err) + } + return decoded, nil +} diff --git a/pkg/mercury/v3/reportcodec/report_codec.go b/pkg/mercury/v3/reportcodec/report_codec.go new file mode 100644 index 0000000000..d76cf7dd14 --- /dev/null +++ b/pkg/mercury/v3/reportcodec/report_codec.go @@ -0,0 +1,85 @@ +package reportcodec + +import ( + "context" + "errors" + "fmt" + "math/big" + + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" + reporttypes "github.com/smartcontractkit/chainlink-evm/pkg/mercury/v3/types" +) + +var ReportTypes = reporttypes.GetSchema() +var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word +var zero = big.NewInt(0) + +var _ v3.ReportCodec = &ReportCodec{} + +type ReportCodec struct { + logger logger.Logger + feedID utils.FeedID +} + +func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec { + return &ReportCodec{lggr, feedID} +} + +func (r *ReportCodec) BuildReport(ctx context.Context, rf v3.ReportFields) (ocrtypes.Report, error) { + var merr error + if rf.BenchmarkPrice == nil { + merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil")) + } + if rf.Bid == nil { + merr = errors.Join(merr, errors.New("bid may not be nil")) + } + if rf.Ask == nil { + merr = errors.Join(merr, errors.New("ask may not be nil")) + } + if rf.LinkFee == nil { + merr = errors.Join(merr, errors.New("linkFee may not be nil")) + } else if rf.LinkFee.Cmp(zero) < 0 { + merr = 
errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee)) + } + if rf.NativeFee == nil { + merr = errors.Join(merr, errors.New("nativeFee may not be nil")) + } else if rf.NativeFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee)) + } + if merr != nil { + return nil, merr + } + reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice, rf.Bid, rf.Ask) + return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob") +} + +func (r *ReportCodec) MaxReportLength(ctx context.Context, n int) (int, error) { + return maxReportLength, nil +} + +func (r *ReportCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) { + decoded, err := r.Decode(report) + if err != nil { + return 0, err + } + return decoded.ObservationsTimestamp, nil +} + +func (r *ReportCodec) Decode(report ocrtypes.Report) (*reporttypes.Report, error) { + return reporttypes.Decode(report) +} + +func (r *ReportCodec) BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) { + decoded, err := r.Decode(report) + if err != nil { + return nil, err + } + return decoded.BenchmarkPrice, nil +} diff --git a/pkg/mercury/v3/reportcodec/report_codec_test.go b/pkg/mercury/v3/reportcodec/report_codec_test.go new file mode 100644 index 0000000000..39592c49ae --- /dev/null +++ b/pkg/mercury/v3/reportcodec/report_codec_test.go @@ -0,0 +1,168 @@ +package reportcodec + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" +) + +func newValidReportFields() v3.ReportFields { + return v3.ReportFields{ + 
Timestamp: 242, + BenchmarkPrice: big.NewInt(243), + Bid: big.NewInt(244), + Ask: big.NewInt(245), + ValidFromTimestamp: 123, + ExpiresAt: 20, + LinkFee: big.NewInt(456), + NativeFee: big.NewInt(457), + } +} + +func Test_ReportCodec_BuildReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BuildReport errors on zero values", func(t *testing.T) { + ctx := testutils.Context(t) + _, err := r.BuildReport(ctx, v3.ReportFields{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "benchmarkPrice may not be nil") + assert.Contains(t, err.Error(), "linkFee may not be nil") + assert.Contains(t, err.Error(), "nativeFee may not be nil") + }) + + t.Run("BuildReport constructs a report from observations", func(t *testing.T) { + rf := newValidReportFields() + // only need to test happy path since validations are done in relaymercury + + ctx := testutils.Context(t) + report, err := r.BuildReport(ctx, rf) + require.NoError(t, err) + + reportElems := make(map[string]any) + err = ReportTypes.UnpackIntoMap(reportElems, report) + require.NoError(t, err) + + assert.Equal(t, 242, int(reportElems["observationsTimestamp"].(uint32))) + assert.Equal(t, int64(243), reportElems["benchmarkPrice"].(*big.Int).Int64()) + assert.Equal(t, int64(244), reportElems["bid"].(*big.Int).Int64()) + assert.Equal(t, int64(245), reportElems["ask"].(*big.Int).Int64()) + assert.Equal(t, uint32(123), reportElems["validFromTimestamp"].(uint32)) + assert.Equal(t, uint32(20), reportElems["expiresAt"].(uint32)) + assert.Equal(t, int64(456), reportElems["linkFee"].(*big.Int).Int64()) + assert.Equal(t, int64(457), reportElems["nativeFee"].(*big.Int).Int64()) + + assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf5}, report) + max, err := r.MaxReportLength(ctx, 4) + require.NoError(t, err) + assert.LessOrEqual(t, len(report), max) + + t.Run("Decode decodes the report", func(t *testing.T) { + decoded, err := r.Decode(report) + require.NoError(t, err) + + require.NotNil(t, decoded) + + assert.Equal(t, uint32(242), decoded.ObservationsTimestamp) + assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice) + assert.Equal(t, big.NewInt(244), decoded.Bid) + assert.Equal(t, big.NewInt(245), decoded.Ask) + assert.Equal(t, uint32(123), decoded.ValidFromTimestamp) + assert.Equal(t, uint32(20), decoded.ExpiresAt) + assert.Equal(t, big.NewInt(456), decoded.LinkFee) + assert.Equal(t, big.NewInt(457), decoded.NativeFee) + }) + }) + + t.Run("errors on negative fee", func(t *testing.T) { + rf := newValidReportFields() + rf.LinkFee = big.NewInt(-1) + rf.NativeFee = big.NewInt(-1) + ctx := 
testutils.Context(t) + _, err := r.BuildReport(ctx, rf) + require.Error(t, err) + + assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)") + assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)") + }) + + t.Run("Decode errors on invalid report", func(t *testing.T) { + _, err := r.Decode([]byte{1, 2, 3}) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + + longBad := make([]byte, 64) + for i := range longBad { + longBad[i] = byte(i) + } + _, err = r.Decode(longBad) + assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value") + }) +} + +func buildSampleReport(ts int64) []byte { + feedID := [32]byte{'f', 'o', 'o'} + timestamp := uint32(ts) + bp := big.NewInt(242) + bid := big.NewInt(243) + ask := big.NewInt(244) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask) + if err != nil { + panic(err) + } + return b +} + +func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) { + report := buildSampleReport(123) + + ctx := testutils.Context(t) + ts, err := r.ObservationTimestampFromReport(ctx, report) + require.NoError(t, err) + + assert.Equal(t, uint32(123), ts) + }) + t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) { + report := []byte{1, 2, 3} + + ctx := testutils.Context(t) + _, err := r.ObservationTimestampFromReport(ctx, report) + require.Error(t, err) + + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} + +func Test_ReportCodec_BenchmarkPriceFromReport(t 
*testing.T) { + r := ReportCodec{} + + t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) { + ctx := testutils.Context(t) + report := buildSampleReport(123) + + bp, err := r.BenchmarkPriceFromReport(ctx, report) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(242), bp) + }) + t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) { + ctx := testutils.Context(t) + _, err := r.BenchmarkPriceFromReport(ctx, []byte{1, 2, 3}) + require.Error(t, err) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} diff --git a/pkg/mercury/v3/types/types.go b/pkg/mercury/v3/types/types.go new file mode 100644 index 0000000000..e99f529f85 --- /dev/null +++ b/pkg/mercury/v3/types/types.go @@ -0,0 +1,56 @@ +package reporttypes + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +var schema = GetSchema() + +func GetSchema() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "feedId", Type: mustNewType("bytes32")}, + {Name: "validFromTimestamp", Type: mustNewType("uint32")}, + {Name: "observationsTimestamp", Type: mustNewType("uint32")}, + {Name: "nativeFee", Type: mustNewType("uint192")}, + {Name: "linkFee", Type: mustNewType("uint192")}, + {Name: "expiresAt", Type: mustNewType("uint32")}, + {Name: "benchmarkPrice", Type: mustNewType("int192")}, + {Name: "bid", Type: mustNewType("int192")}, + {Name: "ask", Type: mustNewType("int192")}, + }) +} + +type Report struct { + FeedId [32]byte + ObservationsTimestamp uint32 + BenchmarkPrice *big.Int + Bid *big.Int + Ask *big.Int + ValidFromTimestamp uint32 + ExpiresAt uint32 + LinkFee *big.Int + NativeFee *big.Int +} + +// 
Decode is made available to external users (i.e. mercury server) +func Decode(report []byte) (*Report, error) { + values, err := schema.Unpack(report) + if err != nil { + return nil, fmt.Errorf("failed to decode report: %w", err) + } + decoded := new(Report) + if err = schema.Copy(decoded, values); err != nil { + return nil, fmt.Errorf("failed to copy report values to struct: %w", err) + } + return decoded, nil +} diff --git a/pkg/mercury/v4/reportcodec/report_codec.go b/pkg/mercury/v4/reportcodec/report_codec.go new file mode 100644 index 0000000000..99def29fb8 --- /dev/null +++ b/pkg/mercury/v4/reportcodec/report_codec.go @@ -0,0 +1,78 @@ +package reportcodec + +import ( + "context" + "errors" + "fmt" + "math/big" + + pkgerrors "github.com/pkg/errors" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/utils" + reporttypes "github.com/smartcontractkit/chainlink-evm/pkg/mercury/v4/types" +) + +var ReportTypes = reporttypes.GetSchema() +var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word +var zero = big.NewInt(0) + +var _ v4.ReportCodec = &ReportCodec{} + +type ReportCodec struct { + logger logger.Logger + feedID utils.FeedID +} + +func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec { + return &ReportCodec{lggr, feedID} +} + +func (r *ReportCodec) BuildReport(ctx context.Context, rf v4.ReportFields) (ocrtypes.Report, error) { + var merr error + if rf.BenchmarkPrice == nil { + merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil")) + } + if rf.LinkFee == nil { + merr = errors.Join(merr, errors.New("linkFee may not be nil")) + } else if rf.LinkFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee)) + } + if rf.NativeFee == nil { + merr = 
errors.Join(merr, errors.New("nativeFee may not be nil")) + } else if rf.NativeFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee)) + } + if merr != nil { + return nil, merr + } + reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice, rf.MarketStatus) + return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob") +} + +func (r *ReportCodec) MaxReportLength(ctx context.Context, n int) (int, error) { + return maxReportLength, nil +} + +func (r *ReportCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) { + decoded, err := r.Decode(ctx, report) + if err != nil { + return 0, err + } + return decoded.ObservationsTimestamp, nil +} + +func (r *ReportCodec) Decode(ctx context.Context, report ocrtypes.Report) (*reporttypes.Report, error) { + return reporttypes.Decode(report) +} + +func (r *ReportCodec) BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) { + decoded, err := r.Decode(ctx, report) + if err != nil { + return nil, err + } + return decoded.BenchmarkPrice, nil +} diff --git a/pkg/mercury/v4/reportcodec/report_codec_test.go b/pkg/mercury/v4/reportcodec/report_codec_test.go new file mode 100644 index 0000000000..2caa4f4d75 --- /dev/null +++ b/pkg/mercury/v4/reportcodec/report_codec_test.go @@ -0,0 +1,164 @@ +package reportcodec + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4" +) + +func newValidReportFields() v4.ReportFields { + return v4.ReportFields{ + Timestamp: 242, + BenchmarkPrice: big.NewInt(243), + ValidFromTimestamp: 123, + ExpiresAt: 20, + LinkFee: big.NewInt(456), + NativeFee: 
big.NewInt(457), + MarketStatus: 1, + } +} + +func Test_ReportCodec_BuildReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BuildReport errors on zero values", func(t *testing.T) { + ctx := t.Context() + _, err := r.BuildReport(ctx, v4.ReportFields{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "benchmarkPrice may not be nil") + assert.Contains(t, err.Error(), "linkFee may not be nil") + assert.Contains(t, err.Error(), "nativeFee may not be nil") + }) + + t.Run("BuildReport constructs a report from observations", func(t *testing.T) { + ctx := t.Context() + rf := newValidReportFields() + // only need to test happy path since validations are done in relaymercury + + report, err := r.BuildReport(ctx, rf) + require.NoError(t, err) + + reportElems := make(map[string]any) + err = ReportTypes.UnpackIntoMap(reportElems, report) + require.NoError(t, err) + + assert.Equal(t, 242, int(reportElems["observationsTimestamp"].(uint32))) + assert.Equal(t, int64(243), reportElems["benchmarkPrice"].(*big.Int).Int64()) + assert.Equal(t, uint32(123), reportElems["validFromTimestamp"].(uint32)) + assert.Equal(t, uint32(20), reportElems["expiresAt"].(uint32)) + assert.Equal(t, int64(456), reportElems["linkFee"].(*big.Int).Int64()) + assert.Equal(t, int64(457), reportElems["nativeFee"].(*big.Int).Int64()) + assert.Equal(t, uint32(1), reportElems["marketStatus"].(uint32)) + + assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, report) + max, err := r.MaxReportLength(ctx, 4) + require.NoError(t, err) + assert.LessOrEqual(t, len(report), max) + + t.Run("Decode decodes the report", func(t *testing.T) { + decoded, err := r.Decode(t.Context(), report) + require.NoError(t, err) + + require.NotNil(t, decoded) + + assert.Equal(t, uint32(242), decoded.ObservationsTimestamp) + assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice) + assert.Equal(t, uint32(123), decoded.ValidFromTimestamp) + assert.Equal(t, uint32(20), decoded.ExpiresAt) + assert.Equal(t, big.NewInt(456), decoded.LinkFee) + assert.Equal(t, big.NewInt(457), decoded.NativeFee) + assert.Equal(t, uint32(1), decoded.MarketStatus) + }) + }) + + t.Run("errors on negative fee", func(t *testing.T) { + ctx := t.Context() + rf := newValidReportFields() + rf.LinkFee = big.NewInt(-1) + rf.NativeFee = big.NewInt(-1) + _, err := r.BuildReport(ctx, rf) + require.Error(t, err) + + assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)") + assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)") + }) + + t.Run("Decode errors on invalid report", func(t *testing.T) { + ctx := t.Context() + _, err := r.Decode(ctx, []byte{1, 2, 3}) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: 
length insufficient 3 require 32") + + longBad := make([]byte, 64) + for i := range longBad { + longBad[i] = byte(i) + } + _, err = r.Decode(ctx, longBad) + assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value") + }) +} + +func buildSampleReport(ts int64) []byte { + feedID := [32]byte{'f', 'o', 'o'} + timestamp := uint32(ts) + bp := big.NewInt(242) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + marketStatus := uint32(1) + + b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, marketStatus) + if err != nil { + panic(err) + } + return b +} + +func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) { + ctx := t.Context() + report := buildSampleReport(123) + + ts, err := r.ObservationTimestampFromReport(ctx, report) + require.NoError(t, err) + + assert.Equal(t, uint32(123), ts) + }) + t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) { + ctx := t.Context() + report := []byte{1, 2, 3} + + _, err := r.ObservationTimestampFromReport(ctx, report) + require.Error(t, err) + + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} + +func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) { + ctx := t.Context() + report := buildSampleReport(123) + + bp, err := r.BenchmarkPriceFromReport(ctx, report) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(242), bp) + }) + t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) { + ctx := t.Context() + _, err := r.BenchmarkPriceFromReport(ctx, 
[]byte{1, 2, 3}) + require.Error(t, err) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} diff --git a/pkg/mercury/v4/types/types.go b/pkg/mercury/v4/types/types.go new file mode 100644 index 0000000000..584836c1e9 --- /dev/null +++ b/pkg/mercury/v4/types/types.go @@ -0,0 +1,54 @@ +package reporttypes + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +var schema = GetSchema() + +func GetSchema() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "feedId", Type: mustNewType("bytes32")}, + {Name: "validFromTimestamp", Type: mustNewType("uint32")}, + {Name: "observationsTimestamp", Type: mustNewType("uint32")}, + {Name: "nativeFee", Type: mustNewType("uint192")}, + {Name: "linkFee", Type: mustNewType("uint192")}, + {Name: "expiresAt", Type: mustNewType("uint32")}, + {Name: "benchmarkPrice", Type: mustNewType("int192")}, + {Name: "marketStatus", Type: mustNewType("uint32")}, + }) +} + +type Report struct { + FeedId [32]byte + ObservationsTimestamp uint32 + BenchmarkPrice *big.Int + ValidFromTimestamp uint32 + ExpiresAt uint32 + LinkFee *big.Int + NativeFee *big.Int + MarketStatus uint32 +} + +// Decode is made available to external users (i.e. 
mercury server) +func Decode(report []byte) (*Report, error) { + values, err := schema.Unpack(report) + if err != nil { + return nil, fmt.Errorf("failed to decode report: %w", err) + } + decoded := new(Report) + if err = schema.Copy(decoded, values); err != nil { + return nil, fmt.Errorf("failed to copy report values to struct: %w", err) + } + return decoded, nil +} diff --git a/pkg/mercury/verifier/verifier.go b/pkg/mercury/verifier/verifier.go new file mode 100644 index 0000000000..02bb17d387 --- /dev/null +++ b/pkg/mercury/verifier/verifier.go @@ -0,0 +1,111 @@ +package verifier + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" +) + +var ( + ErrVerificationFailed = errors.New("verification failed") + + ErrFailedUnmarshalPubkey = fmt.Errorf("%w: failed to unmarshal pubkey", ErrVerificationFailed) + ErrVerifyInvalidSignatureCount = fmt.Errorf("%w: invalid signature count", ErrVerificationFailed) + ErrVerifyMismatchedSignatureCount = fmt.Errorf("%w: mismatched signature count", ErrVerificationFailed) + ErrVerifyInvalidSignature = fmt.Errorf("%w: invalid signature", ErrVerificationFailed) + ErrVerifySomeSignerUnauthorized = fmt.Errorf("%w: node unauthorized", ErrVerificationFailed) + ErrVerifyNonUniqueSignature = fmt.Errorf("%w: signer has already signed", ErrVerificationFailed) +) + +type SignedReport struct { + RawRs [][32]byte + RawSs [][32]byte + RawVs [32]byte + ReportContext [3][32]byte + Report []byte +} + +type Verifier interface { + // Verify checks the report against its configuration, and then verifies signatures. + // It replicates the Verifier contract's "verify" function for server side + // report verification. 
+ // See also: contracts/src/v0.8/llo-feeds/Verifier.sol + Verify(report SignedReport, f uint8, authorizedSigners []common.Address) (signers []common.Address, err error) +} + +var _ Verifier = (*verifier)(nil) + +type verifier struct{} + +func NewVerifier() Verifier { + return &verifier{} +} + +func (v *verifier) Verify(sr SignedReport, f uint8, authorizedSigners []common.Address) (signers []common.Address, err error) { + if len(sr.RawRs) != int(f+1) { + return signers, fmt.Errorf("%w: expected the number of signatures (len(rs)) to equal the number of signatures required (f), but f=%d and len(rs)=%d", ErrVerifyInvalidSignatureCount, f+1, len(sr.RawRs)) + } + if len(sr.RawRs) != len(sr.RawSs) { + return signers, fmt.Errorf("%w: got %d rs and %d ss, expected equal", ErrVerifyMismatchedSignatureCount, len(sr.RawRs), len(sr.RawSs)) + } + + sigData := ReportToSigData(sr.ReportContext, sr.Report) + + signerMap := make(map[common.Address]bool) + for _, signer := range authorizedSigners { + signerMap[signer] = false + } + + // Loop over every signature and collect errors. This wastes some CPU cycles, but we need to know everyone who + // signed the report. Some risk mitigated by checking that the number of signatures matches the expected (F) earlier + var verifyErrors error + reportSigners := make([]common.Address, len(sr.RawRs)) // For logging + metrics, string for convenience + for i := 0; i < len(sr.RawRs); i++ { + sig := append(sr.RawRs[i][:], sr.RawSs[i][:]...) + sig = append(sig, sr.RawVs[i]) // In the contract, you'll see vs+27. 
We don't do that here since geth adds +27 internally + + sigPubKey, err := crypto.Ecrecover(sigData, sig) + if err != nil { + verifyErrors = errors.Join(verifyErrors, fmt.Errorf("failed to recover signature: %w", err)) + continue + } + + verified := crypto.VerifySignature(sigPubKey, sigData, sig[:64]) + if !verified { + verifyErrors = errors.Join(verifyErrors, ErrVerifyInvalidSignature, fmt.Errorf("signature verification failed for pubKey: %x, sig: %x", sigPubKey, sig)) + continue + } + + unmarshalledPub, err := crypto.UnmarshalPubkey(sigPubKey) + if err != nil { + verifyErrors = errors.Join(verifyErrors, ErrFailedUnmarshalPubkey, fmt.Errorf("public key=%x error=%w", sigPubKey, err)) + continue + } + + address := crypto.PubkeyToAddress(*unmarshalledPub) + reportSigners[i] = address + encountered, authorized := signerMap[address] + if !authorized { + verifyErrors = errors.Join(verifyErrors, ErrVerifySomeSignerUnauthorized, fmt.Errorf("signer %s not in list of authorized nodes", address.String())) + continue + } + if encountered { + verifyErrors = errors.Join(verifyErrors, ErrVerifyNonUniqueSignature, fmt.Errorf("signer %s has already signed this report", address.String())) + continue + } + signerMap[address] = true + signers = append(signers, address) + } + return signers, verifyErrors +} + +func ReportToSigData(reportCtx [3][32]byte, sr types.Report) []byte { + sigData := crypto.Keccak256(sr) + sigData = append(sigData, reportCtx[0][:]...) + sigData = append(sigData, reportCtx[1][:]...) + sigData = append(sigData, reportCtx[2][:]...) 
// Test_Verifier exercises Verify against a real signed report captured from a
// mercury payload (ABI-decoded via mercury.PayloadTypes): the happy path, an
// empty authorized-signer set, a tampered report body, and a corrupted
// recovery-id vector.
func Test_Verifier(t *testing.T) {
	t.Parallel()

	// Full ABI-encoded payload: reportContext, report, rs, ss, vs.
	signedReportBinary := hexutil.MustDecode(`0x0006e1dde86b8a12add45546a14ea7e5efd10b67a373c6f4c41ecfa17d0005350000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000022000000000000000000000000000000000000000000000000000000000000002800001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012000034c9214519c942ad0aa84a3dd31870e6efe8b3fcab4e176c5226879b26c77000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000669150aa0000000000000000000000000000000000001504e1e6c380271bb8b129ac8f7c0000000000000000000000000000000000001504e1e6c380271bb8b129ac8f7c00000000000000000000000000000000000000000000000000000000669150ab0000000000000000000000000000000000000000000000000000002482116240000000000000000000000000000000000000000000000000000000247625a04000000000000000000000000000000000000000000000000000000024880743400000000000000000000000000000000000000000000000000000000000000002710ac21df88ab70c8822b68be53d7bed65c82ffc9204c1d7ccf3c6c4048b3ca2cafb26e7bbd8f13fe626c946baa5ffcb444319c4229b945ea65d0c99c21978a100000000000000000000000000000000000000000000000000000000000000022c07843f17aa3ecd55f52e99e889906f825f49e4ddfa9c74ca487dd4ff101cc636108a5323be838e658dffa1be67bd91e99f68c4bf86936b76c5d8193b707597`)
	m := make(map[string]any)
	err := mercury.PayloadTypes.UnpackIntoMap(m, signedReportBinary)
	require.NoError(t, err)

	signedReport := SignedReport{
		RawRs:         m["rawRs"].([][32]byte),
		RawSs:         m["rawSs"].([][32]byte),
		RawVs:         m["rawVs"].([32]byte),
		ReportContext: m["reportContext"].([3][32]byte),
		Report:        m["report"].([]byte),
	}

	f := uint8(1)

	v := NewVerifier()

	t.Run("Verify errors with unauthorized signers", func(t *testing.T) {
		_, err := v.Verify(signedReport, f, []common.Address{})
		require.Error(t, err)
		// Joined errors: one unauthorized-signer error per signature.
		assert.EqualError(t, err, "verification failed: node unauthorized\nsigner 0x3fc9FaA15d71EeD614e5322bd9554Fb35cC381d2 not in list of authorized nodes\nverification failed: node unauthorized\nsigner 0xBa6534da0E49c71cD9d0292203F1524876f33E23 not in list of authorized nodes")
	})

	t.Run("Verify succeeds with authorized signers", func(t *testing.T) {
		signers, err := v.Verify(signedReport, f, []common.Address{
			common.HexToAddress("0xde25e5b4005f611e356ce203900da4e37d72d58f"),
			common.HexToAddress("0x256431d41cf0d944f5877bc6c93846a9829dfc03"),
			common.HexToAddress("0x3fc9faa15d71eed614e5322bd9554fb35cc381d2"),
			common.HexToAddress("0xba6534da0e49c71cd9d0292203f1524876f33e23"),
		})
		require.NoError(t, err)
		// Only the two addresses that actually signed are returned.
		assert.Equal(t, []common.Address{
			common.HexToAddress("0x3fc9faa15d71eed614e5322bd9554fb35cc381d2"),
			common.HexToAddress("0xBa6534da0E49c71cD9d0292203F1524876f33E23"),
		}, signers)
	})

	t.Run("Verify fails if report has been tampered with", func(t *testing.T) {
		badReport := signedReport
		badReport.Report = []byte{0x0011}
		_, err := v.Verify(badReport, f, []common.Address{
			common.HexToAddress("0xde25e5b4005f611e356ce203900da4e37d72d58f"),
			common.HexToAddress("0x256431d41cf0d944f5877bc6c93846a9829dfc03"),
			common.HexToAddress("0x3fc9faa15d71eed614e5322bd9554fb35cc381d2"),
			common.HexToAddress("0xba6534da0e49c71cd9d0292203f1524876f33e23"),
		})

		require.Error(t, err)
	})

	t.Run("Verify fails if rawVs has been changed", func(t *testing.T) {
		badReport := signedReport
		badReport.RawVs = [32]byte{0x0011}
		_, err := v.Verify(badReport, f, []common.Address{
			common.HexToAddress("0xde25e5b4005f611e356ce203900da4e37d72d58f"),
			common.HexToAddress("0x256431d41cf0d944f5877bc6c93846a9829dfc03"),
			common.HexToAddress("0x3fc9faa15d71eed614e5322bd9554fb35cc381d2"),
			common.HexToAddress("0xba6534da0e49c71cd9d0292203f1524876f33e23"),
		})

		require.Error(t, err)
		assert.Contains(t, err.Error(), "failed to recover signature: invalid signature recovery id")
	})
}
// Fetcher fetches the latest report for a feed from a mercury server.
type Fetcher interface {
	LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error)
}

// Client is the subset of the wsrpc client that the cache needs: fetching,
// plus identity (ServerURL) and an escape hatch to the raw uncached client.
type Client interface {
	Fetcher
	ServerURL() string
	RawClient() pb.MercuryClient
}

// Cache is scoped to one particular mercury server
// Use CacheSet to hold lookups for multiple servers
type Cache interface {
	Fetcher
	services.Service
}

type Config struct {
	// LatestReportTTL controls how "stale" we will allow a price to be e.g. if
	// set to 1s, a new price will always be fetched if the last result was
	// from more than 1 second ago.
	//
	// Another way of looking at it is such: the cache will _never_ return a
	// price that was queried from before now-LatestReportTTL.
	//
	// Setting to zero disables caching entirely.
	LatestReportTTL time.Duration
	// MaxStaleAge is the maximum amount of time that a value can be stale
	// before it is deleted from the cache (a form of garbage collection).
	//
	// This should generally be set to something much larger than
	// LatestReportTTL. Setting to zero disables garbage collection.
	MaxStaleAge time.Duration
	// LatestReportDeadline controls how long to wait for a response before
	// retrying. Setting this to zero will wait indefinitely.
	LatestReportDeadline time.Duration
}

// cacheVal is one cache entry. The embedded RWMutex guards all fields;
// fetching/fetchCh implement a single-flight protocol so that at most one
// background fetch per feed is in flight at a time.
type cacheVal struct {
	sync.RWMutex

	fetching bool            // true while a background fetch is in flight
	fetchCh  chan (struct{}) // closed when the in-flight fetch completes; nil when not fetching

	val *pb.LatestReportResponse // last successfully fetched value
	err error                    // most recent fetch error

	expiresAt time.Time // instant after which val must be re-fetched
}

// read returns the current value/error under the read lock.
func (v *cacheVal) read() (*pb.LatestReportResponse, error) {
	v.RLock()
	defer v.RUnlock()
	return v.val, v.err
}

// caller expected to hold lock
func (v *cacheVal) initiateFetch() <-chan struct{} {
	if v.fetching {
		panic("cannot initiateFetch on cache val that is already fetching")
	}
	v.fetching = true
	v.fetchCh = make(chan struct{})
	return v.fetchCh
}

// setError records the latest fetch error without touching val or expiry,
// so waiters can surface it while the background retry loop continues.
func (v *cacheVal) setError(err error) {
	v.Lock()
	defer v.Unlock()
	v.err = err
}

// completeFetch stores the fetch result, wakes all waiters by closing
// fetchCh, and clears the fetching flag. expiresAt is only advanced on
// success, so a failed fetch leaves the entry immediately re-fetchable.
func (v *cacheVal) completeFetch(val *pb.LatestReportResponse, err error, expiresAt time.Time) {
	v.Lock()
	defer v.Unlock()
	if !v.fetching {
		panic("can only completeFetch on cache val that is fetching")
	}
	v.val = val
	v.err = err
	if err == nil {
		v.expiresAt = expiresAt
	}
	close(v.fetchCh)
	v.fetchCh = nil
	v.fetching = false
}

// abandonFetch terminates an in-flight fetch with an error (e.g. when the
// cache is not started), releasing any waiters.
func (v *cacheVal) abandonFetch(err error) {
	v.completeFetch(nil, err, time.Now())
}

// waitForResult blocks until the in-flight fetch completes (chResult closes),
// the caller's context is done, or the cache is stopped. On context
// cancellation the last recorded fetch error (if any) is joined with ctx.Err().
func (v *cacheVal) waitForResult(ctx context.Context, chResult <-chan struct{}, chStop <-chan struct{}) (*pb.LatestReportResponse, error) {
	select {
	case <-ctx.Done():
		_, err := v.read()
		return nil, errors.Join(err, ctx.Err())
	case <-chStop:
		return nil, errors.New("stopped")
	case <-chResult:
		return v.read()
	}
}

// memCache stores values in memory
// it will never return a stale value older than latestPriceTTL, instead
// waiting for a successful fetch or caller context cancels, whichever comes
// first
type memCache struct {
	services.StateMachine
	lggr logger.SugaredLogger

	client Client

	cfg Config

	// cache maps feed ID (hex string) -> *cacheVal
	cache sync.Map

	wg     sync.WaitGroup
	chStop services.StopChan
}
// newMemCache builds an (unstarted) memCache for a single mercury server.
func newMemCache(lggr logger.Logger, client Client, cfg Config) *memCache {
	return &memCache{
		services.StateMachine{},
		logger.Sugared(lggr).Named("MemCache").Named(client.ServerURL()),
		client,
		cfg,
		sync.Map{},
		sync.WaitGroup{},
		make(chan (struct{})),
	}
}

// LatestReport
// NOTE: This will actually block on all types of errors, even non-timeouts.
// Context should be set carefully and timed to be the maximum time we are
// willing to wait for a result, the background thread will keep re-querying
// until it gets one even on networking errors etc.
func (m *memCache) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) {
	if req == nil {
		return nil, errors.New("req must not be nil")
	}
	feedIDHex := mercuryutils.BytesToFeedID(req.FeedId).String()
	// Caching disabled: pass straight through to the raw client.
	if m.cfg.LatestReportTTL <= 0 {
		return m.client.RawClient().LatestReport(ctx, req)
	}
	vi, loaded := m.cache.LoadOrStore(feedIDHex, &cacheVal{
		sync.RWMutex{},
		false,
		nil,
		nil,
		nil,
		time.Now(), // first result is always "expired" and requires fetch
	})
	v := vi.(*cacheVal)

	m.lggr.Tracew("LatestReport", "feedID", feedIDHex, "loaded", loaded)

	// HOT PATH: try to serve under the read lock only.
	v.RLock()
	if time.Now().Before(v.expiresAt) {
		// CACHE HIT
		promCacheHitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
		m.lggr.Tracew("LatestReport CACHE HIT (hot path)", "feedID", feedIDHex)

		defer v.RUnlock()
		return v.val, nil
	} else if v.fetching {
		// CACHE WAIT
		promCacheWaitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
		m.lggr.Tracew("LatestReport CACHE WAIT (hot path)", "feedID", feedIDHex)
		// if someone else is fetching then wait for the fetch to complete
		ch := v.fetchCh
		v.RUnlock()
		return v.waitForResult(ctx, ch, m.chStop)
	}
	// CACHE MISS
	promCacheMissCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
	// fallthrough to cold path and fetch
	v.RUnlock()

	// COLD PATH: retake the write lock and re-check, since another goroutine
	// may have completed or started a fetch between RUnlock and Lock.
	v.Lock()
	if time.Now().Before(v.expiresAt) {
		// CACHE HIT
		promCacheHitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
		m.lggr.Tracew("LatestReport CACHE HIT (cold path)", "feedID", feedIDHex)
		defer v.Unlock()
		return v.val, nil
	} else if v.fetching {
		// CACHE WAIT
		promCacheWaitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
		m.lggr.Tracew("LatestReport CACHE WAIT (cold path)", "feedID", feedIDHex)
		// if someone else is fetching then wait for the fetch to complete
		ch := v.fetchCh
		v.Unlock()
		return v.waitForResult(ctx, ch, m.chStop)
	}
	// CACHE MISS
	promCacheMissCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
	m.lggr.Tracew("LatestReport CACHE MISS (cold path)", "feedID", feedIDHex)
	// initiate the fetch and wait for result
	ch := v.initiateFetch()
	v.Unlock()

	// The background fetch goroutine is owned by the service lifecycle; refuse
	// to spawn it if the cache has not been started (or has been stopped).
	ok := m.IfStarted(func() {
		m.wg.Add(1)
		go m.fetch(req, v)
	})
	if !ok {
		err := fmt.Errorf("memCache must be started, but is: %v", m.State())
		v.abandonFetch(err)
		return nil, err
	}
	return v.waitForResult(ctx, ch, m.chStop)
}

// minBackoffRetryInterval is the floor for the fetch retry backoff.
const minBackoffRetryInterval = 50 * time.Millisecond

// newBackoff creates a backoff for retrying
func (m *memCache) newBackoff() backoff.Backoff {
	min := minBackoffRetryInterval
	max := m.cfg.LatestReportTTL / 2
	if min > max {
		// avoid setting a min that is greater than max
		min = max
	}
	return backoff.Backoff{
		Min:    min,
		Max:    max,
		Factor: 2,
		Jitter: true,
	}
}
// fetch continually tries to call FetchLatestReport and write the result to v
// it writes errors as they come up
func (m *memCache) fetch(req *pb.LatestReportRequest, v *cacheVal) {
	defer m.wg.Done()
	b := m.newBackoff()
	memcacheCtx, cancel := m.chStop.NewCtx()
	defer cancel()
	var t time.Time
	var val *pb.LatestReportResponse
	var err error
	// Deferred so that whatever terminates the loop (success or shutdown)
	// always releases waiters; expiry is measured from the attempt start t.
	defer func() {
		v.completeFetch(val, err, t.Add(m.cfg.LatestReportTTL))
	}()

	for {
		t = time.Now()

		ctx := memcacheCtx
		cancel := func() {}
		if m.cfg.LatestReportDeadline > 0 {
			ctx, cancel = context.WithTimeoutCause(memcacheCtx, m.cfg.LatestReportDeadline, errors.New("latest report fetch deadline exceeded"))
		}

		// NOTE: must drop down to RawClient here otherwise we enter an
		// infinite loop of calling a client that calls back to this same cache
		// and on and on
		val, err = m.client.RawClient().LatestReport(ctx, req)
		cancel()
		// Surface the error to current waiters immediately, even though the
		// retry loop may still be running.
		v.setError(err)
		if memcacheCtx.Err() != nil {
			// stopped
			return
		} else if err != nil {
			m.lggr.Warnw("FetchLatestReport failed", "err", err)
			promFetchFailedCount.WithLabelValues(m.client.ServerURL(), mercuryutils.BytesToFeedID(req.FeedId).String()).Inc()
			select {
			case <-m.chStop:
				return
			case <-time.After(b.Duration()):
				continue
			}
		}
		return
	}
}

// Start transitions the cache to started and launches the GC runloop.
func (m *memCache) Start(context.Context) error {
	return m.StartOnce(m.Name(), func() error {
		m.lggr.Debugw("MemCache starting", "config", m.cfg, "serverURL", m.client.ServerURL())
		m.wg.Add(1)
		go m.runloop()
		return nil
	})
}

// runloop periodically garbage-collects stale entries; a MaxStaleAge of zero
// disables collection entirely.
func (m *memCache) runloop() {
	defer m.wg.Done()

	if m.cfg.MaxStaleAge == 0 {
		return
	}
	t := services.NewTicker(m.cfg.MaxStaleAge)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			m.cleanup()
			t.Reset()
		case <-m.chStop:
			return
		}
	}
}

// remove anything that has been stale for longer than maxStaleAge so that
// cache doesn't grow forever and cause memory leaks
//
// NOTE: This should be concurrent-safe with LatestReport. The only time they
// can race is if the cache item has expired past the stale age between
// creation of the cache item and start of fetch. This is unlikely, and even if
// it does occur, the worst case is that we discard a cache item early and
// double fetch, which isn't bad at all.
func (m *memCache) cleanup() {
	m.cache.Range(func(k, vi any) bool {
		v := vi.(*cacheVal)
		v.RLock()
		defer v.RUnlock()
		if v.fetching {
			// skip cleanup if fetching
			return true
		}
		if time.Now().After(v.expiresAt.Add(m.cfg.MaxStaleAge)) {
			// garbage collection
			m.cache.Delete(k)
		}
		return true
	})
}

// Close signals shutdown via chStop and waits for the runloop and any
// in-flight fetch goroutines to finish.
func (m *memCache) Close() error {
	return m.StopOnce(m.Name(), func() error {
		close(m.chStop)
		m.wg.Wait()
		return nil
	})
}
func (m *memCache) HealthReport() map[string]error {
	return map[string]error{
		m.Name(): m.Ready(),
	}
}
func (m *memCache) Name() string { return m.lggr.Name() }
cs.StopOnce("CacheSet", func() error { + cs.Lock() + defer cs.Unlock() + caches := maps.Values(cs.caches) + if err := services.MultiCloser(caches).Close(); err != nil { + return err + } + cs.caches = nil + return nil + }) +} + +func (cs *cacheSet) Get(ctx context.Context, client Client) (f Fetcher, err error) { + if cs.cfg.LatestReportTTL == 0 { + // caching disabled + return nil, nil + } + ok := cs.IfStarted(func() { + f, err = cs.get(ctx, client) + }) + if !ok { + return nil, fmt.Errorf("cacheSet must be started, but is: %v", cs.State()) + } + return +} + +func (cs *cacheSet) get(ctx context.Context, client Client) (Fetcher, error) { + sURL := client.ServerURL() + // HOT PATH + cs.RLock() + c, exists := cs.caches[sURL] + cs.RUnlock() + if exists { + return c, nil + } + + // COLD PATH + cs.Lock() + defer cs.Unlock() + c, exists = cs.caches[sURL] + if exists { + return c, nil + } + c = newMemCache(cs.lggr, client, cs.cfg) + if err := c.Start(ctx); err != nil { + return nil, err + } + cs.caches[sURL] = c + return c, nil +} + +func (cs *cacheSet) HealthReport() map[string]error { + report := map[string]error{ + cs.Name(): cs.Ready(), + } + cs.RLock() + caches := maps.Values(cs.caches) + cs.RUnlock() + for _, c := range caches { + services.CopyHealth(report, c.HealthReport()) + } + return report +} +func (cs *cacheSet) Name() string { return cs.lggr.Name() } diff --git a/pkg/mercury/wsrpc/cache/cache_set_test.go b/pkg/mercury/wsrpc/cache/cache_set_test.go new file mode 100644 index 0000000000..c7eac22261 --- /dev/null +++ b/pkg/mercury/wsrpc/cache/cache_set_test.go @@ -0,0 +1,58 @@ +package cache + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" +) + +func Test_CacheSet(t *testing.T) { + lggr := logger.Test(t) + cs := 
// Test_CacheSet verifies the lazy-creation behavior of cacheSet.Get: disabled
// caching returns (nil, nil), first use creates a memCache, subsequent calls
// return the same instance.
func Test_CacheSet(t *testing.T) {
	lggr := logger.Test(t)
	cs := newCacheSet(lggr, Config{LatestReportTTL: 1})
	disabledCs := newCacheSet(lggr, Config{LatestReportTTL: 0})
	ctx := testutils.Context(t)
	servicetest.Run(t, cs)

	t.Run("Get", func(t *testing.T) {
		c := &mockClient{}

		var err error
		var f Fetcher
		t.Run("with caching disabled, returns nil, nil", func(t *testing.T) {
			assert.Empty(t, disabledCs.caches)

			f, err = disabledCs.Get(ctx, c)
			require.NoError(t, err)

			assert.Nil(t, f)
			assert.Empty(t, disabledCs.caches)
		})

		t.Run("with virgin cacheset, makes new entry and returns it", func(t *testing.T) {
			assert.Empty(t, cs.caches)

			f, err = cs.Get(ctx, c)
			require.NoError(t, err)

			assert.IsType(t, &memCache{}, f)
			assert.Len(t, cs.caches, 1)
		})
		t.Run("with existing cache for value, returns that", func(t *testing.T) {
			var f2 Fetcher
			assert.Len(t, cs.caches, 1)

			f2, err = cs.Get(ctx, c)
			require.NoError(t, err)

			assert.IsType(t, &memCache{}, f)
			assert.Equal(t, f, f2)
			assert.Len(t, cs.caches, 1)
		})
	})
}

// Test_Cache covers memCache behavior: nil-request validation, pass-through
// when TTL=0, per-feed caching, expiry-driven re-query, completeFetch expiry
// semantics, and error propagation when a caller's context is canceled while
// a fetch is (apparently) in flight.
func Test_Cache(t *testing.T) {
	lggr := logger.Test(t)
	client := &mockClient{}
	cfg := Config{}
	ctx := testutils.Context(t)

	req1 := &pb.LatestReportRequest{FeedId: []byte{1}}
	req2 := &pb.LatestReportRequest{FeedId: []byte{2}}
	req3 := &pb.LatestReportRequest{FeedId: []byte{3}}

	feedID1Hex := mercuryutils.BytesToFeedID(req1.FeedId).String()

	t.Run("errors with nil req", func(t *testing.T) {
		c := newMemCache(lggr, client, cfg)

		_, err := c.LatestReport(ctx, nil)
		assert.EqualError(t, err, "req must not be nil")
	})

	t.Run("with LatestReportTTL=0 does no caching", func(t *testing.T) {
		c := newMemCache(lggr, client, cfg)

		req := &pb.LatestReportRequest{}
		for i := range 5 {
			client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}

			resp, err := c.LatestReport(ctx, req)
			require.NoError(t, err)
			assert.Equal(t, client.resp, resp)
		}

		client.resp = nil
		client.err = errors.New("something exploded")

		resp, err := c.LatestReport(ctx, req)
		assert.EqualError(t, err, "something exploded")
		assert.Nil(t, resp)
	})

	t.Run("caches repeated calls to LatestReport, keyed by request", func(t *testing.T) {
		cfg.LatestReportTTL = neverExpireTTL
		client.err = nil
		c := newMemCache(lggr, client, cfg)

		t.Run("if cache is unstarted, returns error", func(t *testing.T) {
			// starting the cache is required for state management if we
			// actually cache results, since fetches are initiated async and
			// need to be cleaned up properly on close
			_, err := c.LatestReport(ctx, &pb.LatestReportRequest{})
			assert.EqualError(t, err, "memCache must be started, but is: Unstarted")
		})

		err := c.StartOnce("test start", func() error { return nil })
		require.NoError(t, err)

		t.Run("returns cached value for key", func(t *testing.T) {
			var firstResp *pb.LatestReportResponse
			for i := range 5 {
				client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}
				if firstResp == nil {
					firstResp = client.resp
				}

				resp, err := c.LatestReport(ctx, req1)
				require.NoError(t, err)
				assert.Equal(t, firstResp, resp)
			}
		})

		t.Run("cache keys do not conflict", func(t *testing.T) {
			var firstResp1 *pb.LatestReportResponse
			for i := 5; i < 10; i++ {
				client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}
				if firstResp1 == nil {
					firstResp1 = client.resp
				}

				resp, err := c.LatestReport(ctx, req2)
				require.NoError(t, err)
				assert.Equal(t, firstResp1, resp)
			}

			var firstResp2 *pb.LatestReportResponse
			for i := 10; i < 15; i++ {
				client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}
				if firstResp2 == nil {
					firstResp2 = client.resp
				}

				resp, err := c.LatestReport(ctx, req3)
				require.NoError(t, err)
				assert.Equal(t, firstResp2, resp)
			}

			// req1 key still has same value
			resp, err := c.LatestReport(ctx, req1)
			require.NoError(t, err)
			assert.Equal(t, []byte(strconv.Itoa(0)), resp.Report.Price)

			// req2 key still has same value
			resp, err = c.LatestReport(ctx, req2)
			require.NoError(t, err)
			assert.Equal(t, []byte(strconv.Itoa(5)), resp.Report.Price)
		})

		t.Run("re-queries when a cache item has expired", func(t *testing.T) {
			vi, exists := c.cache.Load(feedID1Hex)
			require.True(t, exists)
			v := vi.(*cacheVal)
			// Force expiry so the next LatestReport performs a fresh fetch.
			v.expiresAt = time.Now().Add(-1 * time.Second)

			client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(15))}}

			resp, err := c.LatestReport(ctx, req1)
			require.NoError(t, err)
			assert.Equal(t, client.resp, resp)

			// querying again yields the same cached item
			resp, err = c.LatestReport(ctx, req1)
			require.NoError(t, err)
			assert.Equal(t, client.resp, resp)
		})
	})

	t.Run("complete fetch", func(t *testing.T) {
		t.Run("does not change expiry if fetch returns error", func(t *testing.T) {
			expires := time.Now().Add(-1 * time.Second)
			v := &cacheVal{
				fetching:  true,
				fetchCh:   make(chan (struct{})),
				val:       nil,
				err:       nil,
				expiresAt: expires,
			}
			v.completeFetch(nil, errors.New("foo"), time.Now().Add(neverExpireTTL))
			assert.Equal(t, expires, v.expiresAt)

			v = &cacheVal{
				fetching:  true,
				fetchCh:   make(chan (struct{})),
				val:       nil,
				err:       nil,
				expiresAt: expires,
			}
			expires = time.Now().Add(neverExpireTTL)
			v.completeFetch(nil, nil, expires)
			assert.Equal(t, expires, v.expiresAt)
		})
	})

	t.Run("timeouts", func(t *testing.T) {
		c := newMemCache(lggr, client, cfg)
		// simulate fetch already executing in background
		v := &cacheVal{
			fetching:  true,
			fetchCh:   make(chan (struct{})),
			val:       nil,
			err:       nil,
			expiresAt: time.Now().Add(-1 * time.Second),
		}
		c.cache.Store(feedID1Hex, v)

		canceledCtx, cancel := context.WithCancel(testutils.Context(t))
		cancel()

		t.Run("returns context deadline exceeded error if fetch takes too long", func(t *testing.T) {
			_, err := c.LatestReport(canceledCtx, req1)
			require.Error(t, err)
			assert.ErrorIs(t, err, context.Canceled)
			assert.EqualError(t, err, "context canceled")
		})
		t.Run("returns wrapped context deadline exceeded error if fetch has errored and is in the retry loop", func(t *testing.T) {
			v.err = errors.New("some background fetch error")

			_, err := c.LatestReport(canceledCtx, req1)
			require.Error(t, err)
			assert.ErrorIs(t, err, context.Canceled)
			assert.EqualError(t, err, "some background fetch error\ncontext canceled")
		})
	})
}
return &mockRawClient{m.resp, m.err} +} + +type mockRawClient struct { + resp *pb.LatestReportResponse + err error +} + +func (m *mockRawClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + return nil, nil +} +func (m *mockRawClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + return m.resp, m.err +} diff --git a/pkg/mercury/wsrpc/client.go b/pkg/mercury/wsrpc/client.go new file mode 100644 index 0000000000..5dc622d1a4 --- /dev/null +++ b/pkg/mercury/wsrpc/client.go @@ -0,0 +1,420 @@ +package wsrpc + +import ( + "context" + "crypto" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + grpc_connectivity "google.golang.org/grpc/connectivity" + + "github.com/smartcontractkit/wsrpc" + "github.com/smartcontractkit/wsrpc/connectivity" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-data-streams/rpc" + "github.com/smartcontractkit/chainlink-evm/pkg/utils" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/cache" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" +) + +// MaxConsecutiveRequestFailures controls how many consecutive requests are +// allowed to time out before we reset the connection +const MaxConsecutiveRequestFailures = 10 + +var ( + timeoutCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_timeout_count", + Help: "Running count of transmit timeouts", + }, + []string{"serverURL"}, + ) + dialCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_dial_count", + Help: "Running count of dials to mercury server", + }, + []string{"serverURL"}, + ) + dialSuccessCount = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_dial_success_count", + Help: "Running count of successful dials to mercury server", + }, + []string{"serverURL"}, + ) + dialErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_dial_error_count", + Help: "Running count of errored dials to mercury server", + }, + []string{"serverURL"}, + ) + connectionResetCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_connection_reset_count", + Help: fmt.Sprintf("Running count of times connection to mercury server has been reset (connection reset happens automatically after %d consecutive request failures)", MaxConsecutiveRequestFailures), + }, + []string{"serverURL"}, + ) +) + +type Client interface { + services.Service + pb.MercuryClient + ServerURL() string + RawClient() pb.MercuryClient +} + +type Conn interface { + wsrpc.ClientInterface + WaitForReady(ctx context.Context) bool + GetState() grpc_connectivity.State + Close() error +} + +type DialWithContextFunc func(ctxCaller context.Context, target string, opts ...wsrpc.DialOption) (Conn, error) + +type client struct { + services.StateMachine + + csaSigner crypto.Signer + serverPubKey []byte + serverURL string + + dialWithContext DialWithContextFunc + + logger logger.SugaredLogger + conn Conn + rawClient pb.MercuryClient + mu sync.RWMutex + + consecutiveTimeoutCnt atomic.Int32 + wg sync.WaitGroup + chStop services.StopChan + chResetTransport chan struct{} + + cacheSet cache.CacheSet + cache cache.Fetcher + + timeoutCountMetric prometheus.Counter + dialCountMetric prometheus.Counter + dialSuccessCountMetric prometheus.Counter + dialErrorCountMetric prometheus.Counter + connectionResetCountMetric prometheus.Counter +} + +type ClientOpts struct { + Logger logger.SugaredLogger + CSASigner crypto.Signer + ServerPubKey []byte + ServerURL string + CacheSet cache.CacheSet + + // DialWithContext allows optional dependency injection for testing + DialWithContext 
DialWithContextFunc +} + +// Consumers of wsrpc package should not usually call NewClient directly, but instead use the Pool +func NewClient(opts ClientOpts) Client { + return newClient(opts) +} + +func newClient(opts ClientOpts) *client { + var dialWithContext DialWithContextFunc + if opts.DialWithContext != nil { + dialWithContext = opts.DialWithContext + } else { + // NOTE: Wrap here since wsrpc.DialWithContext returns a concrete *wsrpc.Conn, not an interface + dialWithContext = func(ctx context.Context, target string, opts ...wsrpc.DialOption) (Conn, error) { + conn, err := wsrpc.DialWithContext(ctx, target, opts...) + return conn, err + } + } + return &client{ + dialWithContext: dialWithContext, + csaSigner: opts.CSASigner, + serverPubKey: opts.ServerPubKey, + serverURL: opts.ServerURL, + logger: opts.Logger.Named("WSRPC").Named(opts.ServerURL).With("serverURL", opts.ServerURL), + chResetTransport: make(chan struct{}, 1), + cacheSet: opts.CacheSet, + chStop: make(services.StopChan), + timeoutCountMetric: timeoutCount.WithLabelValues(opts.ServerURL), + dialCountMetric: dialCount.WithLabelValues(opts.ServerURL), + dialSuccessCountMetric: dialSuccessCount.WithLabelValues(opts.ServerURL), + dialErrorCountMetric: dialErrorCount.WithLabelValues(opts.ServerURL), + connectionResetCountMetric: connectionResetCount.WithLabelValues(opts.ServerURL), + } +} + +func (w *client) Start(ctx context.Context) error { + return w.StartOnce("WSRPC Client", func() (err error) { + // NOTE: This is not a mistake, dial is non-blocking so it should use a + // background context, not the Start context + if err = w.dial(context.Background()); err != nil { + return err + } + w.cache, err = w.cacheSet.Get(ctx, w) + if err != nil { + return err + } + w.wg.Add(1) + go w.runloop() + return nil + }) +} + +// NOTE: Dial is non-blocking, and will retry on an exponential backoff +// in the background until close is called, or context is cancelled. 
+// This is why we use the background context, not the start context here. +// +// Any transmits made while client is still trying to dial will fail +// with error. +func (w *client) dial(ctx context.Context, opts ...wsrpc.DialOption) error { + w.dialCountMetric.Inc() + conn, err := w.dialWithContext(ctx, w.serverURL, + append(opts, + wsrpc.WithTransportSigner(w.csaSigner, w.serverPubKey), + wsrpc.WithLogger(w.logger), + )..., + ) + if err != nil { + w.dialErrorCountMetric.Inc() + setLivenessMetric(false) + return errors.Wrap(err, "failed to dial wsrpc client") + } + w.dialSuccessCountMetric.Inc() + setLivenessMetric(true) + w.mu.Lock() + w.conn = conn + w.rawClient = pb.NewMercuryClient(conn) + w.mu.Unlock() + return nil +} + +func (w *client) runloop() { + defer w.wg.Done() + for { + select { + case <-w.chStop: + return + case <-w.chResetTransport: + // Using channel here ensures we only have one reset in process at + // any given time + w.resetTransport() + } + } +} + +// resetTransport disconnects and reconnects to the mercury server +func (w *client) resetTransport() { + w.connectionResetCountMetric.Inc() + ok := w.IfStarted(func() { + w.mu.RLock() + defer w.mu.RUnlock() + w.conn.Close() // Close is safe to call multiple times + }) + if !ok { + panic("resetTransport should never be called unless client is in 'started' state") + } + ctx, cancel := w.chStop.NewCtx() + defer cancel() + b := utils.NewRedialBackoff() + for { + // Will block until successful dial, or context is canceled (i.e. 
on close) + err := w.dial(ctx, wsrpc.WithBlock()) + if err == nil { + break + } + if ctx.Err() != nil { + w.logger.Debugw("ResetTransport exiting due to client Close", "err", err) + return + } + w.logger.Errorw("ResetTransport failed to redial", "err", err) + time.Sleep(b.Duration()) + } + w.logger.Info("ResetTransport successfully redialled") +} + +func (w *client) Close() error { + return w.StopOnce("WSRPC Client", func() error { + close(w.chStop) + w.mu.RLock() + w.conn.Close() + w.mu.RUnlock() + w.wg.Wait() + return nil + }) +} + +func (w *client) Name() string { + return w.logger.Name() +} + +func (w *client) HealthReport() map[string]error { + return map[string]error{w.Name(): w.Healthy()} +} + +// Healthy if connected +func (w *client) Healthy() (err error) { + if err = w.StateMachine.Healthy(); err != nil { + return err + } + state := w.conn.GetState() + if state != grpc_connectivity.Ready { + return errors.Errorf("client state should be %s; got %s", connectivity.Ready, state) + } + return nil +} + +func (w *client) waitForReady(ctx context.Context) (err error) { + ok := w.IfStarted(func() { + if ready := w.conn.WaitForReady(ctx); !ready { + err = errors.Errorf("websocket client not ready; got state: %v", w.conn.GetState()) + return + } + }) + if !ok { + return errors.New("client is not started") + } + return +} + +func (w *client) Transmit(ctx context.Context, req *pb.TransmitRequest) (resp *pb.TransmitResponse, err error) { + ok := w.IfStarted(func() { + w.logger.Trace("Transmit") + start := time.Now() + if err = w.waitForReady(ctx); err != nil { + err = errors.Wrap(err, "Transmit call failed") + return + } + w.mu.RLock() + rc := w.rawClient + w.mu.RUnlock() + resp, err = rc.Transmit(ctx, req) + w.handleTimeout(err) + if err != nil { + w.logger.Warnw("Transmit call failed due to networking error", "err", err, "resp", resp) + incRequestStatusMetric(statusFailed) + } else { + w.logger.Tracew("Transmit call succeeded", "resp", resp) + 
incRequestStatusMetric(statusSuccess) + setRequestLatencyMetric(float64(time.Since(start).Milliseconds())) + } + }) + if !ok { + err = errors.New("client is not started") + } + return +} + +// hacky workaround to trap panics from buggy underlying wsrpc lib and restart +// the connection from a known good state +func (w *client) handlePanic(r any) { + w.chResetTransport <- struct{}{} +} + +func (w *client) handleTimeout(err error) { + if errors.Is(err, context.DeadlineExceeded) { + w.timeoutCountMetric.Inc() + cnt := w.consecutiveTimeoutCnt.Add(1) + if cnt == MaxConsecutiveRequestFailures { + w.logger.Errorf("Timed out on %d consecutive transmits, resetting transport", cnt) + // NOTE: If we get at least MaxConsecutiveRequestFailures request + // timeouts in a row, close and re-open the websocket connection. + // + // This *shouldn't* be necessary in theory (ideally, wsrpc would + // handle it for us) but it acts as a "belts and braces" approach + // to ensure we get a websocket connection back up and running + // again if it gets itself into a bad state. + select { + case w.chResetTransport <- struct{}{}: + default: + // This can happen if we had MaxConsecutiveRequestFailures + // consecutive timeouts, already sent a reset signal, then the + // connection started working again (resetting the count) then + // we got MaxConsecutiveRequestFailures additional failures + // before the runloop was able to close the bad connection. + // + // It should be safe to just ignore in this case. + // + // Debug log in case my reasoning is wrong. 
+ w.logger.Debugf("Transport is resetting, cnt=%d", cnt) + } + } + } else { + w.consecutiveTimeoutCnt.Store(0) + } +} + +func (w *client) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) { + ok := w.IfStarted(func() { + lggr := w.logger.With("req.FeedId", hexutil.Encode(req.FeedId)) + lggr.Trace("LatestReport") + if err = w.waitForReady(ctx); err != nil { + err = errors.Wrap(err, "LatestReport failed") + return + } + var cached bool + if w.cache == nil { + w.mu.RLock() + rc := w.rawClient + w.mu.RUnlock() + resp, err = rc.LatestReport(ctx, req) + w.handleTimeout(err) + } else { + cached = true + resp, err = w.cache.LatestReport(ctx, req) + } + switch { + case err != nil: + lggr.Errorw("LatestReport failed", "err", err, "resp", resp, "cached", cached) + case resp.Error != "": + lggr.Errorw("LatestReport failed; mercury server returned error", "err", resp.Error, "resp", resp, "cached", cached) + case !cached: + lggr.Debugw("LatestReport succeeded", "resp", resp, "cached", cached) + default: + lggr.Tracew("LatestReport succeeded", "resp", resp, "cached", cached) + } + }) + if !ok { + err = errors.New("client is not started") + } + return +} + +func (w *client) ServerURL() string { + return w.serverURL +} + +func (w *client) RawClient() pb.MercuryClient { + w.mu.RLock() + defer w.mu.RUnlock() + return w.rawClient +} + +var _ rpc.Client = GRPCCompatibilityWrapper{} + +type GRPCCompatibilityWrapper struct { + Client +} + +func (w GRPCCompatibilityWrapper) Transmit(ctx context.Context, in *rpc.TransmitRequest) (*rpc.TransmitResponse, error) { + req := &pb.TransmitRequest{ + Payload: in.Payload, + ReportFormat: in.ReportFormat, + } + resp, err := w.Client.Transmit(ctx, req) + if err != nil { + return nil, err + } + return &rpc.TransmitResponse{ + Code: resp.Code, + Error: resp.Error, + }, nil +} diff --git a/pkg/mercury/wsrpc/client_test.go b/pkg/mercury/wsrpc/client_test.go new file mode 100644 index 
0000000000..de612fc11a --- /dev/null +++ b/pkg/mercury/wsrpc/client_test.go @@ -0,0 +1,188 @@ +package wsrpc + +import ( + "context" + "math/big" + "math/rand/v2" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/keystore/corekeys/csakey" + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/cache" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/mocks" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" +) + +// simulate start without dialling +func simulateStart(ctx context.Context, t *testing.T, c *client) { + require.NoError(t, c.StartOnce("Mock WSRPC Client", func() (err error) { + c.cache, err = c.cacheSet.Get(ctx, c) + return err + })) +} + +var _ cache.CacheSet = &mockCacheSet{} + +type mockCacheSet struct{} + +func (m *mockCacheSet) Get(ctx context.Context, client cache.Client) (cache.Fetcher, error) { + return nil, nil +} +func (m *mockCacheSet) Start(context.Context) error { return nil } +func (m *mockCacheSet) Ready() error { return nil } +func (m *mockCacheSet) HealthReport() map[string]error { return nil } +func (m *mockCacheSet) Name() string { return "" } +func (m *mockCacheSet) Close() error { return nil } + +var _ cache.Cache = &mockCache{} + +type mockCache struct{} + +func (m *mockCache) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) { + return nil, nil +} +func (m *mockCache) Start(context.Context) error { return nil } +func (m *mockCache) Ready() error { return nil } +func (m *mockCache) HealthReport() map[string]error { return nil } +func (m *mockCache) Name() string { return "" } +func (m *mockCache) Close() error { return nil } + +func newNoopCacheSet() 
cache.CacheSet { + return &mockCacheSet{} +} + +func Test_Client_Transmit(t *testing.T) { + lggr := logger.Test(t) + ctx := testutils.Context(t) + req := &pb.TransmitRequest{} + + noopCacheSet := newNoopCacheSet() + + t.Run("sends on reset channel after MaxConsecutiveRequestFailures timed out transmits", func(t *testing.T) { + calls := 0 + transmitErr := context.DeadlineExceeded + wsrpcClient := &mocks.MockWSRPCClient{ + TransmitF: func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + calls++ + return nil, transmitErr + }, + } + conn := &mocks.MockConn{ + Ready: true, + } + opts := ClientOpts{ + logger.Sugared(lggr), + csakey.MustNewV2XXXTestingOnly(new(big.Int).SetInt64(rand.Int64())), + nil, + "", + noopCacheSet, + nil, + } + c := newClient(opts) + c.conn = conn + c.rawClient = wsrpcClient + require.NoError(t, c.StartOnce("Mock WSRPC Client", func() error { return nil })) + for i := 1; i < MaxConsecutiveRequestFailures; i++ { + _, err := c.Transmit(ctx, req) + require.EqualError(t, err, "context deadline exceeded") + } + assert.Equal(t, MaxConsecutiveRequestFailures-1, calls) + select { + case <-c.chResetTransport: + t.Fatal("unexpected send on chResetTransport") + default: + } + _, err := c.Transmit(ctx, req) + require.EqualError(t, err, "context deadline exceeded") + assert.Equal(t, MaxConsecutiveRequestFailures, calls) + select { + case <-c.chResetTransport: + default: + t.Fatal("expected send on chResetTransport") + } + + t.Run("successful transmit resets the counter", func(t *testing.T) { + transmitErr = nil + // working transmit to reset counter + _, err = c.Transmit(ctx, req) + require.NoError(t, err) + assert.Equal(t, MaxConsecutiveRequestFailures+1, calls) + assert.Equal(t, 0, int(c.consecutiveTimeoutCnt.Load())) + }) + + t.Run("doesn't block in case channel is full", func(t *testing.T) { + transmitErr = context.DeadlineExceeded + c.chResetTransport = nil // simulate full channel + for range MaxConsecutiveRequestFailures { + 
_, err := c.Transmit(ctx, req) + require.EqualError(t, err, "context deadline exceeded") + } + }) + }) +} + +func Test_Client_LatestReport(t *testing.T) { + lggr := logger.Test(t) + ctx := testutils.Context(t) + cacheReads := 5 + + tests := []struct { + name string + ttl time.Duration + expectedCalls int + }{ + { + name: "with cache disabled", + ttl: 0, + expectedCalls: 5, + }, + { + name: "with cache enabled", + ttl: 1000 * time.Hour, // some large value that will never expire during a test + expectedCalls: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := &pb.LatestReportRequest{} + + cacheSet := cache.NewCacheSet(lggr, cache.Config{LatestReportTTL: tt.ttl}) + + resp := &pb.LatestReportResponse{} + + var calls int + wsrpcClient := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + calls++ + assert.Equal(t, req, in) + return resp, nil + }, + } + + conn := &mocks.MockConn{ + Ready: true, + } + c := newClient(ClientOpts{logger.Sugared(lggr), csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int64())), nil, "", cacheSet, nil}) + c.conn = conn + c.rawClient = wsrpcClient + + servicetest.Run(t, cacheSet) + simulateStart(ctx, t, c) + + for range cacheReads { + r, err := c.LatestReport(ctx, req) + + require.NoError(t, err) + assert.Equal(t, resp, r) + } + assert.Equal(t, tt.expectedCalls, calls, "expected %d calls to LatestReport but it was called %d times", tt.expectedCalls, calls) + }) + } +} diff --git a/pkg/mercury/wsrpc/metrics.go b/pkg/mercury/wsrpc/metrics.go new file mode 100644 index 0000000000..8c12184cd8 --- /dev/null +++ b/pkg/mercury/wsrpc/metrics.go @@ -0,0 +1,49 @@ +package wsrpc + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type reqStatus string + +const ( + statusSuccess reqStatus = "success" + statusFailed reqStatus = "failed" +) + +var ( + aliveMetric = 
prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "mercury", + Name: "wsrpc_connection_alive", + Help: "Total time spent connected to the Mercury WSRPC server", + }) + requestsStatusMetric = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "mercury", + Name: "wsrpc_requests_status_count", + Help: "Number of request status made to the Mercury WSRPC server", + }, []string{"status"}) + + requestLatencyMetric = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: "mercury", + Name: "wsrpc_request_latency", + Help: "Latency of requests made to the Mercury WSRPC server", + Buckets: []float64{10, 30, 100, 200, 250, 300, 350, 400, 500, 750, 1000, 3000, 10000}, + }) +) + +func setLivenessMetric(live bool) { + if live { + aliveMetric.Set(1) + } else { + aliveMetric.Set(0) + } +} + +func incRequestStatusMetric(status reqStatus) { + requestsStatusMetric.WithLabelValues(string(status)).Inc() +} + +func setRequestLatencyMetric(latency float64) { + requestLatencyMetric.Observe(latency) +} diff --git a/pkg/mercury/wsrpc/mocks/mocks.go b/pkg/mercury/wsrpc/mocks/mocks.go new file mode 100644 index 0000000000..95179d09d0 --- /dev/null +++ b/pkg/mercury/wsrpc/mocks/mocks.go @@ -0,0 +1,49 @@ +package mocks + +import ( + "context" + + grpc_connectivity "google.golang.org/grpc/connectivity" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" +) + +type MockWSRPCClient struct { + TransmitF func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) + LatestReportF func(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) +} + +func (m *MockWSRPCClient) Name() string { return "" } +func (m *MockWSRPCClient) Start(context.Context) error { return nil } +func (m *MockWSRPCClient) Close() error { return nil } +func (m *MockWSRPCClient) HealthReport() map[string]error { return map[string]error{} } +func (m *MockWSRPCClient) Ready() error { return nil } +func (m *MockWSRPCClient) Transmit(ctx 
context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + return m.TransmitF(ctx, in) +} +func (m *MockWSRPCClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + return m.LatestReportF(ctx, in) +} +func (m *MockWSRPCClient) ServerURL() string { return "mock server url" } + +func (m *MockWSRPCClient) RawClient() pb.MercuryClient { return nil } + +type MockConn struct { + State grpc_connectivity.State + Ready bool + Closed bool + InvokeF func(ctx context.Context, method string, args any, reply any) error +} + +func (m *MockConn) Close() error { + m.Closed = true + return nil +} +func (m MockConn) WaitForReady(ctx context.Context) bool { + return m.Ready +} +func (m MockConn) GetState() grpc_connectivity.State { return m.State } + +func (m MockConn) Invoke(ctx context.Context, method string, args any, reply any) error { + return m.InvokeF(ctx, method, args, reply) +} diff --git a/pkg/mercury/wsrpc/pb/generate.go b/pkg/mercury/wsrpc/pb/generate.go new file mode 100644 index 0000000000..2bb95012d1 --- /dev/null +++ b/pkg/mercury/wsrpc/pb/generate.go @@ -0,0 +1,2 @@ +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-wsrpc_out=. --go-wsrpc_opt=paths=source_relative mercury.proto +package pb diff --git a/pkg/mercury/wsrpc/pb/mercury.pb.go b/pkg/mercury/wsrpc/pb/mercury.pb.go new file mode 100644 index 0000000000..49943ba6ea --- /dev/null +++ b/pkg/mercury/wsrpc/pb/mercury.pb.go @@ -0,0 +1,529 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc v5.29.3 +// source: mercury.proto + +package pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TransmitRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + ReportFormat uint32 `protobuf:"varint,2,opt,name=reportFormat,proto3" json:"reportFormat,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransmitRequest) Reset() { + *x = TransmitRequest{} + mi := &file_mercury_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransmitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransmitRequest) ProtoMessage() {} + +func (x *TransmitRequest) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransmitRequest.ProtoReflect.Descriptor instead. 
+func (*TransmitRequest) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{0} +} + +func (x *TransmitRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *TransmitRequest) GetReportFormat() uint32 { + if x != nil { + return x.ReportFormat + } + return 0 +} + +type TransmitResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransmitResponse) Reset() { + *x = TransmitResponse{} + mi := &file_mercury_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransmitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransmitResponse) ProtoMessage() {} + +func (x *TransmitResponse) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransmitResponse.ProtoReflect.Descriptor instead. 
+func (*TransmitResponse) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{1} +} + +func (x *TransmitResponse) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *TransmitResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type LatestReportRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + FeedId []byte `protobuf:"bytes,1,opt,name=feedId,proto3" json:"feedId,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LatestReportRequest) Reset() { + *x = LatestReportRequest{} + mi := &file_mercury_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LatestReportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LatestReportRequest) ProtoMessage() {} + +func (x *LatestReportRequest) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LatestReportRequest.ProtoReflect.Descriptor instead. 
+func (*LatestReportRequest) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{2} +} + +func (x *LatestReportRequest) GetFeedId() []byte { + if x != nil { + return x.FeedId + } + return nil +} + +type LatestReportResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Report *Report `protobuf:"bytes,2,opt,name=report,proto3" json:"report,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LatestReportResponse) Reset() { + *x = LatestReportResponse{} + mi := &file_mercury_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LatestReportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LatestReportResponse) ProtoMessage() {} + +func (x *LatestReportResponse) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LatestReportResponse.ProtoReflect.Descriptor instead. 
+func (*LatestReportResponse) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{3} +} + +func (x *LatestReportResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *LatestReportResponse) GetReport() *Report { + if x != nil { + return x.Report + } + return nil +} + +type Report struct { + state protoimpl.MessageState `protogen:"open.v1"` + FeedId []byte `protobuf:"bytes,1,opt,name=feedId,proto3" json:"feedId,omitempty"` + Price []byte `protobuf:"bytes,2,opt,name=price,proto3" json:"price,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + ValidFromBlockNumber int64 `protobuf:"varint,4,opt,name=validFromBlockNumber,proto3" json:"validFromBlockNumber,omitempty"` + CurrentBlockNumber int64 `protobuf:"varint,5,opt,name=currentBlockNumber,proto3" json:"currentBlockNumber,omitempty"` + CurrentBlockHash []byte `protobuf:"bytes,6,opt,name=currentBlockHash,proto3" json:"currentBlockHash,omitempty"` + CurrentBlockTimestamp uint64 `protobuf:"varint,7,opt,name=currentBlockTimestamp,proto3" json:"currentBlockTimestamp,omitempty"` + ObservationsTimestamp int64 `protobuf:"varint,8,opt,name=observationsTimestamp,proto3" json:"observationsTimestamp,omitempty"` + ConfigDigest []byte `protobuf:"bytes,9,opt,name=configDigest,proto3" json:"configDigest,omitempty"` + Epoch uint32 `protobuf:"varint,10,opt,name=epoch,proto3" json:"epoch,omitempty"` + Round uint32 `protobuf:"varint,11,opt,name=round,proto3" json:"round,omitempty"` + OperatorName string `protobuf:"bytes,12,opt,name=operatorName,proto3" json:"operatorName,omitempty"` + TransmittingOperator []byte `protobuf:"bytes,13,opt,name=transmittingOperator,proto3" json:"transmittingOperator,omitempty"` + CreatedAt *Timestamp `protobuf:"bytes,14,opt,name=createdAt,proto3" json:"createdAt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Report) Reset() { + *x = Report{} + 
mi := &file_mercury_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Report) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Report) ProtoMessage() {} + +func (x *Report) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Report.ProtoReflect.Descriptor instead. +func (*Report) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{4} +} + +func (x *Report) GetFeedId() []byte { + if x != nil { + return x.FeedId + } + return nil +} + +func (x *Report) GetPrice() []byte { + if x != nil { + return x.Price + } + return nil +} + +func (x *Report) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Report) GetValidFromBlockNumber() int64 { + if x != nil { + return x.ValidFromBlockNumber + } + return 0 +} + +func (x *Report) GetCurrentBlockNumber() int64 { + if x != nil { + return x.CurrentBlockNumber + } + return 0 +} + +func (x *Report) GetCurrentBlockHash() []byte { + if x != nil { + return x.CurrentBlockHash + } + return nil +} + +func (x *Report) GetCurrentBlockTimestamp() uint64 { + if x != nil { + return x.CurrentBlockTimestamp + } + return 0 +} + +func (x *Report) GetObservationsTimestamp() int64 { + if x != nil { + return x.ObservationsTimestamp + } + return 0 +} + +func (x *Report) GetConfigDigest() []byte { + if x != nil { + return x.ConfigDigest + } + return nil +} + +func (x *Report) GetEpoch() uint32 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *Report) GetRound() uint32 { + if x != nil { + return x.Round + } + return 0 +} + +func (x *Report) GetOperatorName() string { + if x != nil { + return x.OperatorName + } + return "" +} + +func (x *Report) 
GetTransmittingOperator() []byte { + if x != nil { + return x.TransmittingOperator + } + return nil +} + +func (x *Report) GetCreatedAt() *Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +// Taken from: https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/timestamp.proto +type Timestamp struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + mi := &file_mercury_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
+func (*Timestamp) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{5} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_mercury_proto protoreflect.FileDescriptor + +const file_mercury_proto_rawDesc = "" + + "\n" + + "\rmercury.proto\x12\x02pb\"O\n" + + "\x0fTransmitRequest\x12\x18\n" + + "\apayload\x18\x01 \x01(\fR\apayload\x12\"\n" + + "\freportFormat\x18\x02 \x01(\rR\freportFormat\"<\n" + + "\x10TransmitResponse\x12\x12\n" + + "\x04code\x18\x01 \x01(\x05R\x04code\x12\x14\n" + + "\x05error\x18\x02 \x01(\tR\x05error\"-\n" + + "\x13LatestReportRequest\x12\x16\n" + + "\x06feedId\x18\x01 \x01(\fR\x06feedId\"P\n" + + "\x14LatestReportResponse\x12\x14\n" + + "\x05error\x18\x01 \x01(\tR\x05error\x12\"\n" + + "\x06report\x18\x02 \x01(\v2\n" + + ".pb.ReportR\x06report\"\xa1\x04\n" + + "\x06Report\x12\x16\n" + + "\x06feedId\x18\x01 \x01(\fR\x06feedId\x12\x14\n" + + "\x05price\x18\x02 \x01(\fR\x05price\x12\x18\n" + + "\apayload\x18\x03 \x01(\fR\apayload\x122\n" + + "\x14validFromBlockNumber\x18\x04 \x01(\x03R\x14validFromBlockNumber\x12.\n" + + "\x12currentBlockNumber\x18\x05 \x01(\x03R\x12currentBlockNumber\x12*\n" + + "\x10currentBlockHash\x18\x06 \x01(\fR\x10currentBlockHash\x124\n" + + "\x15currentBlockTimestamp\x18\a \x01(\x04R\x15currentBlockTimestamp\x124\n" + + "\x15observationsTimestamp\x18\b \x01(\x03R\x15observationsTimestamp\x12\"\n" + + "\fconfigDigest\x18\t \x01(\fR\fconfigDigest\x12\x14\n" + + "\x05epoch\x18\n" + + " \x01(\rR\x05epoch\x12\x14\n" + + "\x05round\x18\v \x01(\rR\x05round\x12\"\n" + + "\foperatorName\x18\f \x01(\tR\foperatorName\x122\n" + + "\x14transmittingOperator\x18\r \x01(\fR\x14transmittingOperator\x12+\n" + + "\tcreatedAt\x18\x0e \x01(\v2\r.pb.TimestampR\tcreatedAt\";\n" + + "\tTimestamp\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + 
"\x05nanos\x18\x02 \x01(\x05R\x05nanos2\x83\x01\n" + + "\aMercury\x125\n" + + "\bTransmit\x12\x13.pb.TransmitRequest\x1a\x14.pb.TransmitResponse\x12A\n" + + "\fLatestReport\x12\x17.pb.LatestReportRequest\x1a\x18.pb.LatestReportResponseBNZLgithub.com/smartcontractkit/chainlink/v2/services/relay/evm/mercury/wsrpc/pbb\x06proto3" + +var ( + file_mercury_proto_rawDescOnce sync.Once + file_mercury_proto_rawDescData []byte +) + +func file_mercury_proto_rawDescGZIP() []byte { + file_mercury_proto_rawDescOnce.Do(func() { + file_mercury_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mercury_proto_rawDesc), len(file_mercury_proto_rawDesc))) + }) + return file_mercury_proto_rawDescData +} + +var file_mercury_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_mercury_proto_goTypes = []any{ + (*TransmitRequest)(nil), // 0: pb.TransmitRequest + (*TransmitResponse)(nil), // 1: pb.TransmitResponse + (*LatestReportRequest)(nil), // 2: pb.LatestReportRequest + (*LatestReportResponse)(nil), // 3: pb.LatestReportResponse + (*Report)(nil), // 4: pb.Report + (*Timestamp)(nil), // 5: pb.Timestamp +} +var file_mercury_proto_depIdxs = []int32{ + 4, // 0: pb.LatestReportResponse.report:type_name -> pb.Report + 5, // 1: pb.Report.createdAt:type_name -> pb.Timestamp + 0, // 2: pb.Mercury.Transmit:input_type -> pb.TransmitRequest + 2, // 3: pb.Mercury.LatestReport:input_type -> pb.LatestReportRequest + 1, // 4: pb.Mercury.Transmit:output_type -> pb.TransmitResponse + 3, // 5: pb.Mercury.LatestReport:output_type -> pb.LatestReportResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_mercury_proto_init() } +func file_mercury_proto_init() { + if File_mercury_proto != nil { + return + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_mercury_proto_rawDesc), len(file_mercury_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_mercury_proto_goTypes, + DependencyIndexes: file_mercury_proto_depIdxs, + MessageInfos: file_mercury_proto_msgTypes, + }.Build() + File_mercury_proto = out.File + file_mercury_proto_goTypes = nil + file_mercury_proto_depIdxs = nil +} diff --git a/pkg/mercury/wsrpc/pb/mercury.proto b/pkg/mercury/wsrpc/pb/mercury.proto new file mode 100644 index 0000000000..6b71404a6a --- /dev/null +++ b/pkg/mercury/wsrpc/pb/mercury.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +option go_package = "github.com/smartcontractkit/chainlink/v2/services/relay/evm/mercury/wsrpc/pb"; + +package pb; + +service Mercury { + rpc Transmit(TransmitRequest) returns (TransmitResponse); + rpc LatestReport(LatestReportRequest) returns (LatestReportResponse); +} + +message TransmitRequest { + bytes payload = 1; + uint32 reportFormat = 2; +} + +message TransmitResponse { + int32 code = 1; + string error = 2; +} + +message LatestReportRequest { + bytes feedId = 1; +} + +message LatestReportResponse { + string error = 1; + Report report = 2; +} + +message Report { + bytes feedId = 1; + bytes price = 2; + bytes payload = 3; + int64 validFromBlockNumber = 4; + int64 currentBlockNumber = 5; + bytes currentBlockHash = 6; + uint64 currentBlockTimestamp = 7; + int64 observationsTimestamp = 8; + bytes configDigest = 9; + uint32 epoch = 10; + uint32 round = 11; + string operatorName = 12; + bytes transmittingOperator = 13; + Timestamp createdAt = 14; +} + +// Taken from: https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/timestamp.proto +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. 
Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/pkg/mercury/wsrpc/pb/mercury_wsrpc.pb.go b/pkg/mercury/wsrpc/pb/mercury_wsrpc.pb.go new file mode 100644 index 0000000000..1e0a862f48 --- /dev/null +++ b/pkg/mercury/wsrpc/pb/mercury_wsrpc.pb.go @@ -0,0 +1,87 @@ +// Code generated by protoc-gen-go-wsrpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-wsrpc v0.0.1 +// - protoc v5.29.3 + +package pb + +import ( + context "context" + wsrpc "github.com/smartcontractkit/wsrpc" +) + +// MercuryClient is the client API for Mercury service. +type MercuryClient interface { + Transmit(ctx context.Context, in *TransmitRequest) (*TransmitResponse, error) + LatestReport(ctx context.Context, in *LatestReportRequest) (*LatestReportResponse, error) +} + +type mercuryClient struct { + cc wsrpc.ClientInterface +} + +func NewMercuryClient(cc wsrpc.ClientInterface) MercuryClient { + return &mercuryClient{cc} +} + +func (c *mercuryClient) Transmit(ctx context.Context, in *TransmitRequest) (*TransmitResponse, error) { + out := new(TransmitResponse) + err := c.cc.Invoke(ctx, "Transmit", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mercuryClient) LatestReport(ctx context.Context, in *LatestReportRequest) (*LatestReportResponse, error) { + out := new(LatestReportResponse) + err := c.cc.Invoke(ctx, "LatestReport", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +// MercuryServer is the server API for Mercury service. 
+type MercuryServer interface { + Transmit(context.Context, *TransmitRequest) (*TransmitResponse, error) + LatestReport(context.Context, *LatestReportRequest) (*LatestReportResponse, error) +} + +func RegisterMercuryServer(s wsrpc.ServiceRegistrar, srv MercuryServer) { + s.RegisterService(&Mercury_ServiceDesc, srv) +} + +func _Mercury_Transmit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(TransmitRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(MercuryServer).Transmit(ctx, in) +} + +func _Mercury_LatestReport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(LatestReportRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(MercuryServer).LatestReport(ctx, in) +} + +// Mercury_ServiceDesc is the wsrpc.ServiceDesc for Mercury service. +// It's only intended for direct use with wsrpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Mercury_ServiceDesc = wsrpc.ServiceDesc{ + ServiceName: "pb.Mercury", + HandlerType: (*MercuryServer)(nil), + Methods: []wsrpc.MethodDesc{ + { + MethodName: "Transmit", + Handler: _Mercury_Transmit_Handler, + }, + { + MethodName: "LatestReport", + Handler: _Mercury_LatestReport_Handler, + }, + }, +} diff --git a/pkg/mercury/wsrpc/pool.go b/pkg/mercury/wsrpc/pool.go new file mode 100644 index 0000000000..09e636f1d7 --- /dev/null +++ b/pkg/mercury/wsrpc/pool.go @@ -0,0 +1,309 @@ +package wsrpc + +import ( + "context" + "crypto" + "errors" + "sync" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" + + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/cache" +) + +var _ Client = &clientCheckout{} + +type clientCheckout struct { + *connection // inherit all methods from client, with 
override on Start/Close +} + +func (cco *clientCheckout) Start(_ context.Context) error { + return nil +} + +func (cco *clientCheckout) Close() error { + cco.checkin(cco) + return nil +} + +type connection struct { + // Client will be nil when checkouts is empty, if len(checkouts) > 0 then it is expected to be a non-nil, started client + client Client + + lggr logger.Logger + clientPubKeyHex string + clientSigner crypto.Signer + serverPubKey []byte + serverURL string + + pool *pool + + checkouts []*clientCheckout // reference count, if this goes to zero the connection should be closed and *client nilified + + mu sync.RWMutex +} + +func (c *connection) Ready() error { + c.mu.RLock() + defer c.mu.RUnlock() + if c.client == nil { + return errors.New("nil client") + } + return c.client.Ready() +} + +func (c *connection) HealthReport() map[string]error { + c.mu.RLock() + defer c.mu.RUnlock() + if c.client == nil { + return map[string]error{} // no name available either + } + return c.client.HealthReport() +} + +func (c *connection) Name() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.client == nil { + return "" + } + return c.client.Name() +} + +func (c *connection) Transmit(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + c.mu.RLock() + defer c.mu.RUnlock() + if c.client == nil { + return nil, errors.New("nil client") + } + return c.client.Transmit(ctx, in) +} + +func (c *connection) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + c.mu.RLock() + defer c.mu.RUnlock() + if c.client == nil { + return nil, errors.New("nil client") + } + return c.client.LatestReport(ctx, in) +} + +func (c *connection) ServerURL() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.client == nil { + return "" + } + return c.client.ServerURL() +} + +func (c *connection) RawClient() pb.MercuryClient { + c.mu.RLock() + defer c.mu.RUnlock() + if c.client == nil { + return errMercuryClient{} + } + return 
c.client.RawClient() +} + +type errMercuryClient struct{} + +func (e errMercuryClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + return nil, errors.New("nil client") +} + +func (e errMercuryClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + return nil, errors.New("nil client") +} + +func (conn *connection) checkout(ctx context.Context) (cco *clientCheckout, err error) { + conn.mu.Lock() + defer conn.mu.Unlock() + if err = conn.ensureStartedClient(ctx); err != nil { + return nil, err + } + cco = &clientCheckout{conn} + conn.checkouts = append(conn.checkouts, cco) + return cco, nil +} + +// not thread-safe, access must be serialized +func (conn *connection) ensureStartedClient(ctx context.Context) error { + if len(conn.checkouts) == 0 { + conn.client = conn.pool.newClient(ClientOpts{logger.Sugared(conn.lggr), conn.clientSigner, conn.serverPubKey, conn.serverURL, conn.pool.cacheSet, nil}) + return conn.client.Start(ctx) + } + return nil +} + +func (conn *connection) checkin(checkinCco *clientCheckout) { + conn.mu.Lock() + defer conn.mu.Unlock() + var removed bool + for i, cco := range conn.checkouts { + if cco == checkinCco { + conn.checkouts = deleteUnstable(conn.checkouts, i) + removed = true + break + } + } + if !removed { + panic("tried to check in client that was never checked out") + } + if len(conn.checkouts) == 0 { + if err := conn.client.Close(); err != nil { + // programming error if we hit this + panic(err) + } + conn.client = nil + conn.pool.remove(conn.serverURL, conn.clientPubKeyHex) + } +} + +func (conn *connection) forceCloseAll() (err error) { + conn.mu.Lock() + defer conn.mu.Unlock() + if conn.client != nil { + err = conn.client.Close() + if errors.Is(err, services.ErrAlreadyStopped) { + // ignore error if it has already been stopped; no problem + err = nil + } + conn.client = nil + conn.checkouts = nil + } + return +} + +type Pool interface { + 
services.Service + // Checkout gets a wsrpc.Client for the given arguments + // The same underlying client can be checked out multiple times, the pool + // handles lifecycle management. The consumer can treat it as if it were + // its own unique client. + Checkout(ctx context.Context, clientPubKeyHex string, clientSigner crypto.Signer, serverPubKey []byte, serverURL string) (client Client, err error) +} + +// WSRPC allows only one connection per client key per server +type pool struct { + lggr logger.Logger + // server url => client public key hex => connection + connections map[string]map[string]*connection + + // embedding newClient makes testing/mocking easier + newClient func(opts ClientOpts) Client + + mu sync.RWMutex + + cacheSet cache.CacheSet + + closed bool +} + +func NewPool(lggr logger.Logger, cacheCfg cache.Config) Pool { + lggr = logger.Sugared(lggr).Named("Mercury.WSRPCPool") + p := newPool(lggr) + p.newClient = NewClient + p.cacheSet = cache.NewCacheSet(lggr, cacheCfg) + return p +} + +func newPool(lggr logger.Logger) *pool { + return &pool{ + lggr: lggr, + connections: make(map[string]map[string]*connection), + } +} + +func (p *pool) Checkout(ctx context.Context, clientPubKeyHex string, clientSigner crypto.Signer, serverPubKey []byte, serverURL string) (client Client, err error) { + p.mu.Lock() + + if p.closed { + p.mu.Unlock() + return nil, errors.New("pool is closed") + } + + server, exists := p.connections[serverURL] + if !exists { + server = make(map[string]*connection) + p.connections[serverURL] = server + } + conn, exists := server[clientPubKeyHex] + if !exists { + conn = p.newConnection(p.lggr, clientPubKeyHex, clientSigner, serverPubKey, serverURL) + server[clientPubKeyHex] = conn + } + p.mu.Unlock() + + // checkout outside of pool lock since it might take non-trivial time + // the clientCheckout will be checked in again when its Close() method is called + // this also should avoid deadlocks between conn.mu and pool.mu + return 
conn.checkout(ctx) +} + +// remove performs garbage collection on the connections map after connections are no longer used +func (p *pool) remove(serverURL string, clientPubKeyHex string) { + p.mu.Lock() + defer p.mu.Unlock() + delete(p.connections[serverURL], clientPubKeyHex) + if len(p.connections[serverURL]) == 0 { + delete(p.connections, serverURL) + } +} + +func (p *pool) newConnection(lggr logger.Logger, clientPubKeyHex string, clientSigner crypto.Signer, serverPubKey []byte, serverURL string) *connection { + return &connection{ + lggr: lggr, + clientPubKeyHex: clientPubKeyHex, + clientSigner: clientSigner, + serverPubKey: serverPubKey, + serverURL: serverURL, + pool: p, + } +} + +func (p *pool) Start(ctx context.Context) error { + return p.cacheSet.Start(ctx) +} + +func (p *pool) Close() (merr error) { + p.mu.Lock() + defer p.mu.Unlock() + p.closed = true + for _, clientPubKeys := range p.connections { + for _, conn := range clientPubKeys { + merr = errors.Join(merr, conn.forceCloseAll()) + } + } + merr = errors.Join(merr, p.cacheSet.Close()) + return +} + +func (p *pool) Name() string { + return p.lggr.Name() +} + +func (p *pool) Ready() error { + p.mu.RLock() + defer p.mu.RUnlock() + if p.closed { + return errors.New("pool is closed") + } + return nil +} + +func (p *pool) HealthReport() map[string]error { + hp := map[string]error{p.Name(): p.Ready()} + services.CopyHealth(hp, p.cacheSet.HealthReport()) + return hp +} + +// deleteUnstable destructively removes slice element at index i +// It does no bounds checking and may re-order the slice +func deleteUnstable[T any](s []T, i int) []T { + s[i] = s[len(s)-1] + s = s[:len(s)-1] + return s +} diff --git a/pkg/mercury/wsrpc/pool_test.go b/pkg/mercury/wsrpc/pool_test.go new file mode 100644 index 0000000000..0c9580fb7b --- /dev/null +++ b/pkg/mercury/wsrpc/pool_test.go @@ -0,0 +1,265 @@ +package wsrpc + +import ( + "context" + "math/big" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + + "github.com/smartcontractkit/chainlink-common/keystore/corekeys/csakey" + "github.com/smartcontractkit/chainlink-evm/pkg/mercury/wsrpc/pb" + "github.com/smartcontractkit/chainlink-evm/pkg/testutils" + "github.com/smartcontractkit/chainlink-evm/pkg/utils" +) + +var _ Client = &mockClient{} + +type mockClient struct { + started bool + closed bool + rawClient pb.MercuryClient +} + +func (c *mockClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (out *pb.TransmitResponse, err error) { + return +} +func (c *mockClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + return +} +func (c *mockClient) Start(context.Context) error { + c.started = true + return nil +} +func (c *mockClient) Close() error { + c.closed = true + return nil +} +func (c *mockClient) Name() string { return "mock client" } +func (c *mockClient) Ready() error { return nil } +func (c *mockClient) HealthReport() map[string]error { return nil } +func (c *mockClient) ServerURL() string { return "mock client url" } +func (c *mockClient) RawClient() pb.MercuryClient { return c.rawClient } + +func newMockClient(lggr logger.Logger) *mockClient { + return &mockClient{} +} + +func Test_Pool(t *testing.T) { + lggr := logger.Sugared(logger.Test(t)).Named("PoolTestLogger") + + ctx := testutils.Context(t) + + t.Run("Checkout", func(t *testing.T) { + p := newPool(lggr) + p.cacheSet = &mockCacheSet{} + + t.Run("checks out one started client", func(t *testing.T) { + clientPrivKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())) + serverPubKey := utils.NewHash().Bytes() + serverURL := "example.com:443/ws" + + client := newMockClient(lggr) + p.newClient = func(opts ClientOpts) Client { + assert.Equal(t, serverPubKey, opts.ServerPubKey) + assert.Equal(t, serverURL, opts.ServerURL) + return client + } + + c, err := p.Checkout(ctx, 
clientPrivKey.PublicKeyString(), clientPrivKey, serverPubKey, serverURL) + require.NoError(t, err) + + assert.True(t, client.started) + + require.IsType(t, &clientCheckout{}, c) + + conn := c.(*clientCheckout).connection + require.Equal(t, conn.client, client) + + assert.Len(t, conn.checkouts, 1) + assert.Same(t, lggr, conn.lggr) + assert.Equal(t, clientPrivKey.PublicKeyString(), conn.clientPubKeyHex) + assert.Equal(t, serverPubKey, conn.serverPubKey) + assert.Equal(t, serverURL, conn.serverURL) + assert.Same(t, p, conn.pool) + + t.Run("checks in the clientCheckout when Close is called", func(t *testing.T) { + err := c.Close() + require.NoError(t, err) + + assert.Empty(t, conn.checkouts) + require.IsType(t, nil, conn.client) + assert.Nil(t, conn.client) + assert.True(t, client.closed) + }) + }) + + t.Run("checks out multiple started clients and only closes if all of the clients for a given pk/server pair are checked back in", func(t *testing.T) { + clientPrivKeys := []csakey.KeyV2{ + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + } + serverPubKey := utils.NewHash().Bytes() + serverURLs := []string{ + "example.com:443/ws", + "example.invalid:8000/ws", + } + + p.newClient = func(opts ClientOpts) Client { + return newMockClient(opts.Logger) + } + + // conn 1 + c1 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + c2 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + c3 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + assert.Len(t, p.connections, 1) + assert.Len(t, p.connections[serverURLs[0]], 1) + assert.Empty(t, p.connections[serverURLs[1]]) + + // conn 2 + c4 := mustCheckout(t, p, clientPrivKeys[1], serverPubKey, serverURLs[0]) + assert.Len(t, p.connections, 1) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Empty(t, p.connections[serverURLs[1]]) + + // conn 3 + c5 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, 
serverURLs[1]) + c6 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1]) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + conn1 := c1.(*clientCheckout).connection + assert.Same(t, conn1, c2.(*clientCheckout).connection) + assert.Same(t, conn1, c3.(*clientCheckout).connection) + assert.Len(t, conn1.checkouts, 3) + assert.True(t, conn1.client.(*mockClient).started) + + conn2 := c4.(*clientCheckout).connection + assert.NotEqual(t, conn1, conn2) + assert.Len(t, conn2.checkouts, 1) + assert.True(t, conn2.client.(*mockClient).started) + + conn3 := c5.(*clientCheckout).connection + assert.NotEqual(t, conn1, conn3) + assert.NotEqual(t, conn2, conn3) + assert.Same(t, conn3, c6.(*clientCheckout).connection) + assert.Len(t, conn3.checkouts, 2) + assert.True(t, conn3.client.(*mockClient).started) + + require.NoError(t, c1.Close()) + assert.Len(t, conn1.checkouts, 2) + assert.NotNil(t, conn1.client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c2.Close()) + assert.Len(t, conn1.checkouts, 1) + assert.NotNil(t, conn1.client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c3.Close()) + assert.Empty(t, conn1.checkouts) + assert.Nil(t, conn1.client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 1) + assert.Len(t, p.connections[serverURLs[1]], 1) + + c7 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + // Not the same one, since previously all checkouts were checked in, the original connection was deleted from the map and a new one created + assert.NotSame(t, conn1, c7.(*clientCheckout).connection) + assert.Empty(t, conn1.checkouts) // actually, conn1 has already been removed from the map and will be garbage 
collected + conn4 := c7.(*clientCheckout).connection + assert.Len(t, conn4.checkouts, 1) + assert.NotNil(t, conn4.client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c7.Close()) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 1) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c4.Close()) + assert.Len(t, p.connections, 1) + assert.Empty(t, p.connections[serverURLs[0]]) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c5.Close()) + require.NoError(t, c6.Close()) + assert.Empty(t, p.connections) + + require.NoError(t, p.Close()) + }) + }) + + p := newPool(lggr) + p.cacheSet = &mockCacheSet{} + + t.Run("Name", func(t *testing.T) { + assert.Equal(t, "PoolTestLogger", p.Name()) + }) + t.Run("Start", func(t *testing.T) { + require.NoError(t, p.Start(ctx)) + assert.NoError(t, p.Ready()) + assert.NoError(t, p.HealthReport()["PoolTestLogger"]) + }) + t.Run("Close force closes all connections", func(t *testing.T) { + clientPrivKeys := []csakey.KeyV2{ + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + } + serverPubKey := utils.NewHash().Bytes() + serverURLs := []string{ + "example.com:443/ws", + "example.invalid:8000/ws", + } + + var clients []*mockClient + p.newClient = func(opts ClientOpts) Client { + c := newMockClient(opts.Logger) + clients = append(clients, c) + return c + } + + // conn 1 + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + + // conn 2 + mustCheckout(t, p, clientPrivKeys[1], serverPubKey, serverURLs[0]) + + // conn 3 + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1]) + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, 
serverURLs[1]) + + for _, c := range clients { + assert.True(t, c.started) + assert.False(t, c.closed) + } + + require.NoError(t, p.Close()) + assert.EqualError(t, p.Ready(), "pool is closed") + assert.EqualError(t, p.HealthReport()["PoolTestLogger"], "pool is closed") + + for _, c := range clients { + assert.True(t, c.closed) + } + }) +} + +func mustCheckout(t *testing.T, p *pool, csaKey csakey.KeyV2, serverPubKey []byte, serverURL string) Client { + c, err := p.Checkout(testutils.Context(t), csaKey.PublicKeyString(), csaKey, serverPubKey, serverURL) + require.NoError(t, err) + return c +} From b5259b3d56135ab49e2711a2afda91ce6dbaf120 Mon Sep 17 00:00:00 2001 From: pavel-raykov Date: Tue, 24 Feb 2026 18:58:57 +0100 Subject: [PATCH 2/2] Minor. --- go.mod | 10 +++++++++- go.sum | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a1855dc8bc..37427701c9 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/smartcontractkit/chainlink-evm go 1.25.3 require ( + github.com/esote/minmaxheap v1.0.0 github.com/ethereum/go-ethereum v1.16.9 github.com/fbsobreira/gotron-sdk v0.0.0-20250403083053-2943ce8c759b github.com/fxamacker/cbor/v2 v2.7.0 @@ -28,6 +29,7 @@ require ( github.com/smartcontractkit/chain-selectors v1.0.89 github.com/smartcontractkit/chainlink-common v0.10.1-0.20260217160002-b56cb5356cc7 github.com/smartcontractkit/chainlink-common/keystore v1.0.2-0.20260217160002-b56cb5356cc7 + github.com/smartcontractkit/chainlink-data-streams v0.1.11 github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022073203-7d8ae8cf67c1 github.com/smartcontractkit/chainlink-framework/capabilities v0.0.0-20250818175541-3389ac08a563 github.com/smartcontractkit/chainlink-framework/chains v0.0.0-20251210101658-1c5c8e4c4f15 @@ -38,10 +40,12 @@ require ( github.com/smartcontractkit/chainlink-tron/relayer v0.0.11-0.20250815105909-75499abc4335 github.com/smartcontractkit/freeport 
v0.1.3-0.20250716200817-cb5dfd0e369e github.com/smartcontractkit/libocr v0.0.0-20251027221354-bdc84e1ed858 + github.com/smartcontractkit/wsrpc v0.8.5-0.20250502134807-c57d3d995945 github.com/stretchr/testify v1.11.1 github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a github.com/tidwall/gjson v1.18.0 github.com/ugorji/go/codec v1.2.12 + github.com/umbracle/ethgo v0.1.3 go.opentelemetry.io/otel v1.39.0 go.opentelemetry.io/otel/metric v1.39.0 go.uber.org/multierr v1.11.0 @@ -49,6 +53,7 @@ require ( golang.org/x/crypto v0.48.0 golang.org/x/exp v0.0.0-20260112195511-716be5621a96 golang.org/x/sync v0.19.0 + google.golang.org/grpc v1.78.0 google.golang.org/protobuf v1.36.11 gopkg.in/guregu/null.v4 v4.0.0 ) @@ -66,6 +71,7 @@ require ( github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/buger/jsonparser v1.1.1 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudevents/sdk-go/binding/format/protobuf/v2 v2.16.1 // indirect @@ -109,6 +115,7 @@ require ( github.com/google/btree v1.1.3 // indirect github.com/google/flatbuffers v25.2.10+incompatible // indirect github.com/google/go-cmp v0.7.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect @@ -173,7 +180,9 @@ require ( github.com/tidwall/pretty v1.2.1 // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect github.com/tklauser/numcpus v0.10.0 // indirect + github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 // indirect github.com/urfave/cli/v2 v2.27.6 // indirect + github.com/valyala/fastjson v1.4.1 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect 
github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect @@ -209,7 +218,6 @@ require ( golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect - google.golang.org/grpc v1.78.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 800a668d86..531e65fceb 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= @@ -48,6 +50,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty 
v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= @@ -80,6 +84,8 @@ github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/ github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -116,6 +122,8 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAK github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/consensys/gnark-crypto v0.18.1 h1:RyLV6UhPRoYYzaFnPQA4qK3DyuDgkTgskDdoGqFt3fI= github.com/consensys/gnark-crypto v0.18.1/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= +github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b h1:pik3LX++5O3UiNWv45wfP/WT81l7ukBJzd3uUiifbSU= 
+github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -143,6 +151,12 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvw github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dominikbraun/graph v0.23.0 h1:TdZB4pPqCLFxYhdyMFb1TBdFxp8XLcJfTTBQucVPgCo= +github.com/dominikbraun/graph v0.23.0/go.mod h1:yOjYyogZLY1LSG9E33JWZJiq5k83Qy2C6POAuiViluc= github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -152,6 +166,8 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/esote/minmaxheap v1.0.0 h1:rgA7StnXXpZG6qlM0S7pUmEv1KpWe32rYT4x8J8ntaA= +github.com/esote/minmaxheap v1.0.0/go.mod h1:Ln8+i7fS1k3PLgZI2JAo0iA1as95QnIYiGCrqSJ5FZk= github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk= @@ -542,6 +558,14 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 
h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -623,6 +647,8 @@ github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJV github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartcontractkit/chain-selectors v1.0.89 h1:L9oWZGqQXWyTPnC6ODXgu3b0DFyLmJ9eHv+uJrE9IZY= github.com/smartcontractkit/chain-selectors v1.0.89/go.mod h1:qy7whtgG5g+7z0jt0nRyii9bLND9m15NZTzuQPkMZ5w= github.com/smartcontractkit/chainlink-common v0.10.1-0.20260217160002-b56cb5356cc7 h1:h5cmgzKpKn5N5ItpEDFhRcrtqs36nu9r/dciJub1hos= @@ -631,6 +657,8 @@ github.com/smartcontractkit/chainlink-common/keystore v1.0.2-0.20260217160002-b5 github.com/smartcontractkit/chainlink-common/keystore v1.0.2-0.20260217160002-b56cb5356cc7/go.mod h1:rSkIHdomyak3YnUtXLenl6poIq8q0V3UZPiiyYqPdGA= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10 h1:FJAFgXS9oqASnkS03RE1HQwYQQxrO4l46O5JSzxqLgg= github.com/smartcontractkit/chainlink-common/pkg/chipingress v0.0.10/go.mod h1:oiDa54M0FwxevWwyAX773lwdWvFYYlYHHQV1LQ5HpWY= +github.com/smartcontractkit/chainlink-data-streams v0.1.11 h1:yBzjU0Cu8AcfuM858G4xcQIulfNQkPfpUs5FDxX9UaY= +github.com/smartcontractkit/chainlink-data-streams v0.1.11/go.mod h1:8rUcGhjeXBoTFx2MynWgXiBWzVSB+LXd9JR6m8y2FfQ= github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022073203-7d8ae8cf67c1 h1:NTODgwAil7BLoijS7y6KnEuNbQ9v60VUhIR9FcAzIhg= github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251022073203-7d8ae8cf67c1/go.mod h1:oyfOm4k0uqmgZIfxk1elI/59B02shbbJQiiUdPdbMgI= github.com/smartcontractkit/chainlink-framework/capabilities 
v0.0.0-20250818175541-3389ac08a563 h1:ACpDbAxG4fa4sA83dbtYcrnlpE/y7thNIZfHxTv2ZLs= @@ -659,6 +687,8 @@ github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 h1:12i github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7/go.mod h1:FX7/bVdoep147QQhsOPkYsPEXhGZjeYx6lBSaSXtZOA= github.com/smartcontractkit/libocr v0.0.0-20251027221354-bdc84e1ed858 h1:dz+lxAW+B+PUq32ODppSq5UKw06+EF6+EO6kk684bcQ= github.com/smartcontractkit/libocr v0.0.0-20251027221354-bdc84e1ed858/go.mod h1:oJkBKVn8zoBQm7Feah9CiuEHyCqAhnp1LJBzrvloQtM= +github.com/smartcontractkit/wsrpc v0.8.5-0.20250502134807-c57d3d995945 h1:zxcODLrFytOKmAd8ty8S/XK6WcIEJEgRBaL7sY/7l4Y= +github.com/smartcontractkit/wsrpc v0.8.5-0.20250502134807-c57d3d995945/go.mod h1:m3pdp17i4bD50XgktkzWetcV5yaLsi7Gunbv4ZgN6qg= github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -710,10 +740,16 @@ github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfj github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/umbracle/ethgo v0.1.3 h1:s8D7Rmphnt71zuqrgsGTMS5gTNbueGO1zKLh7qsFzTM= +github.com/umbracle/ethgo v0.1.3/go.mod h1:g9zclCLixH8liBI27Py82klDkW7Oo33AxUOr+M9lzrU= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 h1:10Nbw6cACsnQm7r34zlpJky+IzxVLRk6MKTS2d3Vp0E= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722/go.mod h1:c8J0h9aULj2i3umrfyestM6jCq0LK0U6ly6bWy96nd4= github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g= github.com/urfave/cli/v2 
v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fastjson v1.4.1 h1:hrltpHpIpkaxll8QltMU8c3QZ5+qIiCL8yKqPFJI/yE= +github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7EFWPsvP8o= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= @@ -825,6 +861,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1285,3 +1322,5 @@ honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml 
v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=