diff --git a/AGENTS.md b/AGENTS.md index 96e593fa..196b0636 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -46,12 +46,20 @@ term-challenge/ │ ├── submission.rs # Named submission registry and version tracking │ └── timeout_handler.rs # Review assignment timeout tracking ├── storage/ -│ ├── Cargo.toml # chain (sled) and local (SQLite) storage implementations +│ ├── Cargo.toml # native library, depends on platform-core + platform-challenge-sdk │ └── src/ -│ ├── lib.rs -│ ├── chain.rs -│ ├── local.rs -│ └── traits.rs +│ ├── lib.rs # Root module, re-exports, From impls for StorageError +│ ├── traits.rs # ChallengeStorage trait, StorageError, Result alias +│ ├── chain.rs # Chain storage (sled) +│ ├── local.rs # Local storage (SQLite) +│ ├── pg.rs # PostgreSQL connection pool (deadpool-postgres) +│ └── postgres/ +│ ├── mod.rs # Submodule declarations +│ ├── evaluations.rs # Evaluation CRUD using EvaluationResult (f64 score, UUID ChallengeId) +│ ├── leaderboard.rs # Leaderboard queries using WeightAssignment (f64 weight) +│ ├── submissions.rs # Submission storage with ChallengeId (UUID) and Hotkey (SS58) +│ ├── task_logs.rs # Task log storage and retrieval +│ └── validators.rs # Validator management with Hotkey (SS58 encoding) ├── cli/ │ ├── Cargo.toml # native binary, ratatui TUI │ └── src/ @@ -165,6 +173,9 @@ The `term-cli` crate is a **native binary** (NOT `no_std`) that provides a termi # Build CLI (native) cargo build --release -p term-cli +# Build storage library (native) +cargo build --release -p term-challenge-storage + # Build WASM module cargo build --release --target wasm32-unknown-unknown -p term-challenge-wasm @@ -194,7 +205,7 @@ Git hooks live in `.githooks/` and are activated with `git config core.hooksPath 5. **Host functions are the ONLY external interface.** No direct HTTP, no filesystem, no std::net. 6. **Do NOT add `#[allow(dead_code)]` broadly.** Fix unused code or remove it. 
-> **Note:** The `cli/` and `server/` crates are exempt from the `no_std` rule (rule 1) and the host-functions-only rule (rule 5) since they are native code that runs outside the WASM sandbox. Rules 2, 3, 4, and 6 still apply to both. +> **Note:** The `cli/`, `server/`, and `storage/` crates are exempt from the `no_std` rule (rule 1) and the host-functions-only rule (rule 5) since they are native code that runs outside the WASM sandbox. Rules 2, 3, 4, and 6 still apply to all. ## DO / DO NOT diff --git a/Cargo.lock b/Cargo.lock index 5ee8a34e..2cb24ce6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1306,6 +1306,43 @@ version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" +[[package]] +name = "deadpool" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0be2b1d1d6ec8d846f05e137292d0b89133caf95ef33695424c09568bdd39b1b" +dependencies = [ + "deadpool-runtime", + "lazy_static", + "num_cpus", + "serde", + "tokio", +] + +[[package]] +name = "deadpool-postgres" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d697d376cbfa018c23eb4caab1fd1883dd9c906a8c034e8d9a3cb06a7e0bef9" +dependencies = [ + "async-trait", + "deadpool", + "getrandom 0.2.17", + "serde", + "tokio", + "tokio-postgres", + "tracing", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" +dependencies = [ + "tokio", +] + [[package]] name = "debugid" version = "0.8.0" @@ -1602,7 +1639,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1886,7 +1923,7 @@ dependencies = [ 
"cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -2070,6 +2107,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + [[package]] name = "hex" version = "0.4.3" @@ -2515,7 +2558,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -2889,6 +2932,16 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + [[package]] name = "memchr" version = "2.8.0" @@ -2939,7 +2992,7 @@ checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.61.2", ] @@ -2966,7 +3019,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3007,6 +3060,34 @@ dependencies = [ "autocfg", ] +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" 
+dependencies = [ + "hermit-abi 0.5.2", + "libc", +] + +[[package]] +name = "objc2-core-foundation" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536" +dependencies = [ + "bitflags 2.11.0", +] + +[[package]] +name = "objc2-system-configuration" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7216bd11cbda54ccabcab84d523dc93b858ec75ecfb3a7d89513fa22464da396" +dependencies = [ + "objc2-core-foundation", +] + [[package]] name = "object" version = "0.30.4" @@ -3235,6 +3316,25 @@ dependencies = [ "indexmap 2.13.0", ] +[[package]] +name = "phf" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_shared", + "serde", +] + +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -3386,6 +3486,52 @@ dependencies = [ "serde", ] +[[package]] +name = "postgres-derive" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56df96f5394370d1b20e49de146f9e6c25aa9ae750f449c9d665eafecb3ccae6" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee9dd5fe15055d2b6806f4736aa0c9637217074e224bbec46d4041b91bb9491" +dependencies = [ + "base64", + "byteorder", + "bytes", + "fallible-iterator 0.2.0", + "hmac", + "md-5", + "memchr", + "rand 0.9.2", + "sha2 0.10.9", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b858f82211e84682fecd373f68e1ceae642d8d751a1ebd13f33de6257b3e20" +dependencies = [ + "bytes", + "chrono", + "fallible-iterator 0.2.0", + "postgres-derive", + "postgres-protocol", + "serde_core", + "serde_json", + "uuid", +] + [[package]] name = "potential_utf" version = "0.1.4" @@ -3544,7 +3690,7 @@ dependencies = [ "once_cell", "socket2 0.6.2", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -3944,7 +4090,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4338,6 +4484,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "siphasher" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" + [[package]] name = "sized-chunks" version = "0.6.5" @@ -4673,6 +4825,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + [[package]] name = "strsim" version = "0.11.1" @@ -4824,7 +4987,7 @@ dependencies = [ "getrandom 0.4.1", "once_cell", "rustix 1.1.3", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4866,14 +5029,19 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", + "deadpool-postgres", "hex", "platform-challenge-sdk", "platform-core", + "postgres-types", "rusqlite", "serde", + "serde_json", "sled", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.18", + "tokio", + "tokio-postgres", "tracing", "uuid", ] @@ -5040,6 +5208,32 @@ dependencies = [ "tokio", ] 
+[[package]] +name = "tokio-postgres" +version = "0.7.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcea47c8f71744367793f16c2db1f11cb859d28f436bdb4ca9193eb1f787ee42" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator 0.2.0", + "futures-channel", + "futures-util", + "log", + "parking_lot 0.12.5", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand 0.9.2", + "socket2 0.6.2", + "tokio", + "tokio-util", + "whoami", +] + [[package]] name = "tokio-rustls" version = "0.26.4" @@ -5413,6 +5607,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d" + [[package]] name = "unicode-segmentation" version = "1.12.0" @@ -5613,6 +5813,15 @@ version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" +[[package]] +name = "wasi" +version = "0.14.7+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" +dependencies = [ + "wasip2", +] + [[package]] name = "wasip2" version = "1.0.2+wasi-0.2.9" @@ -5631,6 +5840,15 @@ dependencies = [ "wit-bindgen", ] +[[package]] +name = "wasite" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fe902b4a6b8028a753d5424909b764ccf79b7a209eac9bf97e59cda9f71a42" +dependencies = [ + "wasi 0.14.7+wasi-0.2.4", +] + [[package]] name = "wasm-bindgen" version = "0.2.109" @@ -6273,6 +6491,19 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "whoami" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6a5b12f9df4f978d2cfdb1bd3bac52433f44393342d7ee9c25f5a1c14c0f45d" +dependencies = [ + "libc", + "libredox", + "objc2-system-configuration", + "wasite", + "web-sys", +] + [[package]] name = "widestring" version = "1.2.1" @@ -6301,7 +6532,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 3d904d0d..5e19fe84 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -10,13 +10,19 @@ description = "Storage layer for Term Challenge using platform SDK types" platform-core = { git = "https://github.com/PlatformNetwork/platform-v2", branch = "main" } platform-challenge-sdk = { git = "https://github.com/PlatformNetwork/platform-v2", branch = "main" } +tokio = { version = "1.40", features = ["full"] } +tokio-postgres = { version = "0.7", features = ["with-uuid-1", "with-chrono-0_4", "with-serde_json-1"] } +deadpool-postgres = { version = "0.14", features = ["serde"] } +postgres-types = { version = "0.2", features = ["derive", "with-uuid-1", "with-chrono-0_4", "with-serde_json-1"] } + sled = "0.34" rusqlite = { version = "0.31", features = ["bundled"] } serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" bincode = "1.3" +uuid = { version = "1.10", features = ["v4", "serde"] } chrono = { version = "0.4", features = ["serde"] } -thiserror = "1.0" -uuid = { version = "1", features = ["v4"] } +thiserror = "2.0" tracing = "0.1" hex = "0.4" diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 5de6b395..c1d3ff75 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,5 +1,7 @@ pub mod chain; pub mod local; +pub mod pg; +pub mod postgres; pub mod traits; pub use chain::ChainStorage; diff --git a/storage/src/pg.rs b/storage/src/pg.rs new file mode 100644 index 00000000..0de273eb --- /dev/null +++ 
b/storage/src/pg.rs @@ -0,0 +1,62 @@ +use std::fmt; + +use deadpool_postgres::{Config, CreatePoolError, ManagerConfig, Pool, RecyclingMethod, Runtime}; +use serde::Deserialize; +use tokio_postgres::NoTls; + +pub type PgPool = Pool; + +#[derive(Clone, Deserialize)] +pub struct PgConfig { + pub host: String, + pub port: u16, + pub user: String, + pub password: String, + pub dbname: String, + #[serde(default = "default_pool_size")] + pub pool_size: usize, +} + +impl fmt::Debug for PgConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PgConfig") + .field("host", &self.host) + .field("port", &self.port) + .field("user", &self.user) + .field("password", &"[REDACTED]") + .field("dbname", &self.dbname) + .field("pool_size", &self.pool_size) + .finish() + } +} + +fn default_pool_size() -> usize { + 16 +} + +impl Default for PgConfig { + fn default() -> Self { + Self { + host: "localhost".to_string(), + port: 5432, + user: "postgres".to_string(), + password: "postgres".to_string(), + dbname: "term_challenge".to_string(), + pool_size: default_pool_size(), + } + } +} + +pub fn create_pool(cfg: &PgConfig) -> Result { + let mut config = Config::new(); + config.host = Some(cfg.host.clone()); + config.port = Some(cfg.port); + config.user = Some(cfg.user.clone()); + config.password = Some(cfg.password.clone()); + config.dbname = Some(cfg.dbname.clone()); + config.manager = Some(ManagerConfig { + recycling_method: RecyclingMethod::Fast, + }); + + config.create_pool(Some(Runtime::Tokio1), NoTls) +} diff --git a/storage/src/postgres/evaluations.rs b/storage/src/postgres/evaluations.rs new file mode 100644 index 00000000..70ee8837 --- /dev/null +++ b/storage/src/postgres/evaluations.rs @@ -0,0 +1,214 @@ +use std::collections::HashMap; + +use platform_challenge_sdk::types::EvaluationResult; +use platform_core::{ChallengeId, Hotkey}; +use uuid::Uuid; + +use crate::pg::PgPool; +use crate::{Result, StorageError}; + +pub async fn insert_evaluation( + 
pool: &PgPool, + challenge_id: &ChallengeId, + validator: &Hotkey, + result: &EvaluationResult, +) -> Result<()> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + let validator_ss58 = validator.to_ss58(); + let metrics_json = serde_json::to_value(&result.metrics)?; + + client + .execute( + "INSERT INTO evaluations ( + job_id, challenge_id, validator_hotkey, agent_hash, + score, metrics, logs, execution_time_ms, evaluated_at + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ON CONFLICT (job_id) DO UPDATE SET + score = EXCLUDED.score, + metrics = EXCLUDED.metrics, + logs = EXCLUDED.logs, + execution_time_ms = EXCLUDED.execution_time_ms, + evaluated_at = EXCLUDED.evaluated_at", + &[ + &result.job_id, + &challenge_uuid, + &validator_ss58, + &result.agent_hash, + &result.score, + &metrics_json, + &result.logs, + &(result.execution_time_ms as i64), + &result.timestamp, + ], + ) + .await?; + + Ok(()) +} + +pub async fn get_evaluation(pool: &PgPool, job_id: &Uuid) -> Result { + let client = pool.get().await?; + let row = client + .query_opt( + "SELECT job_id, agent_hash, score, metrics, logs, execution_time_ms, evaluated_at + FROM evaluations WHERE job_id = $1", + &[job_id], + ) + .await? 
+ .ok_or_else(|| StorageError::NotFound(format!("evaluation {}", job_id)))?; + + let metrics_json: serde_json::Value = row.get("metrics"); + let metrics: HashMap = serde_json::from_value(metrics_json)?; + let execution_time_ms: i64 = row.get("execution_time_ms"); + + Ok(EvaluationResult { + job_id: row.get("job_id"), + agent_hash: row.get("agent_hash"), + score: row.get("score"), + metrics, + logs: row.get("logs"), + execution_time_ms: execution_time_ms as u64, + timestamp: row.get("evaluated_at"), + }) +} + +pub async fn list_evaluations_by_challenge( + pool: &PgPool, + challenge_id: &ChallengeId, + limit: i64, + offset: i64, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + let rows = client + .query( + "SELECT job_id, agent_hash, score, metrics, logs, execution_time_ms, evaluated_at + FROM evaluations + WHERE challenge_id = $1 + ORDER BY evaluated_at DESC + LIMIT $2 OFFSET $3", + &[&challenge_uuid, &limit, &offset], + ) + .await?; + + let mut results = Vec::with_capacity(rows.len()); + for row in &rows { + let metrics_json: serde_json::Value = row.get("metrics"); + let metrics: HashMap = serde_json::from_value(metrics_json)?; + let execution_time_ms: i64 = row.get("execution_time_ms"); + + results.push(EvaluationResult { + job_id: row.get("job_id"), + agent_hash: row.get("agent_hash"), + score: row.get("score"), + metrics, + logs: row.get("logs"), + execution_time_ms: execution_time_ms as u64, + timestamp: row.get("evaluated_at"), + }); + } + + Ok(results) +} + +pub async fn list_evaluations_by_agent( + pool: &PgPool, + agent_hash: &str, +) -> Result> { + let client = pool.get().await?; + + let rows = client + .query( + "SELECT job_id, agent_hash, score, metrics, logs, execution_time_ms, evaluated_at + FROM evaluations + WHERE agent_hash = $1 + ORDER BY evaluated_at DESC", + &[&agent_hash], + ) + .await?; + + let mut results = Vec::with_capacity(rows.len()); + for row in &rows { + let metrics_json: 
serde_json::Value = row.get("metrics"); + let metrics: HashMap = serde_json::from_value(metrics_json)?; + let execution_time_ms: i64 = row.get("execution_time_ms"); + + results.push(EvaluationResult { + job_id: row.get("job_id"), + agent_hash: row.get("agent_hash"), + score: row.get("score"), + metrics, + logs: row.get("logs"), + execution_time_ms: execution_time_ms as u64, + timestamp: row.get("evaluated_at"), + }); + } + + Ok(results) +} + +pub async fn get_latest_evaluation_for_agent( + pool: &PgPool, + challenge_id: &ChallengeId, + agent_hash: &str, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + let row = client + .query_opt( + "SELECT job_id, agent_hash, score, metrics, logs, execution_time_ms, evaluated_at + FROM evaluations + WHERE challenge_id = $1 AND agent_hash = $2 + ORDER BY evaluated_at DESC + LIMIT 1", + &[&challenge_uuid, &agent_hash], + ) + .await?; + + match row { + Some(row) => { + let metrics_json: serde_json::Value = row.get("metrics"); + let metrics: HashMap = serde_json::from_value(metrics_json)?; + let execution_time_ms: i64 = row.get("execution_time_ms"); + + Ok(Some(EvaluationResult { + job_id: row.get("job_id"), + agent_hash: row.get("agent_hash"), + score: row.get("score"), + metrics, + logs: row.get("logs"), + execution_time_ms: execution_time_ms as u64, + timestamp: row.get("evaluated_at"), + })) + } + None => Ok(None), + } +} + +pub async fn delete_evaluation(pool: &PgPool, job_id: &Uuid) -> Result { + let client = pool.get().await?; + let rows_affected = client + .execute("DELETE FROM evaluations WHERE job_id = $1", &[job_id]) + .await?; + Ok(rows_affected > 0) +} + +pub async fn count_evaluations_by_challenge( + pool: &PgPool, + challenge_id: &ChallengeId, +) -> Result { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + let row = client + .query_one( + "SELECT COUNT(*) as count FROM evaluations WHERE challenge_id = $1", + &[&challenge_uuid], + ) + 
.await?; + + Ok(row.get("count")) +} diff --git a/storage/src/postgres/leaderboard.rs b/storage/src/postgres/leaderboard.rs new file mode 100644 index 00000000..6a7f5fbe --- /dev/null +++ b/storage/src/postgres/leaderboard.rs @@ -0,0 +1,188 @@ +use platform_challenge_sdk::types::WeightAssignment; +use platform_core::{ChallengeId, Hotkey}; +use uuid::Uuid; + +use crate::pg::PgPool; +use crate::Result; + +#[derive(Clone, Debug)] +pub struct LeaderboardEntry { + pub rank: i32, + pub hotkey_ss58: String, + pub score: f64, + pub weight: f64, + pub submissions_count: i64, + pub last_evaluated_at: Option>, +} + +pub async fn upsert_leaderboard_entry( + pool: &PgPool, + challenge_id: &ChallengeId, + hotkey: &Hotkey, + score: f64, + weight: f64, +) -> Result<()> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + let hotkey_ss58 = hotkey.to_ss58(); + + client + .execute( + "INSERT INTO leaderboard (challenge_id, hotkey, score, weight, updated_at) + VALUES ($1, $2, $3, $4, NOW()) + ON CONFLICT (challenge_id, hotkey) DO UPDATE SET + score = EXCLUDED.score, + weight = EXCLUDED.weight, + updated_at = NOW()", + &[&challenge_uuid, &hotkey_ss58, &score, &weight], + ) + .await?; + + Ok(()) +} + +pub async fn get_leaderboard( + pool: &PgPool, + challenge_id: &ChallengeId, + limit: i64, + offset: i64, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + let rows = client + .query( + "SELECT hotkey, score, weight, + COALESCE(submissions_count, 0) as submissions_count, + updated_at, + ROW_NUMBER() OVER (ORDER BY score DESC) as rank + FROM leaderboard + WHERE challenge_id = $1 + ORDER BY score DESC + LIMIT $2 OFFSET $3", + &[&challenge_uuid, &limit, &offset], + ) + .await?; + + let mut entries = Vec::with_capacity(rows.len()); + for row in &rows { + let rank: i64 = row.get("rank"); + entries.push(LeaderboardEntry { + rank: rank as i32, + hotkey_ss58: row.get("hotkey"), + score: row.get("score"), + weight: 
row.get("weight"), + submissions_count: row.get("submissions_count"), + last_evaluated_at: row.get("updated_at"), + }); + } + + Ok(entries) +} + +pub async fn get_weight_assignments( + pool: &PgPool, + challenge_id: &ChallengeId, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + let rows = client + .query( + "SELECT hotkey, weight FROM leaderboard + WHERE challenge_id = $1 AND weight > 0.0 + ORDER BY weight DESC", + &[&challenge_uuid], + ) + .await?; + + let mut assignments = Vec::with_capacity(rows.len()); + for row in &rows { + let hotkey: String = row.get("hotkey"); + let weight: f64 = row.get("weight"); + assignments.push(WeightAssignment::new(hotkey, weight)); + } + + Ok(assignments) +} + +pub async fn set_weight_assignments( + pool: &PgPool, + challenge_id: &ChallengeId, + assignments: &[WeightAssignment], +) -> Result<()> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + for assignment in assignments { + client + .execute( + "INSERT INTO leaderboard (challenge_id, hotkey, score, weight, updated_at) + VALUES ($1, $2, 0.0, $3, NOW()) + ON CONFLICT (challenge_id, hotkey) DO UPDATE SET + weight = EXCLUDED.weight, + updated_at = NOW()", + &[&challenge_uuid, &assignment.hotkey, &assignment.weight], + ) + .await?; + } + + Ok(()) +} + +pub async fn get_entry_by_hotkey( + pool: &PgPool, + challenge_id: &ChallengeId, + hotkey: &Hotkey, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + let hotkey_ss58 = hotkey.to_ss58(); + + let row = client + .query_opt( + "SELECT hotkey, score, weight, + COALESCE(submissions_count, 0) as submissions_count, + updated_at, + (SELECT COUNT(*) + 1 FROM leaderboard lb2 + WHERE lb2.challenge_id = $1 AND lb2.score > leaderboard.score) as rank + FROM leaderboard + WHERE challenge_id = $1 AND hotkey = $2", + &[&challenge_uuid, &hotkey_ss58], + ) + .await?; + + match row { + Some(row) => { + let rank: i64 = 
row.get("rank"); + Ok(Some(LeaderboardEntry { + rank: rank as i32, + hotkey_ss58: row.get("hotkey"), + score: row.get("score"), + weight: row.get("weight"), + submissions_count: row.get("submissions_count"), + last_evaluated_at: row.get("updated_at"), + })) + } + None => Ok(None), + } +} + +pub async fn delete_leaderboard_entry( + pool: &PgPool, + challenge_id: &ChallengeId, + hotkey: &Hotkey, +) -> Result { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + let hotkey_ss58 = hotkey.to_ss58(); + + let rows_affected = client + .execute( + "DELETE FROM leaderboard WHERE challenge_id = $1 AND hotkey = $2", + &[&challenge_uuid, &hotkey_ss58], + ) + .await?; + + Ok(rows_affected > 0) +} diff --git a/storage/src/postgres/mod.rs b/storage/src/postgres/mod.rs new file mode 100644 index 00000000..a1838c16 --- /dev/null +++ b/storage/src/postgres/mod.rs @@ -0,0 +1,5 @@ +pub mod evaluations; +pub mod leaderboard; +pub mod submissions; +pub mod task_logs; +pub mod validators; diff --git a/storage/src/postgres/submissions.rs b/storage/src/postgres/submissions.rs new file mode 100644 index 00000000..27d80ae4 --- /dev/null +++ b/storage/src/postgres/submissions.rs @@ -0,0 +1,228 @@ +use chrono::{DateTime, Utc}; +use platform_core::{ChallengeId, Hotkey}; +use uuid::Uuid; + +use crate::pg::PgPool; +use crate::{Result, StorageError}; + +#[derive(Clone, Debug)] +pub struct StoredSubmission { + pub id: Uuid, + pub challenge_id: Uuid, + pub miner_hotkey_ss58: String, + pub agent_hash: String, + pub epoch: i64, + pub score: Option, + pub status: String, + pub submitted_at: DateTime, + pub evaluated_at: Option>, +} + +pub async fn insert_submission( + pool: &PgPool, + challenge_id: &ChallengeId, + miner_hotkey: &Hotkey, + agent_hash: &str, + epoch: u64, +) -> Result { + let client = pool.get().await?; + let id = Uuid::new_v4(); + let challenge_uuid: Uuid = challenge_id.0; + let hotkey_ss58 = miner_hotkey.to_ss58(); + let epoch_i64 = epoch as i64; + + client 
+ .execute( + "INSERT INTO submissions (id, challenge_id, miner_hotkey, agent_hash, epoch, status, submitted_at) + VALUES ($1, $2, $3, $4, $5, 'pending', NOW())", + &[&id, &challenge_uuid, &hotkey_ss58, &agent_hash, &epoch_i64], + ) + .await?; + + Ok(id) +} + +pub async fn get_submission(pool: &PgPool, id: &Uuid) -> Result { + let client = pool.get().await?; + + let row = client + .query_opt( + "SELECT id, challenge_id, miner_hotkey, agent_hash, epoch, score, status, + submitted_at, evaluated_at + FROM submissions WHERE id = $1", + &[id], + ) + .await? + .ok_or_else(|| StorageError::NotFound(format!("submission {}", id)))?; + + Ok(StoredSubmission { + id: row.get("id"), + challenge_id: row.get("challenge_id"), + miner_hotkey_ss58: row.get("miner_hotkey"), + agent_hash: row.get("agent_hash"), + epoch: row.get("epoch"), + score: row.get("score"), + status: row.get("status"), + submitted_at: row.get("submitted_at"), + evaluated_at: row.get("evaluated_at"), + }) +} + +pub async fn update_submission_score( + pool: &PgPool, + id: &Uuid, + score: f64, + status: &str, +) -> Result<()> { + let client = pool.get().await?; + + client + .execute( + "UPDATE submissions SET score = $2, status = $3, evaluated_at = NOW() + WHERE id = $1", + &[id, &score, &status], + ) + .await?; + + Ok(()) +} + +pub async fn update_submission_status(pool: &PgPool, id: &Uuid, status: &str) -> Result<()> { + let client = pool.get().await?; + + client + .execute( + "UPDATE submissions SET status = $2 WHERE id = $1", + &[id, &status], + ) + .await?; + + Ok(()) +} + +pub async fn list_submissions_by_challenge( + pool: &PgPool, + challenge_id: &ChallengeId, + limit: i64, + offset: i64, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + let rows = client + .query( + "SELECT id, challenge_id, miner_hotkey, agent_hash, epoch, score, status, + submitted_at, evaluated_at + FROM submissions + WHERE challenge_id = $1 + ORDER BY submitted_at DESC + LIMIT $2 OFFSET 
$3", + &[&challenge_uuid, &limit, &offset], + ) + .await?; + + let mut submissions = Vec::with_capacity(rows.len()); + for row in &rows { + submissions.push(StoredSubmission { + id: row.get("id"), + challenge_id: row.get("challenge_id"), + miner_hotkey_ss58: row.get("miner_hotkey"), + agent_hash: row.get("agent_hash"), + epoch: row.get("epoch"), + score: row.get("score"), + status: row.get("status"), + submitted_at: row.get("submitted_at"), + evaluated_at: row.get("evaluated_at"), + }); + } + + Ok(submissions) +} + +pub async fn list_submissions_by_miner( + pool: &PgPool, + challenge_id: &ChallengeId, + miner_hotkey: &Hotkey, + limit: i64, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + let hotkey_ss58 = miner_hotkey.to_ss58(); + + let rows = client + .query( + "SELECT id, challenge_id, miner_hotkey, agent_hash, epoch, score, status, + submitted_at, evaluated_at + FROM submissions + WHERE challenge_id = $1 AND miner_hotkey = $2 + ORDER BY submitted_at DESC + LIMIT $3", + &[&challenge_uuid, &hotkey_ss58, &limit], + ) + .await?; + + let mut submissions = Vec::with_capacity(rows.len()); + for row in &rows { + submissions.push(StoredSubmission { + id: row.get("id"), + challenge_id: row.get("challenge_id"), + miner_hotkey_ss58: row.get("miner_hotkey"), + agent_hash: row.get("agent_hash"), + epoch: row.get("epoch"), + score: row.get("score"), + status: row.get("status"), + submitted_at: row.get("submitted_at"), + evaluated_at: row.get("evaluated_at"), + }); + } + + Ok(submissions) +} + +pub async fn get_last_submission_epoch( + pool: &PgPool, + challenge_id: &ChallengeId, + miner_hotkey: &Hotkey, +) -> Result> { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + let hotkey_ss58 = miner_hotkey.to_ss58(); + + let row = client + .query_opt( + "SELECT MAX(epoch) as last_epoch FROM submissions + WHERE challenge_id = $1 AND miner_hotkey = $2", + &[&challenge_uuid, &hotkey_ss58], + ) + .await?; + + 
match row { + Some(row) => Ok(row.get("last_epoch")), + None => Ok(None), + } +} + +pub async fn count_submissions_by_challenge( + pool: &PgPool, + challenge_id: &ChallengeId, +) -> Result { + let client = pool.get().await?; + let challenge_uuid: Uuid = challenge_id.0; + + let row = client + .query_one( + "SELECT COUNT(*) as count FROM submissions WHERE challenge_id = $1", + &[&challenge_uuid], + ) + .await?; + + Ok(row.get("count")) +} + +pub async fn delete_submission(pool: &PgPool, id: &Uuid) -> Result { + let client = pool.get().await?; + let rows_affected = client + .execute("DELETE FROM submissions WHERE id = $1", &[id]) + .await?; + Ok(rows_affected > 0) +} diff --git a/storage/src/postgres/task_logs.rs b/storage/src/postgres/task_logs.rs new file mode 100644 index 00000000..62e0eda3 --- /dev/null +++ b/storage/src/postgres/task_logs.rs @@ -0,0 +1,231 @@ +use chrono::{DateTime, Utc}; +use platform_core::{ChallengeId, Hotkey}; +use uuid::Uuid; + +use crate::pg::PgPool; +use crate::Result; + +pub type TaskLogRecord = (String, bool, f64, u64, Option, Option); + +#[derive(Clone, Debug)] +pub struct StoredTaskLog { + pub id: Uuid, + pub submission_id: Uuid, + pub challenge_id: Uuid, + pub miner_hotkey_ss58: String, + pub task_id: String, + pub passed: bool, + pub score: f64, + pub execution_time_ms: i64, + pub output_preview: Option, + pub error: Option, + pub created_at: DateTime, +} + +pub async fn insert_task_log( + pool: &PgPool, + submission_id: &Uuid, + challenge_id: &ChallengeId, + miner_hotkey: &Hotkey, + task_id: &str, + passed: bool, + score: f64, + execution_time_ms: u64, + output_preview: Option<&str>, + error: Option<&str>, +) -> Result { + let client = pool.get().await?; + let id = Uuid::new_v4(); + let challenge_uuid: Uuid = challenge_id.0; + let hotkey_ss58 = miner_hotkey.to_ss58(); + let exec_time_i64 = execution_time_ms as i64; + + client + .execute( + "INSERT INTO task_logs ( + id, submission_id, challenge_id, miner_hotkey, task_id, + passed, 
score, execution_time_ms, output_preview, error, created_at
+            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW())",
+            &[
+                &id,
+                submission_id,
+                &challenge_uuid,
+                &hotkey_ss58,
+                &task_id,
+                &passed,
+                &score,
+                &exec_time_i64,
+                &output_preview,
+                &error,
+            ],
+        )
+        .await?;
+
+    Ok(id)
+}
+
+/// Inserts many task logs for one submission, returning the generated ids
+/// in input order.
+///
+/// NOTE(review): the inserts are issued one-by-one and are NOT wrapped in a
+/// transaction — a mid-batch failure leaves the earlier rows committed.
+/// Confirm whether callers rely on all-or-nothing semantics.
+pub async fn insert_task_logs_batch(
+    pool: &PgPool,
+    submission_id: &Uuid,
+    challenge_id: &ChallengeId,
+    miner_hotkey: &Hotkey,
+    logs: &[TaskLogRecord],
+) -> Result<Vec<Uuid>> {
+    let client = pool.get().await?;
+    let challenge_uuid: Uuid = challenge_id.0;
+    let hotkey_ss58 = miner_hotkey.to_ss58();
+
+    let mut ids = Vec::with_capacity(logs.len());
+
+    for (task_id, passed, score, execution_time_ms, output_preview, error) in logs {
+        let id = Uuid::new_v4();
+        // Signed BIGINT column; see insert_task_log for the cast caveat.
+        let exec_time_i64 = *execution_time_ms as i64;
+        let output_ref = output_preview.as_deref();
+        let error_ref = error.as_deref();
+
+        client
+            .execute(
+                "INSERT INTO task_logs (
+                    id, submission_id, challenge_id, miner_hotkey, task_id,
+                    passed, score, execution_time_ms, output_preview, error, created_at
+                ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW())",
+                &[
+                    &id,
+                    submission_id,
+                    &challenge_uuid,
+                    &hotkey_ss58,
+                    task_id,
+                    passed,
+                    score,
+                    &exec_time_i64,
+                    &output_ref,
+                    &error_ref,
+                ],
+            )
+            .await?;
+
+        ids.push(id);
+    }
+
+    Ok(ids)
+}
+
+/// Fetches every task log for a submission, oldest first.
+pub async fn get_task_logs_by_submission(
+    pool: &PgPool,
+    submission_id: &Uuid,
+) -> Result<Vec<StoredTaskLog>> {
+    let client = pool.get().await?;
+
+    let rows = client
+        .query(
+            "SELECT id, submission_id, challenge_id, miner_hotkey, task_id,
+                    passed, score, execution_time_ms, output_preview, error, created_at
+             FROM task_logs
+             WHERE submission_id = $1
+             ORDER BY created_at ASC",
+            &[submission_id],
+        )
+        .await?;
+
+    let mut logs = Vec::with_capacity(rows.len());
+    for row in &rows {
+        logs.push(StoredTaskLog {
+            id: row.get("id"),
+            submission_id: row.get("submission_id"),
+            challenge_id: row.get("challenge_id"),
+            miner_hotkey_ss58: row.get("miner_hotkey"),
+            
task_id: row.get("task_id"),
+            passed: row.get("passed"),
+            score: row.get("score"),
+            execution_time_ms: row.get("execution_time_ms"),
+            output_preview: row.get("output_preview"),
+            error: row.get("error"),
+            created_at: row.get("created_at"),
+        });
+    }
+
+    Ok(logs)
+}
+
+/// Fetches the most recent task logs for one miner on a challenge,
+/// newest first, capped at `limit` rows.
+pub async fn get_task_logs_by_miner(
+    pool: &PgPool,
+    challenge_id: &ChallengeId,
+    miner_hotkey: &Hotkey,
+    limit: i64,
+) -> Result<Vec<StoredTaskLog>> {
+    let client = pool.get().await?;
+    let challenge_uuid: Uuid = challenge_id.0;
+    let hotkey_ss58 = miner_hotkey.to_ss58();
+
+    let rows = client
+        .query(
+            "SELECT id, submission_id, challenge_id, miner_hotkey, task_id,
+                    passed, score, execution_time_ms, output_preview, error, created_at
+             FROM task_logs
+             WHERE challenge_id = $1 AND miner_hotkey = $2
+             ORDER BY created_at DESC
+             LIMIT $3",
+            &[&challenge_uuid, &hotkey_ss58, &limit],
+        )
+        .await?;
+
+    let mut logs = Vec::with_capacity(rows.len());
+    for row in &rows {
+        logs.push(StoredTaskLog {
+            id: row.get("id"),
+            submission_id: row.get("submission_id"),
+            challenge_id: row.get("challenge_id"),
+            miner_hotkey_ss58: row.get("miner_hotkey"),
+            task_id: row.get("task_id"),
+            passed: row.get("passed"),
+            score: row.get("score"),
+            execution_time_ms: row.get("execution_time_ms"),
+            output_preview: row.get("output_preview"),
+            error: row.get("error"),
+            created_at: row.get("created_at"),
+        });
+    }
+
+    Ok(logs)
+}
+
+/// Computes the fraction of this miner's task-log rows with `passed = true`
+/// for a challenge; returns 0.0 when the miner has no task logs.
+pub async fn get_pass_rate_by_miner(
+    pool: &PgPool,
+    challenge_id: &ChallengeId,
+    miner_hotkey: &Hotkey,
+) -> Result<f64> {
+    let client = pool.get().await?;
+    let challenge_uuid: Uuid = challenge_id.0;
+    let hotkey_ss58 = miner_hotkey.to_ss58();
+
+    // Single round-trip: FILTER computes the passed count alongside the total.
+    let row = client
+        .query_one(
+            "SELECT
+                 COUNT(*) FILTER (WHERE passed = true) as passed_count,
+                 COUNT(*) as total_count
+             FROM task_logs
+             WHERE challenge_id = $1 AND miner_hotkey = $2",
+            &[&challenge_uuid, &hotkey_ss58],
+        )
+        .await?;
+
+    let passed: i64 = row.get("passed_count");
+    let total: i64 = row.get("total_count");
+
+    // Guard against division by zero when no rows matched.
+    if total == 0 {
+        
return Ok(0.0);
+    }
+
+    Ok(passed as f64 / total as f64)
+}
+
+/// Deletes all task logs for a submission; returns the number of rows removed.
+pub async fn delete_task_logs_by_submission(pool: &PgPool, submission_id: &Uuid) -> Result<u64> {
+    let client = pool.get().await?;
+    let rows_affected = client
+        .execute(
+            "DELETE FROM task_logs WHERE submission_id = $1",
+            &[submission_id],
+        )
+        .await?;
+    Ok(rows_affected)
+}
diff --git a/storage/src/postgres/validators.rs b/storage/src/postgres/validators.rs
new file mode 100644
index 00000000..360f61ef
--- /dev/null
+++ b/storage/src/postgres/validators.rs
@@ -0,0 +1,178 @@
+use chrono::{DateTime, Utc};
+use platform_core::Hotkey;
+
+use crate::pg::PgPool;
+use crate::{Result, StorageError};
+
+/// One validator row as stored in the `validators` table.
+#[derive(Clone, Debug)]
+pub struct StoredValidator {
+    // SS58-encoded hotkey; the table's conflict target, i.e. its key.
+    pub hotkey_ss58: String,
+    // Signed BIGINT in the schema; written from a u64 cast in upsert_validator.
+    pub stake: i64,
+    pub is_active: bool,
+    pub last_seen: DateTime<Utc>,
+    pub peer_id: Option<String>,
+    pub registered_at: DateTime<Utc>,
+}
+
+/// Inserts or refreshes a validator: marks it active, bumps `last_seen`,
+/// replaces the stake, and keeps the previously stored peer_id when the
+/// caller passes `None` (via COALESCE).
+pub async fn upsert_validator(
+    pool: &PgPool,
+    hotkey: &Hotkey,
+    stake: u64,
+    peer_id: Option<&str>,
+) -> Result<()> {
+    let client = pool.get().await?;
+    let hotkey_ss58 = hotkey.to_ss58();
+    // NOTE(review): stake values above i64::MAX would wrap negative here —
+    // assumed out of range in practice; confirm against chain stake units.
+    let stake_i64 = stake as i64;
+
+    client
+        .execute(
+            "INSERT INTO validators (hotkey, stake, is_active, last_seen, peer_id, registered_at)
+             VALUES ($1, $2, true, NOW(), $3, NOW())
+             ON CONFLICT (hotkey) DO UPDATE SET
+                 stake = EXCLUDED.stake,
+                 is_active = true,
+                 last_seen = NOW(),
+                 peer_id = COALESCE(EXCLUDED.peer_id, validators.peer_id)",
+            &[&hotkey_ss58, &stake_i64, &peer_id],
+        )
+        .await?;
+
+    Ok(())
+}
+
+/// Looks up a validator by hotkey; `None` when it is not registered.
+pub async fn get_validator(pool: &PgPool, hotkey: &Hotkey) -> Result<Option<StoredValidator>> {
+    let client = pool.get().await?;
+    let hotkey_ss58 = hotkey.to_ss58();
+
+    let row = client
+        .query_opt(
+            "SELECT hotkey, stake, is_active, last_seen, peer_id, registered_at
+             FROM validators WHERE hotkey = $1",
+            &[&hotkey_ss58],
+        )
+        .await?;
+
+    match row {
+        Some(row) => Ok(Some(StoredValidator {
+            hotkey_ss58: row.get("hotkey"),
+            stake: row.get("stake"),
+            is_active: row.get("is_active"),
+            last_seen: 
row.get("last_seen"),
+            peer_id: row.get("peer_id"),
+            registered_at: row.get("registered_at"),
+        })),
+        None => Ok(None),
+    }
+}
+
+/// Lists validators currently marked active, ordered by descending stake.
+pub async fn list_active_validators(pool: &PgPool) -> Result<Vec<StoredValidator>> {
+    let client = pool.get().await?;
+
+    let rows = client
+        .query(
+            "SELECT hotkey, stake, is_active, last_seen, peer_id, registered_at
+             FROM validators
+             WHERE is_active = true
+             ORDER BY stake DESC",
+            &[],
+        )
+        .await?;
+
+    let mut validators = Vec::with_capacity(rows.len());
+    for row in &rows {
+        validators.push(StoredValidator {
+            hotkey_ss58: row.get("hotkey"),
+            stake: row.get("stake"),
+            is_active: row.get("is_active"),
+            last_seen: row.get("last_seen"),
+            peer_id: row.get("peer_id"),
+            registered_at: row.get("registered_at"),
+        });
+    }
+
+    Ok(validators)
+}
+
+/// Lists every validator, active or not, ordered by descending stake.
+pub async fn list_all_validators(pool: &PgPool) -> Result<Vec<StoredValidator>> {
+    let client = pool.get().await?;
+
+    let rows = client
+        .query(
+            "SELECT hotkey, stake, is_active, last_seen, peer_id, registered_at
+             FROM validators
+             ORDER BY stake DESC",
+            &[],
+        )
+        .await?;
+
+    let mut validators = Vec::with_capacity(rows.len());
+    for row in &rows {
+        validators.push(StoredValidator {
+            hotkey_ss58: row.get("hotkey"),
+            stake: row.get("stake"),
+            is_active: row.get("is_active"),
+            last_seen: row.get("last_seen"),
+            peer_id: row.get("peer_id"),
+            registered_at: row.get("registered_at"),
+        });
+    }
+
+    Ok(validators)
+}
+
+/// Marks a validator inactive; returns whether a row was actually updated.
+/// The row is kept (soft delete) so registration history survives.
+pub async fn deactivate_validator(pool: &PgPool, hotkey: &Hotkey) -> Result<bool> {
+    let client = pool.get().await?;
+    let hotkey_ss58 = hotkey.to_ss58();
+
+    let rows_affected = client
+        .execute(
+            "UPDATE validators SET is_active = false WHERE hotkey = $1",
+            &[&hotkey_ss58],
+        )
+        .await?;
+
+    Ok(rows_affected > 0)
+}
+
+/// Touches a validator's `last_seen` timestamp; silently a no-op when the
+/// hotkey is unknown (the UPDATE matches zero rows).
+pub async fn update_last_seen(pool: &PgPool, hotkey: &Hotkey) -> Result<()> {
+    let client = pool.get().await?;
+    let hotkey_ss58 = hotkey.to_ss58();
+
+    client
+        .execute(
+            "UPDATE validators SET last_seen = NOW() WHERE hotkey = $1",
+            &[&hotkey_ss58],
+        )
+        .await?;
+
+    Ok(())
+}
+
+/// Counts validators currently marked active.
+pub async fn count_active_validators(pool: &PgPool) -> Result<i64> {
+    let client = pool.get().await?;
+
+    let row = client
+        .query_one(
+            "SELECT COUNT(*) as count FROM validators WHERE is_active = true",
+            &[],
+        )
+        .await?;
+
+    Ok(row.get("count"))
+}
+
+/// Hard-deletes a validator row; returns whether one was removed.
+/// Contrast with deactivate_validator, which soft-deletes.
+pub async fn delete_validator(pool: &PgPool, hotkey: &Hotkey) -> Result<bool> {
+    let client = pool.get().await?;
+    let hotkey_ss58 = hotkey.to_ss58();
+
+    let rows_affected = client
+        .execute("DELETE FROM validators WHERE hotkey = $1", &[&hotkey_ss58])
+        .await?;
+
+    Ok(rows_affected > 0)
+}
+
+/// Parses an SS58 string into a Hotkey, mapping failure to
+/// StorageError::InvalidData.
+// NOTE(review): this function awaits nothing — `async` appears unnecessary;
+// confirm callers before dropping it.
+pub async fn hotkey_from_ss58(ss58: &str) -> Result<Hotkey> {
+    Hotkey::from_ss58(ss58)
+        .ok_or_else(|| StorageError::InvalidData(format!("invalid SS58 address: {}", ss58)))
+}
diff --git a/storage/src/traits.rs b/storage/src/traits.rs
index b5f06e0e..2c7ce051 100644
--- a/storage/src/traits.rs
+++ b/storage/src/traits.rs
@@ -14,6 +14,24 @@ pub enum StorageError {
     InvalidData(String),
 }
 
+// Driver and pool errors all surface as StorageError::Database, flattened
+// to their display strings so the error enum stays dependency-free.
+impl From<tokio_postgres::Error> for StorageError {
+    fn from(err: tokio_postgres::Error) -> Self {
+        StorageError::Database(err.to_string())
+    }
+}
+
+impl From<deadpool_postgres::PoolError> for StorageError {
+    fn from(err: deadpool_postgres::PoolError) -> Self {
+        StorageError::Database(err.to_string())
+    }
+}
+
+impl From<serde_json::Error> for StorageError {
+    fn from(err: serde_json::Error) -> Self {
+        StorageError::Serialization(err.to_string())
+    }
+}
+
 pub type Result<T> = std::result::Result<T, StorageError>;
 
 pub trait ChallengeStorage: Send + Sync {