diff --git a/crates/findex/.rustfmt.toml b/crates/findex/.rustfmt.toml new file mode 100644 index 00000000..6873ebc3 --- /dev/null +++ b/crates/findex/.rustfmt.toml @@ -0,0 +1,43 @@ +# Specifies which edition is used by the parser. +# Default value: "2015" +edition = "2021" + +# How imports should be grouped into use statements. Imports will be merged or split to the configured level of granularity. +# Default value: Preserve +imports_granularity = "Crate" + +# Format the metavariable matching patterns in macros. +# Default value: false +format_macro_matchers = true + +# Format string literals where necessary +# Default value: false +format_strings = true + +# Reorder impl items. type and const are put first, then macros and methods. +# Default value: false +reorder_impl_items = true + +# Controls the strategy for how imports are grouped together. +# Default value: Preserve +group_imports = "StdExternalCrate" + +# Add trailing semicolon after break, continue and return +# Default value: true +trailing_semicolon = true + +# Enable unstable features on the unstable channel. +# Default value: false +unstable_features = true + +# Use field initialize shorthand if possible. +# Default value: false +use_field_init_shorthand = true + +# Break comments to fit on the line +# Default value: false +wrap_comments = true + +# Which version of the formatting rules to use. Version::One is backwards-compatible with Rustfmt 1.0. Other versions are only backwards compatible within a major version number. 
+# Default value: "One" +version = "Two" diff --git a/crates/findex/.vscode/settings.json b/crates/findex/.vscode/settings.json new file mode 100644 index 00000000..a2044ce6 --- /dev/null +++ b/crates/findex/.vscode/settings.json @@ -0,0 +1,7 @@ +{ + // change this to change the features + // "rest-interface" + "rust-analyzer.cargo.features": [ + "redis-interface" + ], +} diff --git a/crates/findex/Cargo.toml b/crates/findex/Cargo.toml index 99475fc9..10bcb66c 100644 --- a/crates/findex/Cargo.toml +++ b/crates/findex/Cargo.toml @@ -55,17 +55,24 @@ wasm = [ "serialization", ] -redis-interface = ["redis"] +redis-interface = ["rand_core", "rand_chacha", "redis"] rest-interface = [ "base64", "cosmian_crypto_core/ser", "reqwest", "serialization", + "rand_chacha", + "rand_core", ] sqlite-interface = ["rusqlite"] [dependencies] +findex = { git = "https://github.com/Cosmian/findex", branch = "test/cloudproof_v7", features = [ + "test-utils", +] } # Optional dependencies +rand_chacha = { version = "0.3.1", optional = true } +rand_core = { version = "0.6.4", optional = true } actix-rt = { version = "2.9", optional = true } async-trait = { workspace = true } base64 = { workspace = true, optional = true }
wasm-bindgen-futures = { version = "0.4.41", optional = true } wasm-logger = { version = "0.2.0", optional = true } +tokio = "1.41.0" [dev-dependencies] actix-rt = "2.9" diff --git a/crates/findex/src/db_interfaces/redis.rs b/crates/findex/src/db_interfaces/redis.rs index 8d99989c..f67531b8 100644 --- a/crates/findex/src/db_interfaces/redis.rs +++ b/crates/findex/src/db_interfaces/redis.rs @@ -1,332 +1,159 @@ //! Redis implementation of the Findex backends. - -use std::collections::HashMap; - -use async_trait::async_trait; -use cosmian_findex::{ - CoreError as FindexCoreError, DbInterface, EncryptedValue, Token, TokenToEncryptedValueMap, - TokenWithEncryptedValueList, Tokens, ENTRY_LENGTH, LINK_LENGTH, -}; -use redis::{aio::ConnectionManager, pipe, AsyncCommands, Script}; -use tracing::trace; - use crate::db_interfaces::DbInterfaceError; +use findex::MemoryADT; +use redis::{Commands, Connection}; +use std::{ + fmt::{self, Debug, Display}, + hash::Hash, + marker::PhantomData, + ops::Deref, + sync::{Arc, Mutex}, +}; -/// The length of the prefix of the table name in bytes -/// 0x00ee for the entry table -/// 0x00ef for the chain table -const TABLE_PREFIX_LENGTH: usize = 2; - -#[derive(Copy, Clone)] -enum FindexTable { - Entry = 0xee, - Chain = 0xef, -} - -/// Generate a key for the entry table or chain table -fn build_key(table: FindexTable, uid: &[u8]) -> Vec { - [&[0x00, table as u8], uid].concat() -} - -pub struct RedisEntryBackend { - manager: ConnectionManager, - upsert_script: Script, +#[derive(Clone)] +pub struct RedisBackend { + connection: Arc>, + write_script_hash: String, + _marker_adr: PhantomData
, } -impl std::fmt::Debug for RedisEntryBackend { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("RedisEntryBackend").finish() +// Args that are passed to the LUA script are, in order: +// 1. Guard address. +// 2. Guard value. +// 3. Vector length. +// 4+. Vector elements (address, word). +const GUARDED_WRITE_LUA_SCRIPT: &str = r#" +local guard_address = ARGV[1] +local guard_value = ARGV[2] +local length = ARGV[3] + +local value = redis.call('GET',ARGV[1]) + +-- compare the value of the guard to the currently stored value +if((value==false) or (not(value == false) and (guard_value == value))) then + -- guard passed, loop over bindings and insert them + for i = 4,(length*2)+3,2 + do + redis.call('SET', ARGV[i], ARGV[i+1]) + end +end +return value +"#; + +const POISONED_LOCK_ERROR_MSG: &str = "Poisoned lock error"; + +impl Debug for RedisBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RedisMemory") + .field("connection", &"") // We don't want to debug the actual connection + .field("Addr type", &self._marker_adr) + .finish() } } -/// The conditional upsert script used to only update a table if the -/// indexed value matches ARGV[2]. When the value does not match, the -/// indexed value is returned. -const CONDITIONAL_UPSERT_SCRIPT: &str = r" - local value=redis.call('GET',ARGV[1]) - if((value==false) or (not(value == false) and (ARGV[2] == value))) then - redis.call('SET', ARGV[1], ARGV[3]) - return - else - return value - end; - "; - -impl RedisEntryBackend { +impl RedisBackend { /// Connects to a Redis server using the given URL. pub async fn connect(url: &str) -> Result { - let client = redis::Client::open(url)?; - let manager = ConnectionManager::new(client).await?; - - Ok(Self { - manager, - upsert_script: Script::new(CONDITIONAL_UPSERT_SCRIPT), - }) - } - - /// Connects to a Redis server with a `ConnectionManager`. 
- pub async fn connect_with_manager( - manager: ConnectionManager, - ) -> Result { + let mut connection = match redis::Client::open(url) { + Ok(client) => match client.get_connection() { + Ok(con) => con, + Err(e) => { + panic!("Failed to connect to Redis: {}", e); + } + }, + Err(e) => panic!("Error creating redis client: {:?}", e), + }; + let write_script_hash = redis::cmd("SCRIPT") + .arg("LOAD") + .arg(GUARDED_WRITE_LUA_SCRIPT) + .query(&mut connection)?; Ok(Self { - manager, - upsert_script: Script::new(CONDITIONAL_UPSERT_SCRIPT), + connection: Arc::new(Mutex::new(connection)), + write_script_hash, + _marker_adr: PhantomData, }) } - /// Clear all indexes - /// - /// # Warning - /// This is definitive - pub async fn clear_indexes(&self) -> Result<(), DbInterfaceError> { - redis::cmd("FLUSHDB") - .query_async(&mut self.manager.clone()) - .await?; + pub fn clear_indexes(&self) -> Result<(), redis::RedisError> { + let safe_connection = &mut *self.connection.lock().expect(POISONED_LOCK_ERROR_MSG); + redis::cmd("FLUSHDB").exec(safe_connection)?; Ok(()) } } -#[async_trait(?Send)] -impl DbInterface for RedisEntryBackend { - type Error = DbInterfaceError; - - async fn dump_tokens(&self) -> Result { - let keys: Vec> = self - .manager - .clone() - .keys(build_key(FindexTable::Entry, b"*")) - .await?; - - trace!("dumping {} keywords (ET+CT)", keys.len()); - - keys.iter() - .filter_map(|v| { - if v[..TABLE_PREFIX_LENGTH] == [0x00, FindexTable::Entry as u8] { - Some(Token::try_from(&v[TABLE_PREFIX_LENGTH..]).map_err(Self::Error::Findex)) - } else { - None - } - }) - .collect() - } - - async fn fetch( - &self, - tokens: Tokens, - ) -> Result, Self::Error> { - trace!("fetch_entry_table num keywords: {}:", tokens.len()); - - if tokens.is_empty() { - return Ok(Default::default()); - } - - // Collect into a vector to fix the order. 
- let uids = tokens.into_iter().collect::>(); +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RedisMemoryError(String); - let redis_keys = uids - .iter() - .map(|uid| build_key(FindexTable::Entry, uid)) - .collect::>(); +impl std::error::Error for RedisMemoryError {} - let values: Vec> = self.manager.clone().mget(redis_keys).await?; - - // Zip and filter empty values out. - let res = uids - .into_iter() - .zip(values) - .filter_map(|(k, v)| { - if v.is_empty() { - None - } else { - Some(EncryptedValue::try_from(v.as_slice()).map(|v| (k, v))) - } - }) - .collect::, FindexCoreError>>()?; - - trace!("fetch_entry_table non empty tuples len: {}", res.len()); - - Ok(res.into()) - } - - async fn upsert( - &self, - old_values: TokenToEncryptedValueMap, - new_values: TokenToEncryptedValueMap, - ) -> Result, Self::Error> { - trace!("upsert_entry_table num keywords {:?}", new_values.len()); - - let mut rejected = HashMap::with_capacity(new_values.len()); - for (uid, new_value) in new_values { - let new_value = Vec::from(&new_value); - let old_value = old_values.get(&uid).map(Vec::from).unwrap_or_default(); - let key = build_key(FindexTable::Entry, &uid); - - let indexed_value: Vec<_> = self - .upsert_script - .arg(key) - .arg(old_value) - .arg(new_value) - .invoke_async(&mut self.manager.clone()) - .await?; - - if !indexed_value.is_empty() { - let encrypted_value = EncryptedValue::try_from(indexed_value.as_slice())?; - rejected.insert(uid, encrypted_value); - } - } - - trace!("upsert_entry_table rejected: {}", rejected.len()); - - Ok(rejected.into()) - } - - async fn insert( - &self, - items: TokenToEncryptedValueMap, - ) -> Result<(), Self::Error> { - let mut pipe = pipe(); - for (token, value) in &*items { - pipe.set(build_key(FindexTable::Entry, token), Vec::from(value)); - } - pipe.atomic() - .query_async(&mut self.manager.clone()) - .await - .map_err(Self::Error::from) - } - - async fn delete(&self, entry_uids: Tokens) -> Result<(), Self::Error> { - let mut pipeline 
= pipe(); - for uid in entry_uids { - pipeline.del(build_key(FindexTable::Entry, &uid)); - } - pipeline - .atomic() - .query_async(&mut self.manager.clone()) - .await - .map_err(Self::Error::from) +impl From for RedisMemoryError { + fn from(err: redis::RedisError) -> Self { + Self(err.to_string()) } } -pub struct RedisChainBackend(ConnectionManager); - -impl std::fmt::Debug for RedisChainBackend { +impl Display for RedisMemoryError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("RedisChainBackend").finish() + write!(f, "Redis Memory Error: {}", self.0) } } -impl RedisChainBackend { - /// Connects to a Redis server using the given `url`. - pub async fn connect(url: &str) -> Result { - let client = redis::Client::open(url)?; - let manager = ConnectionManager::new(client).await?; - Ok(Self(manager)) - } - - /// Connects to a Redis server with a `ConnectionManager`. - pub async fn connect_with_manager( - manager: ConnectionManager, - ) -> Result { - Ok(Self(manager)) - } - - /// Clear all indexes - /// - /// # Warning - /// This is definitive - pub async fn clear_indexes(&self) -> Result<(), DbInterfaceError> { - redis::cmd("FLUSHDB") - .query_async(&mut self.0.clone()) - .await?; - Ok(()) - } -} - -#[async_trait(?Send)] -impl DbInterface for RedisChainBackend { - type Error = DbInterfaceError; - - async fn dump_tokens(&self) -> Result { - panic!("No token dump is performed for the Chain Table.") - } - - async fn fetch( +impl< + Address: Send + Sync + Hash + Eq + Debug + Clone + Deref, + const ADDRESS_LENGTH: usize, + const WORD_LENGTH: usize, + > MemoryADT for RedisBackend +{ + type Address = Address; + type Error = RedisMemoryError; + type Word = [u8; WORD_LENGTH]; + + async fn batch_read( &self, - tokens: Tokens, - ) -> Result, Self::Error> { - trace!("fetch_entry_table num keywords: {}:", tokens.len()); - if tokens.is_empty() { - return Ok(Default::default()); - } - - let uids = tokens.into_iter().collect::>(); - let 
redis_keys = uids - .iter() - .map(|uid| build_key(FindexTable::Chain, uid)) - .collect::>(); - - let values: Vec> = self.0.clone().mget(redis_keys).await?; - - // Zip and filter empty values out. - let res = uids - .into_iter() - .zip(values) - .filter(|(_, v)| !v.is_empty()) - .map(|(k, v)| Ok((k, EncryptedValue::try_from(v.as_slice())?))) - .collect::, Self::Error>>()?; - - trace!("fetch_entry_table non empty tuples len: {}", res.len()); - - Ok(res.into()) - } - - async fn upsert( - &self, - _old_values: TokenToEncryptedValueMap, - _new_values: TokenToEncryptedValueMap, - ) -> Result, Self::Error> { - panic!("No token upsert is performed for the Chain Table.") - } - - async fn insert( - &self, - items: TokenToEncryptedValueMap, - ) -> Result<(), Self::Error> { - let mut pipe = pipe(); - for (k, v) in &*items { - pipe.set(build_key(FindexTable::Chain, k), Vec::from(v)); - } - pipe.atomic() - .query_async(&mut self.0.clone()) - .await + addresses: Vec
, + ) -> Result>, Self::Error> { + let safe_connection = &mut *self.connection.lock().expect(POISONED_LOCK_ERROR_MSG); + let refs: Vec<&[u8; ADDRESS_LENGTH]> = + addresses.iter().map(|address| address.deref()).collect(); + safe_connection + .mget::<_, Vec<_>>(&refs) .map_err(Self::Error::from) } - async fn delete(&self, chain_uids: Tokens) -> Result<(), Self::Error> { - let mut pipeline = pipe(); - for uid in chain_uids { - pipeline.del(build_key(FindexTable::Chain, &uid)); + async fn guarded_write( + &self, + guard: (Self::Address, Option), + bindings: Vec<(Self::Address, Self::Word)>, + ) -> Result, Self::Error> { + let mut safe_connection = self.connection.lock().expect(POISONED_LOCK_ERROR_MSG); + let (guard_address, guard_value) = guard; + let mut cmd = redis::cmd("EVALSHA") + .arg(self.write_script_hash.clone()) + .arg(0) + .arg(&*guard_address) + .clone(); // Why cloning is necessary : https://stackoverflow.com/questions/64728534/how-to-resolve-creates-a-temporary-variable-which-is-freed-while-still-in-use + cmd = if let Some(byte_array) = guard_value { + cmd.arg(&byte_array).arg(bindings.len()).clone() + } else { + cmd.arg("false".to_string()).arg(bindings.len()).clone() + }; + for (address, word) in bindings { + cmd = cmd.arg(&*address).arg(&word).clone(); } - pipeline - .atomic() - .query_async(&mut self.0.clone()) - .await - .map_err(Self::Error::from) + cmd.query(&mut safe_connection).map_err(|e| e.into()) } } #[cfg(test)] mod tests { - use std::collections::HashSet; - - use cosmian_crypto_core::{CsRng, Nonce}; - use cosmian_findex::{MAC_LENGTH, NONCE_LENGTH}; - use rand::{RngCore, SeedableRng}; + use findex::{ + test_guarded_write_concurrent, test_single_write_and_read, test_wrong_guard, Address, + }; + use futures::executor::block_on; use serial_test::serial; use super::*; - use crate::{db_interfaces::tests::test_backend, logger::log_init, Configuration}; pub fn get_redis_url() -> String { if let Ok(var_env) = std::env::var("REDIS_HOST") { @@ -336,127 
+163,60 @@ mod tests { } } - #[actix_rt::test] - #[serial] - async fn test_upsert_conflict() -> Result<(), DbInterfaceError> { - log_init(); - trace!("Test Redis upsert."); - - let mut rng = CsRng::from_entropy(); + const TEST_ADR_WORD_LENGTH: usize = 16; - // Generate 333 random UIDs. - let mut uids = HashSet::with_capacity(333); - while uids.len() < 333 { - let mut uid = [0_u8; Token::LENGTH]; - rng.fill_bytes(&mut uid); - uids.insert(uid); - } - let uids = uids.into_iter().collect::>(); - - let original_value = EncryptedValue { - nonce: Nonce::from([0; NONCE_LENGTH]), - ciphertext: [1; ENTRY_LENGTH], - tag: [0; MAC_LENGTH], - }; - let changed_value = EncryptedValue { - nonce: Nonce::from([0; NONCE_LENGTH]), - ciphertext: [2; ENTRY_LENGTH], - tag: [0; MAC_LENGTH], - }; - let new_value = EncryptedValue { - nonce: Nonce::from([0; NONCE_LENGTH]), - ciphertext: [2; ENTRY_LENGTH], - tag: [0; MAC_LENGTH], - }; - - let url = get_redis_url(); - let et = RedisEntryBackend::connect(&url).await?; - et.clear_indexes().await?; - - // First user upserts `original_value` to all the UIDs. - let rejected = et - .upsert( - HashMap::new().into(), - uids.iter() - .map(|k| (Token::from(*k), original_value.clone())) - .collect(), - ) - .await?; - assert!(rejected.is_empty()); - - let et_length = et.dump_tokens().await?.len(); - trace!("Entry Table length: {et_length}"); + async fn init_test_redis_db( + ) -> RedisBackend, TEST_ADR_WORD_LENGTH> { + RedisBackend::, TEST_ADR_WORD_LENGTH>::connect( + &get_redis_url(), + ) + .await + .unwrap() + } - // Another user upserts `changed_value` to 111 UIDs. 
- let rejected = et - .upsert( - uids.iter() - .map(|k| (Token::from(*k), original_value.clone())) - .collect(), - uids.iter() - .enumerate() - .map(|(idx, k)| { - if idx % 3 == 0 { - (Token::from(*k), changed_value.clone()) - } else { - (Token::from(*k), original_value.clone()) - } - }) - .collect(), - ) - .await?; - assert!(rejected.is_empty()); + #[actix_rt::test] + #[serial] + async fn test_db_flush() -> Result<(), DbInterfaceError> { + let memory = init_test_redis_db().await; - let et_length = et.dump_tokens().await?.len(); - println!("Entry Table length: {et_length}"); + let addr = Address::from([1; 16]); + let word = [2; 16]; - // The first user upserts `new_value` to all the UIDs from `original_value`. 111 - // UIDs should conflict. - let rejected = et - .upsert( - uids.iter() - .map(|k| (Token::from(*k), original_value.clone())) - .collect(), - uids.iter() - .map(|k| (Token::from(*k), new_value.clone())) - .collect(), - ) - .await?; - assert_eq!(111, rejected.len()); - for prev_value in rejected.values() { - assert_eq!(prev_value, &changed_value); - } + block_on(memory.guarded_write((addr.clone(), None), vec![(addr.clone(), word)])).unwrap(); - // The firs user upserts `new_value` to the 111 rejected UIDs from - // `changed_value`. 
- let rejected = et - .upsert( - rejected.clone(), - rejected.keys().map(|k| (*k, new_value.clone())).collect(), - ) - .await?; - assert_eq!(0, rejected.len()); + let result = block_on(memory.batch_read(vec![addr.clone()])).unwrap(); + assert_eq!(result, vec![Some([2; 16])]); + memory.clear_indexes().unwrap(); + let result = block_on(memory.batch_read(vec![addr])).unwrap(); + assert_eq!(result, vec![None]); Ok(()) } #[actix_rt::test] #[serial] - async fn test_redis_backend() { - log_init(); - trace!("Test Redis backend."); - - let url = get_redis_url(); + async fn test_rw_seq() -> Result<(), DbInterfaceError> { + let memory = init_test_redis_db().await; + memory.clear_indexes().unwrap(); + block_on(test_single_write_and_read(&memory, rand::random())); + Ok(()) + } - // Empty the Redis to prevent old ciphertexts to cause error during compacting. - let client = redis::Client::open(url.as_str()).unwrap(); - let mut manager = ConnectionManager::new(client).await.unwrap(); - redis::cmd("FLUSHDB") - .query_async::<_, ()>(&mut manager) - .await - .unwrap(); + #[actix_rt::test] + #[serial] + async fn test_guard_seq() -> Result<(), DbInterfaceError> { + let memory = init_test_redis_db().await; + memory.clear_indexes().unwrap(); + block_on(test_wrong_guard(&memory, rand::random())); + Ok(()) + } - let config = Configuration::Redis(url.clone(), url.clone()); - test_backend(config).await; + #[actix_rt::test] + #[serial] + async fn test_rw_ccr() -> Result<(), DbInterfaceError> { + let memory = init_test_redis_db().await; + memory.clear_indexes().unwrap(); + block_on(test_guarded_write_concurrent(memory, rand::random())); + Ok(()) } } diff --git a/crates/findex/src/instantiation/db_config.rs b/crates/findex/src/instantiation/db_config.rs index 7eb501a4..e059722a 100644 --- a/crates/findex/src/instantiation/db_config.rs +++ b/crates/findex/src/instantiation/db_config.rs @@ -35,7 +35,7 @@ pub enum Configuration { /// Redis DB interface requests an URL to a valid instance. 
#[cfg(feature = "redis-interface")] - Redis(String, String), + Redis(String), /// WASM DB interface requests WASM functions corresponding to the APIs used by /// the Entry/Chain tables. diff --git a/crates/findex/src/instantiation/findex.rs b/crates/findex/src/instantiation/findex.rs index fbf1cec2..798b2d54 100644 --- a/crates/findex/src/instantiation/findex.rs +++ b/crates/findex/src/instantiation/findex.rs @@ -1,14 +1,25 @@ use std::{ collections::{HashMap, HashSet}, + convert::Infallible, future::Future, }; +use cosmian_crypto_core::RandomFixedSizeCBytes; use cosmian_findex::{ ChainTable, Data, DxEnc, EntryTable, Error as FindexError, Findex, Index, IndexedValue, IndexedValueToKeywordsMap, Keyword, KeywordToDataMap, Keywords, Label, UserKey, ENTRY_LENGTH, LINK_LENGTH, }; +use findex::{ + dummy_decode, dummy_encode, Address, Findex as Findex_v7, IndexADT, Secret, Value, + ADDRESS_LENGTH, +}; +use rand_chacha::ChaChaRng; +use tracing::error; + +const WORD_LENGTH: usize = 16; + #[cfg(feature = "ffi")] use crate::db_interfaces::custom::ffi::{FfiChainBackend, FfiEntryBackend}; #[cfg(feature = "python")] @@ -16,12 +27,14 @@ use crate::db_interfaces::custom::python::{PythonChainBackend, PythonEntryBacken #[cfg(feature = "wasm")] use crate::db_interfaces::custom::wasm::{WasmChainBackend, WasmEntryBackend}; #[cfg(feature = "redis-interface")] -use crate::db_interfaces::redis::{RedisChainBackend, RedisEntryBackend}; +use crate::db_interfaces::redis::RedisBackend; #[cfg(feature = "rest-interface")] use crate::db_interfaces::rest::{RestChainBackend, RestEntryBackend, RestParameters}; #[cfg(feature = "sqlite-interface")] use crate::db_interfaces::sqlite::{SqlChainBackend, SqlEntryBackend}; use crate::{db_interfaces::DbInterfaceError, Configuration}; +#[cfg(feature = "redis-interface")] +use rand_core::SeedableRng; /// Wrapper around Findex instantiations used for static dispatch. 
#[derive(Debug)] @@ -37,10 +50,11 @@ pub enum InstantiatedFindex { #[cfg(feature = "redis-interface")] Redis( - Findex< - DbInterfaceError, - EntryTable, - ChainTable, + Findex_v7< + WORD_LENGTH, + Value, + Infallible, + RedisBackend, WORD_LENGTH>, >, ), @@ -79,6 +93,16 @@ pub enum InstantiatedFindex { >, ), } +/// Temporary enum for Findex migration +// #[deprecated( +// since = "7.0.0", +// note = "This enum is temporary and will be removed after migration to new Findex version" +// )] +// #[derive(Debug)] +// pub enum SearchResult { +// Old(KeywordToDataMap), +// Recent(HashMap>), +// } impl InstantiatedFindex { /// Wrapper around Findex [`new`](Index::new) for static dispatch. @@ -91,9 +115,12 @@ impl InstantiatedFindex { )), #[cfg(feature = "redis-interface")] - Configuration::Redis(entry_params, chain_params) => Self::Redis(Findex::new( - EntryTable::setup(RedisEntryBackend::connect(&entry_params).await?), - ChainTable::setup(RedisChainBackend::connect(&chain_params).await?), + Configuration::Redis(entry_params) => Self::Redis(Findex_v7::new( + Secret::random(&mut ChaChaRng::from_entropy()), + RedisBackend::, WORD_LENGTH>::connect(&entry_params) + .await?, + dummy_encode::, + dummy_decode, )), #[cfg(feature = "rest-interface")] @@ -127,14 +154,19 @@ impl InstantiatedFindex { Ok(findex) } - /// Wrapper around Findex [`keygen`](Index::keygen) for static dispatch. - #[must_use] + #[deprecated( + since = "7.0.0", + note = "keygen is no longer supported in the new Findex version. This is a temporary placeholder until removal." 
+ )] pub fn keygen(&self) -> UserKey { match self { #[cfg(feature = "sqlite-interface")] Self::Sqlite(findex) => findex.keygen(), #[cfg(feature = "redis-interface")] - Self::Redis(findex) => findex.keygen(), + Self::Redis(findex) => { + error!("Keygen is deprecated and not supported in the new Findex version."); + UserKey::new(&mut ChaChaRng::from_entropy()) + } #[cfg(feature = "ffi")] Self::Ffi(findex) => findex.keygen(), #[cfg(feature = "python")] @@ -167,7 +199,9 @@ impl InstantiatedFindex { #[cfg(feature = "sqlite-interface")] Self::Sqlite(findex) => findex.search(key, label, keywords, interrupt).await, #[cfg(feature = "redis-interface")] - Self::Redis(findex) => findex.search(key, label, keywords, interrupt).await, + Self::Redis(findex) => Ok(todo!( + "SearchResult::Recent(findex.search(keywords).await.unwrap())" + )), #[cfg(feature = "wasm")] Self::Wasm(findex) => findex.search(key, label, keywords, interrupt).await, } @@ -184,7 +218,9 @@ impl InstantiatedFindex { #[cfg(feature = "sqlite-interface")] Self::Sqlite(findex) => findex.add(key, label, additions).await, #[cfg(feature = "redis-interface")] - Self::Redis(findex) => findex.add(key, label, additions).await, + Self::Redis(findex) => { + todo!("TBD") + } #[cfg(feature = "ffi")] Self::Ffi(findex) => findex.add(key, label, additions).await, #[cfg(feature = "python")] @@ -207,7 +243,7 @@ impl InstantiatedFindex { #[cfg(feature = "sqlite-interface")] Self::Sqlite(findex) => findex.delete(key, label, deletions).await, #[cfg(feature = "redis-interface")] - Self::Redis(findex) => findex.delete(key, label, deletions).await, + Self::Redis(findex) => todo!("TBD"), #[cfg(feature = "ffi")] Self::Ffi(findex) => findex.delete(key, label, deletions).await, #[cfg(feature = "python")] @@ -219,7 +255,10 @@ impl InstantiatedFindex { } } - /// Wrapper around Findex [`compact`](Findex::compact) for static dispatch. + #[deprecated( + since = "7.0.0", + note = "compact is no longer supported in the new Findex version. 
This is a temporary placeholder until removal." + )] pub async fn compact< F: Future, String>>, Filter: Fn(HashSet) -> F, @@ -247,17 +286,9 @@ impl InstantiatedFindex { .await } #[cfg(feature = "redis-interface")] - Self::Redis(findex) => { - findex - .compact( - old_key, - new_key, - old_label, - new_label, - compacting_rate, - data_filter, - ) - .await + Self::Redis(_findex) => { + error!("This is not supposed to be called on Redis."); + Ok(()) } #[cfg(feature = "ffi")] Self::Ffi(findex) => {