Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
54 commits
Select commit Hold shift + click to select a range
f510359
Breaking change: Make Rows and Row API more consistent.
ignatz Aug 1, 2024
a68f042
libsql: release v0.5.0
LucioFranco Aug 2, 2024
0917f84
introduce NamespaceConfigurator
MarinPostma Aug 2, 2024
f9daa9e
add configurators to namespace store
MarinPostma Aug 2, 2024
8b377a6
add schema configurator
MarinPostma Aug 3, 2024
978dd71
instantiate namespaces from configurators
MarinPostma Aug 3, 2024
907f2f9
pass configurators to NamespaceStore::new
MarinPostma Aug 5, 2024
fd03144
decoupled namespace configurators
MarinPostma Aug 5, 2024
0647711
legacy configurators
MarinPostma Aug 6, 2024
76558e8
fix behaviour of VACUUM for vector indices to make rowid consistent
sivukhin Aug 6, 2024
853143d
build bundles
sivukhin Aug 6, 2024
2115277
fix bug
sivukhin Aug 6, 2024
b12431c
configure durable wal
MarinPostma Aug 6, 2024
066f152
configure libsql_wal
MarinPostma Aug 6, 2024
b5dba72
partial implementation of LibsqlWalReplicationConfigurator
MarinPostma Aug 6, 2024
ded5ba7
fmt + remove dbgs
MarinPostma Aug 6, 2024
e5b8c31
comment out libsql-wal replica configurator
MarinPostma Aug 6, 2024
6e7fb9f
restore encryption config
MarinPostma Aug 6, 2024
71c50e1
enable more windows CI
LucioFranco Aug 6, 2024
95be43c
Merge pull request #1642 from tursodatabase/lucio/windows-ci2
LucioFranco Aug 6, 2024
9d15ebf
Merge pull request #1641 from tursodatabase/vector-search-fix-vacuum
sivukhin Aug 6, 2024
07dc9b5
add LibsqlWalFooter
MarinPostma Aug 7, 2024
4069036
cancel query when request is dropped
MarinPostma Aug 7, 2024
5924766
write footer on checkpoint
MarinPostma Aug 7, 2024
d11ec01
downgrade debug to trace
MarinPostma Aug 7, 2024
fc178de
add query canceled metric
MarinPostma Aug 7, 2024
97652c1
Merge pull request #1628 from tursodatabase/lucio/0.5.0-libsql
LucioFranco Aug 7, 2024
351e6eb
add simple integration test
sivukhin Aug 7, 2024
3e56d28
fix potential crash in fts5
sivukhin Aug 7, 2024
7ed1468
build bundles
sivukhin Aug 7, 2024
0511ec3
Merge pull request #1643 from tursodatabase/libsql-footer
MarinPostma Aug 7, 2024
9595315
init cancel bomb before query exec
MarinPostma Aug 7, 2024
8db5ea8
Merge pull request #1644 from tursodatabase/query-cancel
MarinPostma Aug 7, 2024
0d41105
cargo fmt
sivukhin Aug 7, 2024
4085a0d
libsql: downgrade failed prefetch log to debug
LucioFranco Aug 7, 2024
dd80e69
Merge pull request #1646 from tursodatabase/lucio/downgrade-log
haaawk Aug 8, 2024
b0bc6eb
publish sqld debug builds to the separate image name
sivukhin Aug 8, 2024
5351b68
Merge pull request #1648 from tursodatabase/fix-debug-build-publish
MarinPostma Aug 8, 2024
51b1b49
remove digests artifacts from debug build step
sivukhin Aug 8, 2024
b97d37d
Merge pull request #1649 from tursodatabase/fix-debug-build-remove-di…
sivukhin Aug 8, 2024
9120ce6
Merge pull request #1623 from ignatz/align_row_and_rows
haaawk Aug 8, 2024
ec7bca5
Fix JSON f64 precision
wyhaya Aug 8, 2024
e7de104
Merge pull request #1645 from tursodatabase/fix-fts5-crash
sivukhin Aug 8, 2024
2c40df2
Merge pull request #1647 from wyhaya/main
haaawk Aug 8, 2024
5eeba43
improve random row selection
sivukhin Aug 8, 2024
4b3e7e7
fix random row selection query to have db name
sivukhin Aug 8, 2024
0b41b5a
build bundles
sivukhin Aug 8, 2024
f80444a
fix test
sivukhin Aug 8, 2024
d66b138
Merge pull request #1651 from tursodatabase/vector-search-improve-ran…
sivukhin Aug 8, 2024
8441108
allow vector index to be partial
sivukhin Aug 8, 2024
ec996fa
build bundles
sivukhin Aug 8, 2024
e78fb34
Merge pull request #1632 from tursodatabase/libsql-wal-integration
MarinPostma Aug 9, 2024
8077948
Merge pull request #1653 from tursodatabase/vector-search-allow-parti…
sivukhin Aug 9, 2024
c8d2ea0
Ensure that blobs match the length of arrays they're being deserializ…
ignatz Aug 9, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 1 addition & 15 deletions .github/workflows/publish-server.yml
Original file line number Diff line number Diff line change
Expand Up @@ -118,23 +118,9 @@ jobs:
context: .
platforms: ${{ env.platform }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
outputs: type=image,name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-debug,push-by-digest=true,name-canonical=true,push=true
build-args: |
BUILD_DEBUG=true
-
name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
-
name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-debug-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1

build-arm64:
permissions: write-all
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/rust.yml
Original file line number Diff line number Diff line change
Expand Up @@ -159,8 +159,8 @@ jobs:
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: ${{ runner.os }}-cargo-
- name: check libsql remote
run: cargo check -p libsql --no-default-features -F remote
- name: build libsql all features
run: cargo build -p libsql --all-features

# test-rust-wasm:
# runs-on: ubuntu-latest
Expand Down
12 changes: 6 additions & 6 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ codegen-units = 1
panic = "unwind"

[workspace.dependencies]
rusqlite = { package = "libsql-rusqlite", path = "vendored/rusqlite", version = "0.31", default-features = false, features = [
rusqlite = { package = "libsql-rusqlite", path = "vendored/rusqlite", version = "0.32", default-features = false, features = [
"libsql-experimental",
"column_decltype",
"load_extension",
Expand Down
2 changes: 1 addition & 1 deletion libsql-ffi/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "libsql-ffi"
version = "0.3.0"
version = "0.4.0"
edition = "2021"
build = "build.rs"
license = "MIT"
Expand Down
149 changes: 88 additions & 61 deletions libsql-ffi/bundled/SQLite3MultipleCiphers/src/sqlite3.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
** README.md
** configure
** configure.ac
** ext/fts5/fts5_tokenize.c
** ext/jni/src/org/sqlite/jni/capi/CollationNeededCallback.java
** ext/jni/src/org/sqlite/jni/capi/CommitHookCallback.java
** ext/jni/src/org/sqlite/jni/capi/PreupdateHookCallback.java
Expand Down Expand Up @@ -69,6 +70,7 @@
** src/test2.c
** src/test3.c
** src/test8.c
** src/vacuum.c
** src/vdbe.c
** src/vdbeInt.h
** src/vdbeapi.c
Expand Down Expand Up @@ -155952,6 +155954,10 @@ SQLITE_PRIVATE void sqlite3UpsertDoUpdate(
/* #include "sqliteInt.h" */
/* #include "vdbeInt.h" */

#ifndef SQLITE_OMIT_VECTOR
/* #include "vectorIndexInt.h" */
#endif

#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH)

/*
Expand Down Expand Up @@ -156229,6 +156235,27 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
if( rc!=SQLITE_OK ) goto end_of_vacuum;
db->init.iDb = 0;

#ifndef SQLITE_OMIT_VECTOR
// shadow tables for vector index will be populated automatically during CREATE INDEX command
// so we must skip them at this step
if( sqlite3FindTable(db, VECTOR_INDEX_GLOBAL_META_TABLE, zDbMain) != NULL ){
rc = execSqlF(db, pzErrMsg,
"SELECT'INSERT INTO vacuum_db.'||quote(name)"
"||' SELECT*FROM\"%w\".'||quote(name)"
"FROM vacuum_db.sqlite_schema "
"WHERE type='table'AND coalesce(rootpage,1)>0 AND name NOT IN (SELECT name||'_shadow' FROM " VECTOR_INDEX_GLOBAL_META_TABLE ")",
zDbMain
);
}else{
rc = execSqlF(db, pzErrMsg,
"SELECT'INSERT INTO vacuum_db.'||quote(name)"
"||' SELECT*FROM\"%w\".'||quote(name)"
"FROM vacuum_db.sqlite_schema "
"WHERE type='table'AND coalesce(rootpage,1)>0",
zDbMain
);
}
#else
/* Loop through the tables in the main database. For each, do
** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
** the contents to the temporary database.
Expand All @@ -156240,6 +156267,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
"WHERE type='table'AND coalesce(rootpage,1)>0",
zDbMain
);
#endif
assert( (db->mDbFlags & DBFLAG_Vacuum)!=0 );
db->mDbFlags &= ~DBFLAG_Vacuum;
if( rc!=SQLITE_OK ) goto end_of_vacuum;
Expand Down Expand Up @@ -211974,6 +212002,7 @@ int diskAnnCreateIndex(
int type, dims;
u64 maxNeighborsParam, blockSizeBytes;
char *zSql;
const char *zRowidColumnName;
char columnSqlDefs[VECTOR_INDEX_SQL_RENDER_LIMIT]; // definition of columns (e.g. index_key INTEGER BINARY, index_key1 TEXT, ...)
char columnSqlNames[VECTOR_INDEX_SQL_RENDER_LIMIT]; // just column names (e.g. index_key, index_key1, index_key2, ...)
if( vectorIdxKeyDefsRender(pKey, "index_key", columnSqlDefs, sizeof(columnSqlDefs)) != 0 ){
Expand Down Expand Up @@ -212041,6 +212070,7 @@ int diskAnnCreateIndex(
columnSqlDefs,
columnSqlNames
);
zRowidColumnName = "index_key";
}else{
zSql = sqlite3MPrintf(
db,
Expand All @@ -212050,9 +212080,31 @@ int diskAnnCreateIndex(
columnSqlDefs,
columnSqlNames
);
zRowidColumnName = "rowid";
}
rc = sqlite3_exec(db, zSql, 0, 0, 0);
sqlite3DbFree(db, zSql);
if( rc != SQLITE_OK ){
return rc;
}
/*
* vector blobs are usually pretty huge (more than a page size; for example, a node block for 1024d f32 embeddings with 1-bit compression will occupy ~20KB)
* in this case, the main table B-Tree takes on a redundant shape where every leaf node has only 1 cell
*
* as we have a query which selects a random row using the OFFSET/LIMIT trick - we would need to read all these leaf node pages just to skip them
* so, in order to remove this overhead for random row selection - we create an index over just a single column
* in this case B-Tree leaves will be full of rowids and the overhead of page reads will be very small
*/
zSql = sqlite3MPrintf(
db,
"CREATE INDEX IF NOT EXISTS \"%w\".%s_shadow_idx ON %s_shadow (%s)",
zDbSName,
zIdxName,
zIdxName,
zRowidColumnName
);
rc = sqlite3_exec(db, zSql, 0, 0, 0);
sqlite3DbFree(db, zSql);
return rc;
}

Expand Down Expand Up @@ -212082,8 +212134,8 @@ static int diskAnnSelectRandomShadowRow(const DiskAnnIndex *pIndex, u64 *pRowid)

zSql = sqlite3MPrintf(
pIndex->db,
"SELECT rowid FROM \"%w\".%s LIMIT 1 OFFSET ABS(RANDOM()) %% MAX((SELECT COUNT(*) FROM %s), 1)",
pIndex->zDbSName, pIndex->zShadow, pIndex->zShadow
"SELECT rowid FROM \"%w\".%s LIMIT 1 OFFSET ABS(RANDOM()) %% MAX((SELECT COUNT(*) FROM \"%w\".%s), 1)",
pIndex->zDbSName, pIndex->zShadow, pIndex->zDbSName, pIndex->zShadow
);
if( zSql == NULL ){
rc = SQLITE_NOMEM_BKPT;
Expand Down Expand Up @@ -213658,11 +213710,6 @@ int vectorF64ParseSqliteBlob(
** VectorIdxParams utilities
****************************************************************************/

// VACUUM creates tables and indices first and only then populate data
// we need to ignore inserts from 'INSERT INTO vacuum.t SELECT * FROM t' statements because
// all shadow tables will be populated by VACUUM process during regular process of table copy
#define IsVacuum(db) ((db->mDbFlags&DBFLAG_Vacuum)!=0)

void vectorIdxParamsInit(VectorIdxParams *pParams, u8 *pBinBuf, int nBinSize) {
assert( nBinSize <= VECTOR_INDEX_PARAMS_BUF_SIZE );

Expand Down Expand Up @@ -214381,10 +214428,6 @@ int vectorIndexDrop(sqlite3 *db, const char *zDbSName, const char *zIdxName) {
// this is done to prevent unrecoverable situations where the index was dropped but index parameter deletion failed, and a second attempt would fail on the first step
int rcIdx, rcParams;

if( IsVacuum(db) ){
return SQLITE_OK;
}

assert( zDbSName != NULL );

rcIdx = diskAnnDropIndex(db, zDbSName, zIdxName);
Expand All @@ -214395,10 +214438,6 @@ int vectorIndexDrop(sqlite3 *db, const char *zDbSName, const char *zIdxName) {
int vectorIndexClear(sqlite3 *db, const char *zDbSName, const char *zIdxName) {
assert( zDbSName != NULL );

if( IsVacuum(db) ){
return SQLITE_OK;
}

return diskAnnClearIndex(db, zDbSName, zIdxName);
}

Expand All @@ -214408,7 +214447,7 @@ int vectorIndexClear(sqlite3 *db, const char *zDbSName, const char *zIdxName) {
* this made intentionally in order to natively support upload of SQLite dumps
*
* dump populates tables first and create indices after
* so we must omit them because shadow tables already filled
* so we must omit index refill setp because shadow tables already filled
*
* 1. in case of any error :-1 returned (and pParse errMsg is populated with some error message)
* 2. if vector index must not be created : 0 returned
Expand All @@ -214426,10 +214465,6 @@ int vectorIndexCreate(Parse *pParse, const Index *pIdx, const char *zDbSName, co
int hasLibsqlVectorIdxFn = 0, hasCollation = 0;
const char *pzErrMsg;

if( IsVacuum(pParse->db) ){
return CREATE_IGNORE;
}

assert( zDbSName != NULL );

sqlite3 *db = pParse->db;
Expand Down Expand Up @@ -214488,11 +214523,6 @@ int vectorIndexCreate(Parse *pParse, const Index *pIdx, const char *zDbSName, co
sqlite3ErrorMsg(pParse, "vector index: must contain exactly one column wrapped into the " VECTOR_INDEX_MARKER_FUNCTION " function");
return CREATE_FAIL;
}
// we are able to support this but I doubt this works for now - more polishing required to make this work
if( pIdx->pPartIdxWhere != NULL ) {
sqlite3ErrorMsg(pParse, "vector index: where condition is forbidden");
return CREATE_FAIL;
}

pArgsList = pIdx->aColExpr->a[0].pExpr->x.pList;
pListItem = pArgsList->a;
Expand Down Expand Up @@ -214582,7 +214612,6 @@ int vectorIndexSearch(
VectorIdxParams idxParams;
vectorIdxParamsInit(&idxParams, NULL, 0);

assert( !IsVacuum(db) );
assert( zDbSName != NULL );

if( argc != 3 ){
Expand Down Expand Up @@ -214667,10 +214696,6 @@ int vectorIndexInsert(
int rc;
VectorInRow vectorInRow;

if( IsVacuum(pCur->db) ){
return SQLITE_OK;
}

rc = vectorInRowAlloc(pCur->db, pRecord, &vectorInRow, pzErrMsg);
if( rc != SQLITE_OK ){
return rc;
Expand All @@ -214690,10 +214715,6 @@ int vectorIndexDelete(
){
VectorInRow payload;

if( IsVacuum(pCur->db) ){
return SQLITE_OK;
}

payload.pVector = NULL;
payload.nKeys = r->nField - 1;
payload.pKeyValues = r->aMem + 1;
Expand Down Expand Up @@ -259749,40 +259770,46 @@ static int fts5TriCreate(
Fts5Tokenizer **ppOut
){
int rc = SQLITE_OK;
TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew));
UNUSED_PARAM(pUnused);
if( pNew==0 ){
rc = SQLITE_NOMEM;
TrigramTokenizer *pNew = 0;

if( nArg%2 ){
rc = SQLITE_ERROR;
}else{
int i;
pNew->bFold = 1;
pNew->iFoldParam = 0;
for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
const char *zArg = azArg[i+1];
if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){
if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){
rc = SQLITE_ERROR;
pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew));
UNUSED_PARAM(pUnused);
if( pNew==0 ){
rc = SQLITE_NOMEM;
}else{
int i;
pNew->bFold = 1;
pNew->iFoldParam = 0;
for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
const char *zArg = azArg[i+1];
if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){
if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){
rc = SQLITE_ERROR;
}else{
pNew->bFold = (zArg[0]=='0');
}
}else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){
if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){
rc = SQLITE_ERROR;
}else{
pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0;
}
}else{
pNew->bFold = (zArg[0]=='0');
}
}else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){
if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){
rc = SQLITE_ERROR;
}else{
pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0;
}
}else{
rc = SQLITE_ERROR;
}
}

if( pNew->iFoldParam!=0 && pNew->bFold==0 ){
rc = SQLITE_ERROR;
}
if( pNew->iFoldParam!=0 && pNew->bFold==0 ){
rc = SQLITE_ERROR;
}

if( rc!=SQLITE_OK ){
fts5TriDelete((Fts5Tokenizer*)pNew);
pNew = 0;
if( rc!=SQLITE_OK ){
fts5TriDelete((Fts5Tokenizer*)pNew);
pNew = 0;
}
}
}
*ppOut = (Fts5Tokenizer*)pNew;
Expand Down
Loading