diff --git a/.changeset/short-cheetahs-unite.md b/.changeset/short-cheetahs-unite.md new file mode 100644 index 000000000..d6b3d8ad7 --- /dev/null +++ b/.changeset/short-cheetahs-unite.md @@ -0,0 +1,14 @@ +--- +'@powersync/service-module-postgres-storage': minor +'@powersync/service-module-mongodb-storage': minor +'@powersync/service-core-tests': minor +'@powersync/service-module-postgres': minor +'@powersync/service-module-mongodb': minor +'@powersync/service-core': minor +'@powersync/service-module-mssql': minor +'@powersync/service-module-mysql': minor +'@powersync/service-sync-rules': minor +'@powersync/service-errors': patch +--- + +Introduce storage versions. diff --git a/modules/module-mongodb-storage/src/migrations/db/migrations/1770213298299-storage-version.ts b/modules/module-mongodb-storage/src/migrations/db/migrations/1770213298299-storage-version.ts new file mode 100644 index 000000000..b28f6a119 --- /dev/null +++ b/modules/module-mongodb-storage/src/migrations/db/migrations/1770213298299-storage-version.ts @@ -0,0 +1,44 @@ +import { migrations, storage as core_storage } from '@powersync/service-core'; +import * as mongo_storage from '../../../storage/storage-index.js'; +import { MongoStorageConfig } from '../../../types/types.js'; + +export const up: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + const db = mongo_storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + + try { + await db.sync_rules.updateMany( + { storage_version: { $exists: false } }, + { $set: { storage_version: core_storage.LEGACY_STORAGE_VERSION } } + ); + } finally { + await db.client.close(); + } +}; + +export const down: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + + const db = mongo_storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + + try { + const newRules = await db.sync_rules + .find({ storage_version: { $gt: core_storage.LEGACY_STORAGE_VERSION } }) + .toArray(); + if (newRules.length > 0) { + throw new Error( + `Cannot revert migration due to newer storage versions in use: ${newRules.map((r) => `${r._id}: v${r.storage_version}`).join(', ')}` + ); + } + await db.sync_rules.updateMany( + { storage_version: core_storage.LEGACY_STORAGE_VERSION }, + { $unset: { storage_version: 1 } } + ); + } finally { + await db.client.close(); + } +}; diff --git a/modules/module-mongodb-storage/src/storage/MongoBucketStorage.ts b/modules/module-mongodb-storage/src/storage/MongoBucketStorage.ts index aad07740d..82a7d024b 100644 --- a/modules/module-mongodb-storage/src/storage/MongoBucketStorage.ts +++ b/modules/module-mongodb-storage/src/storage/MongoBucketStorage.ts @@ -13,6 +13,11 @@ import { SyncRuleDocument } from './implementation/models.js'; import { MongoPersistedSyncRulesContent } from './implementation/MongoPersistedSyncRulesContent.js'; import { MongoSyncBucketStorage, MongoSyncBucketStorageOptions } from './implementation/MongoSyncBucketStorage.js'; import { generateSlotName } from '../utils/util.js'; +import { MongoChecksumOptions } from './implementation/MongoChecksums.js'; + +export interface MongoBucketStorageOptions { + checksumOptions?: Omit; +} export class MongoBucketStorage extends BaseObserver @@ -32,7 +37,7 @@ export class MongoBucketStorage options: { slot_name_prefix: string; }, - private internalOptions?: MongoSyncBucketStorageOptions + private internalOptions?: MongoBucketStorageOptions ) { 
super(); this.client = db.client; @@ -50,10 +55,15 @@ export class MongoBucketStorage if ((typeof id as any) == 'bigint') { id = Number(id); } - const storage = new MongoSyncBucketStorage(this, id, syncRules, slot_name, undefined, this.internalOptions); + const storageConfig = (syncRules as MongoPersistedSyncRulesContent).getStorageConfig(); + const storage = new MongoSyncBucketStorage(this, id, syncRules, slot_name, undefined, { + ...this.internalOptions, + storageConfig + }); if (!options?.skipLifecycleHooks) { this.iterateListeners((cb) => cb.syncStorageCreated?.(storage)); } + storage.registerListener({ batchStarted: (batch) => { batch.registerListener({ @@ -205,8 +215,10 @@ export class MongoBucketStorage const id = Number(id_doc!.op_id); const slot_name = generateSlotName(this.slot_name_prefix, id); + const storageVersion = options.storageVersion ?? storage.CURRENT_STORAGE_VERSION; const doc: SyncRuleDocument = { _id: id, + storage_version: storageVersion, content: options.content, last_checkpoint: null, last_checkpoint_lsn: null, diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoChecksums.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoChecksums.ts index abcb15845..55099d2fe 100644 --- a/modules/module-mongodb-storage/src/storage/implementation/MongoChecksums.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoChecksums.ts @@ -13,6 +13,7 @@ import { PartialOrFullChecksum } from '@powersync/service-core'; import { PowerSyncMongo } from './db.js'; +import { StorageConfig } from './models.js'; /** * Checksum calculation options, primarily for tests. @@ -27,6 +28,8 @@ export interface MongoChecksumOptions { * Limit on the number of documents to calculate a checksum on at a time. */ operationBatchLimit?: number; + + storageConfig: StorageConfig; } const DEFAULT_BUCKET_BATCH_LIMIT = 200; @@ -43,12 +46,15 @@ const DEFAULT_OPERATION_BATCH_LIMIT = 50_000; */ export class MongoChecksums { private _cache: ChecksumCache | undefined; + private readonly storageConfig: StorageConfig; constructor( private db: PowerSyncMongo, private group_id: number, - private options?: MongoChecksumOptions - ) {} + private options: MongoChecksumOptions + ) { + this.storageConfig = options.storageConfig; + } /** * Lazy-instantiated cache. @@ -222,6 +228,11 @@ export class MongoChecksums { }); } + // Historically, checksum may be stored as 'int' or 'double'. + // More recently, this should be a 'long'. + // $toLong ensures that we always sum it as a long, avoiding inaccuracies in the calculations. + const checksumLong = this.storageConfig.longChecksums ? '$checksum' : { $toLong: '$checksum' }; + // Aggregate over a max of `batchLimit` operations at a time. // Let's say we have 3 buckets (A, B, C), each with 10 operations, and our batch limit is 12. // Then we'll do three batches: @@ -245,10 +256,7 @@ export class MongoChecksums { { $group: { _id: '$_id.b', - // Historically, checksum may be stored as 'int' or 'double'. - // More recently, this should be a 'long'. - // $toLong ensures that we always sum it as a long, avoiding inaccuracies in the calculations. 
- checksum_total: { $sum: { $toLong: '$checksum' } }, + checksum_total: { $sum: checksumLong }, count: { $sum: 1 }, has_clear_op: { $max: { diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRules.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRules.ts index dc0fc5237..58620e706 100644 --- a/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRules.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRules.ts @@ -1,20 +1,42 @@ -import { SyncConfigWithErrors, HydratedSyncRules, versionedHydrationState } from '@powersync/service-sync-rules'; +import { + CompatibilityOption, + DEFAULT_HYDRATION_STATE, + HydratedSyncRules, + HydrationState, + SyncConfigWithErrors, + versionedHydrationState +} from '@powersync/service-sync-rules'; import { storage } from '@powersync/service-core'; +import { StorageConfig } from './models.js'; + export class MongoPersistedSyncRules implements storage.PersistedSyncRules { public readonly slot_name: string; + public readonly hydrationState: HydrationState; constructor( public readonly id: number, public readonly sync_rules: SyncConfigWithErrors, public readonly checkpoint_lsn: string | null, - slot_name: string | null + slot_name: string | null, + public readonly storageConfig: StorageConfig ) { this.slot_name = slot_name ?? `powersync_${id}`; + + if ( + storageConfig.versionedBuckets || + this.sync_rules.config.compatibility.isEnabled(CompatibilityOption.versionedBucketIds) + ) { + // For new sync config versions (using the new storage version), we always enable versioned bucket names. + // For older versions, this depends on the compatibility option. + this.hydrationState = versionedHydrationState(this.id); + } else { + this.hydrationState = DEFAULT_HYDRATION_STATE; + } } hydratedSyncRules(): HydratedSyncRules { - return this.sync_rules.config.hydrate({ hydrationState: versionedHydrationState(this.id) }); + return this.sync_rules.config.hydrate({ hydrationState: this.hydrationState }); } } diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRulesContent.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRulesContent.ts index a843d9a00..8d3f09067 100644 --- a/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRulesContent.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRulesContent.ts @@ -4,7 +4,8 @@ import { SqlSyncRules } from '@powersync/service-sync-rules'; import { MongoPersistedSyncRules } from './MongoPersistedSyncRules.js'; import { MongoSyncRulesLock } from './MongoSyncRulesLock.js'; import { PowerSyncMongo } from './db.js'; -import { SyncRuleDocument } from './models.js'; +import { getMongoStorageConfig, SyncRuleDocument } from './models.js'; +import { ErrorCode, ServiceError } from '@powersync/lib-services-framework'; export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRulesContent { public readonly slot_name: string; @@ -17,6 +18,7 @@ export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRule public readonly last_keepalive_ts: Date | null; public readonly last_checkpoint_ts: Date | null; public readonly active: boolean; + public readonly storageVersion: number; public current_lock: MongoSyncRulesLock | null = null; @@ -34,6 +36,23 @@ export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRule this.last_checkpoint_ts = 
doc.last_checkpoint_ts; this.last_keepalive_ts = doc.last_keepalive_ts; this.active = doc.state == 'ACTIVE'; + this.storageVersion = doc.storage_version ?? storage.LEGACY_STORAGE_VERSION; + } + + /** + * Load the storage config. + * + * This may throw if the persisted storage version is not supported. + */ + getStorageConfig() { + const storageConfig = getMongoStorageConfig(this.storageVersion); + if (storageConfig == null) { + throw new ServiceError( + ErrorCode.PSYNC_S1005, + `Unsupported storage version ${this.storageVersion} for sync rules ${this.id}` + ); + } + return storageConfig; } parsed(options: storage.ParseSyncRulesOptions) { @@ -41,7 +60,8 @@ export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRule this.id, SqlSyncRules.fromYaml(this.sync_rules_content, options), this.last_checkpoint_lsn, - this.slot_name + this.slot_name, + this.getStorageConfig() ); } diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoSyncBucketStorage.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoSyncBucketStorage.ts index ed33379b4..51d66cd2a 100644 --- a/modules/module-mongodb-storage/src/storage/implementation/MongoSyncBucketStorage.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoSyncBucketStorage.ts @@ -32,7 +32,14 @@ import * as timers from 'timers/promises'; import { idPrefixFilter, mapOpEntry, readSingleBatch, setSessionSnapshotTime } from '../../utils/util.js'; import { MongoBucketStorage } from '../MongoBucketStorage.js'; import { PowerSyncMongo } from './db.js'; -import { BucketDataDocument, BucketDataKey, BucketStateDocument, SourceKey, SourceTableDocument } from './models.js'; +import { + BucketDataDocument, + BucketDataKey, + BucketStateDocument, + SourceKey, + SourceTableDocument, + StorageConfig +} from './models.js'; import { MongoBucketBatch } from './MongoBucketBatch.js'; import { MongoChecksumOptions, MongoChecksums } from './MongoChecksums.js'; import { MongoCompactor } from './MongoCompactor.js'; @@ -40,7 +47,8 @@ import { MongoParameterCompactor } from './MongoParameterCompactor.js'; import { MongoWriteCheckpointAPI } from './MongoWriteCheckpointAPI.js'; export interface MongoSyncBucketStorageOptions { - checksumOptions?: MongoChecksumOptions; + checksumOptions?: Omit; + storageConfig: StorageConfig; } /** @@ -69,12 +77,15 @@ export class MongoSyncBucketStorage public readonly group_id: number, private readonly sync_rules: storage.PersistedSyncRulesContent, public readonly slot_name: string, - writeCheckpointMode?: storage.WriteCheckpointMode, - options?: MongoSyncBucketStorageOptions + writeCheckpointMode: storage.WriteCheckpointMode | undefined, + options: MongoSyncBucketStorageOptions ) { super(); this.db = factory.db; - this.checksums = new MongoChecksums(this.db, this.group_id, options?.checksumOptions); + this.checksums = new MongoChecksums(this.db, this.group_id, { + ...options.checksumOptions, + storageConfig: options?.storageConfig + }); this.writeCheckpointAPI = new MongoWriteCheckpointAPI({ db: this.db, mode: writeCheckpointMode ?? 
storage.WriteCheckpointMode.MANAGED, diff --git a/modules/module-mongodb-storage/src/storage/implementation/models.ts b/modules/module-mongodb-storage/src/storage/implementation/models.ts index be906bbef..a3c2110df 100644 --- a/modules/module-mongodb-storage/src/storage/implementation/models.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/models.ts @@ -204,6 +204,29 @@ export interface SyncRuleDocument { id: string; expires_at: Date; } | null; + + storage_version?: number; +} + +export interface StorageConfig extends storage.StorageVersionConfig { + /** + * When true, bucket_data.checksum is guaranteed to be persisted as a Long. + * + * When false, it could also have been persisted as an Int32 or Double, in which case it must be converted to + * a Long before summing. + */ + longChecksums: boolean; +} + +const LONG_CHECKSUMS_STORAGE_VERSION = 2; + +export function getMongoStorageConfig(storageVersion: number): StorageConfig | undefined { + const baseConfig = storage.STORAGE_VERSION_CONFIG[storageVersion]; + if (baseConfig == null) { + return undefined; + } + + return { ...baseConfig, longChecksums: storageVersion >= LONG_CHECKSUMS_STORAGE_VERSION }; } export interface CheckpointEventDocument { diff --git a/modules/module-mongodb-storage/src/utils/test-utils.ts b/modules/module-mongodb-storage/src/utils/test-utils.ts index eece317f4..67ec3c149 100644 --- a/modules/module-mongodb-storage/src/utils/test-utils.ts +++ b/modules/module-mongodb-storage/src/utils/test-utils.ts @@ -1,14 +1,13 @@ import { mongo } from '@powersync/lib-service-mongodb'; -import { PowerSyncMongo } from '../storage/implementation/db.js'; import { TestStorageOptions } from '@powersync/service-core'; +import { MongoBucketStorage, MongoBucketStorageOptions } from '../storage/MongoBucketStorage.js'; import { MongoReportStorage } from '../storage/MongoReportStorage.js'; -import { MongoBucketStorage } from '../storage/MongoBucketStorage.js'; -import { MongoSyncBucketStorageOptions } from '../storage/implementation/MongoSyncBucketStorage.js'; +import { PowerSyncMongo } from '../storage/implementation/db.js'; export type MongoTestStorageOptions = { url: string; isCI: boolean; - internalOptions?: MongoSyncBucketStorageOptions; + internalOptions?: MongoBucketStorageOptions; }; export function mongoTestStorageFactoryGenerator(factoryOptions: MongoTestStorageOptions) { diff --git a/modules/module-mongodb-storage/test/src/__snapshots__/storage.test.ts.snap b/modules/module-mongodb-storage/test/src/__snapshots__/storage.test.ts.snap deleted file mode 100644 index c852d392d..000000000 --- a/modules/module-mongodb-storage/test/src/__snapshots__/storage.test.ts.snap +++ /dev/null @@ -1,25 +0,0 @@ -// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html - -exports[`Mongo Sync Bucket Storage - Data > empty storage metrics 1`] = ` -{ - "operations_size_bytes": 0, - "parameters_size_bytes": 0, - "replication_size_bytes": 0, -} -`; - -exports[`Mongo Sync Bucket Storage - split buckets > empty storage metrics 1`] = ` -{ - "operations_size_bytes": 0, - "parameters_size_bytes": 0, - "replication_size_bytes": 0, -} -`; - -exports[`Mongo Sync Bucket Storage - split operations > empty storage metrics 1`] = ` -{ - "operations_size_bytes": 0, - "parameters_size_bytes": 0, - "replication_size_bytes": 0, -} -`; diff --git a/modules/module-mongodb-storage/test/src/__snapshots__/storage_sync.test.ts.snap b/modules/module-mongodb-storage/test/src/__snapshots__/storage_sync.test.ts.snap index e3449a7c7..8204151e6 100644 --- 
a/modules/module-mongodb-storage/test/src/__snapshots__/storage_sync.test.ts.snap +++ b/modules/module-mongodb-storage/test/src/__snapshots__/storage_sync.test.ts.snap @@ -1,6 +1,6 @@ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html -exports[`sync - mongodb > compacting data - invalidate checkpoint 1`] = ` +exports[`sync - mongodb > storage v1 > compacting data - invalidate checkpoint 1`] = ` [ { "checkpoint": { @@ -31,7 +31,7 @@ exports[`sync - mongodb > compacting data - invalidate checkpoint 1`] = ` ] `; -exports[`sync - mongodb > compacting data - invalidate checkpoint 2`] = ` +exports[`sync - mongodb > storage v1 > compacting data - invalidate checkpoint 2`] = ` [ { "data": { @@ -104,7 +104,7 @@ exports[`sync - mongodb > compacting data - invalidate checkpoint 2`] = ` ] `; -exports[`sync - mongodb > encodes sync rules id in buckes for streams 1`] = ` +exports[`sync - mongodb > storage v1 > encodes sync rules id in buckes for streams 1`] = ` [ { "checkpoint": { @@ -159,7 +159,7 @@ exports[`sync - mongodb > encodes sync rules id in buckes for streams 1`] = ` ] `; -exports[`sync - mongodb > encodes sync rules id in buckes for streams 2`] = ` +exports[`sync - mongodb > storage v1 > encodes sync rules id in buckes for streams 2`] = ` [ { "checkpoint": { @@ -214,7 +214,7 @@ exports[`sync - mongodb > encodes sync rules id in buckes for streams 2`] = ` ] `; -exports[`sync - mongodb > expired token 1`] = ` +exports[`sync - mongodb > storage v1 > expired token 1`] = ` [ { "token_expires_in": 0, @@ -222,7 +222,7 @@ exports[`sync - mongodb > expired token 1`] = ` ] `; -exports[`sync - mongodb > expiring token 1`] = ` +exports[`sync - mongodb > storage v1 > expiring token 1`] = ` [ { "checkpoint": { @@ -258,7 +258,7 @@ exports[`sync - mongodb > expiring token 1`] = ` ] `; -exports[`sync - mongodb > expiring token 2`] = ` +exports[`sync - mongodb > storage v1 > expiring token 2`] = ` [ { "token_expires_in": 0, @@ -266,7 +266,7 @@ exports[`sync - mongodb > expiring token 2`] = ` ] `; -exports[`sync - mongodb > sends checkpoint complete line for empty checkpoint 1`] = ` +exports[`sync - mongodb > storage v1 > sends checkpoint complete line for empty checkpoint 1`] = ` [ { "checkpoint": { @@ -335,7 +335,7 @@ exports[`sync - mongodb > sends checkpoint complete line for empty checkpoint 1` ] `; -exports[`sync - mongodb > sync buckets in order 1`] = ` +exports[`sync - mongodb > storage v1 > sync buckets in order 1`] = ` [ { "checkpoint": { @@ -431,7 +431,7 @@ exports[`sync - mongodb > sync buckets in order 1`] = ` ] `; -exports[`sync - mongodb > sync global data 1`] = ` +exports[`sync - mongodb > storage v1 > sync global data 1`] = ` [ { "checkpoint": { @@ -495,7 +495,7 @@ exports[`sync - mongodb > sync global data 1`] = ` ] `; -exports[`sync - mongodb > sync interrupts low-priority buckets on new checkpoints (2) 1`] = ` +exports[`sync - mongodb > storage v1 > sync interrupts low-priority buckets on new checkpoints (2) 1`] = ` [ { "checkpoint": { @@ -689,7 +689,7 @@ exports[`sync - mongodb > sync interrupts low-priority buckets on new checkpoint ] `; -exports[`sync - mongodb > sync legacy non-raw data 1`] = ` +exports[`sync - mongodb > storage v1 > sync legacy non-raw data 1`] = ` [ { "checkpoint": { @@ -749,7 +749,7 @@ exports[`sync - mongodb > sync legacy non-raw data 1`] = ` ] `; -exports[`sync - mongodb > sync updates to data query only 1`] = ` +exports[`sync - mongodb > storage v1 > sync updates to data query only 1`] = ` [ { "checkpoint": { @@ -785,7 +785,7 @@ exports[`sync - mongodb > 
sync updates to data query only 1`] = ` ] `; -exports[`sync - mongodb > sync updates to data query only 2`] = ` +exports[`sync - mongodb > storage v1 > sync updates to data query only 2`] = ` [ { "checkpoint_diff": { @@ -834,7 +834,7 @@ exports[`sync - mongodb > sync updates to data query only 2`] = ` ] `; -exports[`sync - mongodb > sync updates to global data 1`] = ` +exports[`sync - mongodb > storage v1 > sync updates to global data 1`] = ` [ { "checkpoint": { @@ -870,7 +870,7 @@ exports[`sync - mongodb > sync updates to global data 1`] = ` ] `; -exports[`sync - mongodb > sync updates to global data 2`] = ` +exports[`sync - mongodb > storage v1 > sync updates to global data 2`] = ` [ { "checkpoint_diff": { @@ -919,7 +919,7 @@ exports[`sync - mongodb > sync updates to global data 2`] = ` ] `; -exports[`sync - mongodb > sync updates to global data 3`] = ` +exports[`sync - mongodb > storage v1 > sync updates to global data 3`] = ` [ { "checkpoint_diff": { @@ -968,7 +968,7 @@ exports[`sync - mongodb > sync updates to global data 3`] = ` ] `; -exports[`sync - mongodb > sync updates to parameter query + data 1`] = ` +exports[`sync - mongodb > storage v1 > sync updates to parameter query + data 1`] = ` [ { "checkpoint": { @@ -992,7 +992,7 @@ exports[`sync - mongodb > sync updates to parameter query + data 1`] = ` ] `; -exports[`sync - mongodb > sync updates to parameter query + data 2`] = ` +exports[`sync - mongodb > storage v1 > sync updates to parameter query + data 2`] = ` [ { "checkpoint_diff": { @@ -1041,7 +1041,7 @@ exports[`sync - mongodb > sync updates to parameter query + data 2`] = ` ] `; -exports[`sync - mongodb > sync updates to parameter query only 1`] = ` +exports[`sync - mongodb > storage v1 > sync updates to parameter query only 1`] = ` [ { "checkpoint": { @@ -1065,7 +1065,7 @@ exports[`sync - mongodb > sync updates to parameter query only 1`] = ` ] `; -exports[`sync - mongodb > sync updates to parameter query only 2`] = ` +exports[`sync - mongodb > storage v1 > sync updates to parameter query only 2`] = ` [ { "checkpoint_diff": { @@ -1094,3 +1094,1098 @@ exports[`sync - mongodb > sync updates to parameter query only 2`] = ` }, ] `; + +exports[`sync - mongodb > storage v2 > compacting data - invalidate checkpoint 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -93886621, + "count": 2, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > compacting data - invalidate checkpoint 2`] = ` +[ + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": -93886621, + "op": "CLEAR", + "op_id": "2", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_diff": { + "last_op_id": "4", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 499012468, + "count": 4, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "2", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 1859363232, + "data": "{"id":"t1","description":"Test 1b"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "3", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + { + "checksum": 3028503153, + "data": "{"id":"t2","description":"Test 
2b"}", + "object_id": "t2", + "object_type": "test", + "op": "PUT", + "op_id": "4", + "subkey": "13423353-9f27-59b4-baf0-64a5e09f1769", + }, + ], + "has_more": false, + "next_after": "4", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "4", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > encodes sync rules id in buckes for streams 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#test|0[]", + "checksum": 920318466, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "test", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#test|0[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > encodes sync rules id in buckes for streams 2`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "2#test|0[]", + "checksum": 920318466, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "test", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "2#test|0[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > expired token 1`] = ` +[ + { + "token_expires_in": 0, + }, +] +`; + +exports[`sync - mongodb > storage v2 > expiring token 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > expiring token 2`] = ` +[ + { + "token_expires_in": 0, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sends checkpoint complete line for empty checkpoint 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -1221282404, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 3073684892, + "data": "{"id":"t1","description":"sync"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + null, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, + { + "checkpoint_diff": { + "last_op_id": "1", + "removed_buckets": [], 
+ "updated_buckets": [], + "write_checkpoint": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync buckets in order 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#b0[]", + "checksum": 920318466, + "count": 1, + "priority": 2, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + { + "bucket": "1#b1[]", + "checksum": -1382098757, + "count": 1, + "priority": 1, + "subscriptions": [ + { + "default": 1, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "b0", + }, + { + "errors": [], + "is_default": true, + "name": "b1", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b1[]", + "data": [ + { + "checksum": 2912868539, + "data": "{"id":"earlier","description":"Test 2"}", + "object_id": "earlier", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "0dfe86bd-d15b-5fd0-9c7b-a31693030ee0", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "partial_checkpoint_complete": { + "last_op_id": "2", + "priority": 1, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b0[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync global data 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -93886621, + "count": 2, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + { + "checksum": 3280762209, + "data": "{"id":"t2","description":"Test 2"}", + "object_id": "t2", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "13423353-9f27-59b4-baf0-64a5e09f1769", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync interrupts low-priority buckets on new checkpoints (2) 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#b0a[]", + "checksum": -659831575, + "count": 2000, + "priority": 2, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + { + "bucket": "1#b0b[]", + "checksum": -659831575, + "count": 2000, + "priority": 2, + "subscriptions": [ + { + "default": 1, + }, + ], + }, + { + "bucket": "1#b1[]", + "checksum": -1096116670, + "count": 1, + "priority": 1, + "subscriptions": [ + { + "default": 2, + }, + ], + }, + ], + "last_op_id": "4001", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "b0a", + }, + { + "errors": [], + "is_default": true, + "name": "b0b", + }, + { + "errors": [], + "is_default": true, + "name": "b1", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b1[]", + "data": undefined, + "has_more": 
false, + "next_after": "1", + }, + }, + { + "partial_checkpoint_complete": { + "last_op_id": "4001", + "priority": 1, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b0a[]", + "data": undefined, + "has_more": true, + "next_after": "2000", + }, + }, + { + "data": { + "after": "2000", + "bucket": "1#b0a[]", + "data": undefined, + "has_more": true, + "next_after": "4000", + }, + }, + { + "checkpoint_diff": { + "last_op_id": "4004", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#b0a[]", + "checksum": 883076828, + "count": 2001, + "priority": 2, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + { + "bucket": "1#b0b[]", + "checksum": 883076828, + "count": 2001, + "priority": 2, + "subscriptions": [ + { + "default": 1, + }, + ], + }, + { + "bucket": "1#b1[]", + "checksum": 1841937527, + "count": 2, + "priority": 1, + "subscriptions": [ + { + "default": 2, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "1", + "bucket": "1#b1[]", + "data": undefined, + "has_more": false, + "next_after": "4002", + }, + }, + { + "partial_checkpoint_complete": { + "last_op_id": "4004", + "priority": 1, + }, + }, + { + "data": { + "after": "4000", + "bucket": "1#b0a[]", + "data": undefined, + "has_more": false, + "next_after": "4003", + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b0b[]", + "data": undefined, + "has_more": true, + "next_after": "1999", + }, + }, + { + "data": { + "after": "1999", + "bucket": "1#b0b[]", + "data": undefined, + "has_more": true, + "next_after": "3999", + }, + }, + { + "data": { + "after": "3999", + "bucket": "1#b0b[]", + "data": undefined, + "has_more": false, + "next_after": "4004", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "4004", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync legacy non-raw data 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -852817836, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 3442149460n, + "data": { + "description": "Test +"string"", + "id": "t1", + "large_num": 12345678901234567890n, + }, + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to data query only 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "by_user", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to data query only 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "2", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 1418351250, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { 
+ "after": "0", + "bucket": "1#by_user["user1"]", + "data": [ + { + "checksum": 1418351250, + "data": "{"id":"list1","user_id":"user1","name":"User 1"}", + "object_id": "list1", + "object_type": "lists", + "op": "PUT", + "op_id": "2", + "subkey": "0ffb7b58-d14d-5efa-be6c-c8eda74ab7a8", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to global data 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to global data 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "1", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 920318466, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to global data 3`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "2", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -93886621, + "count": 2, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "1", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 3280762209, + "data": "{"id":"t2","description":"Test 2"}", + "object_id": "t2", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "13423353-9f27-59b4-baf0-64a5e09f1769", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to parameter query + data 1`] = ` +[ + { + "checkpoint": { + "buckets": [], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "by_user", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to parameter query + data 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "2", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 1418351250, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#by_user["user1"]", + "data": [ + { + "checksum": 1418351250, + "data": "{"id":"list1","user_id":"user1","name":"User 1"}", + "object_id": "list1", + "object_type": "lists", + "op": "PUT", + "op_id": "1", + "subkey": "0ffb7b58-d14d-5efa-be6c-c8eda74ab7a8", + }, + ], + "has_more": false, + "next_after": "1", + }, 
+ }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to parameter query only 1`] = ` +[ + { + "checkpoint": { + "buckets": [], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "by_user", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - mongodb > storage v2 > sync updates to parameter query only 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "1", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; diff --git a/modules/module-mongodb-storage/test/src/storage_compacting.test.ts b/modules/module-mongodb-storage/test/src/storage_compacting.test.ts index 09e55eee8..4d1f3023f 100644 --- a/modules/module-mongodb-storage/test/src/storage_compacting.test.ts +++ b/modules/module-mongodb-storage/test/src/storage_compacting.test.ts @@ -1,4 +1,4 @@ -import { register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; +import { bucketRequest, bucketRequests, register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; import { describe, expect, test } from 'vitest'; import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; import { storage, SyncRulesBucketStorage } from '@powersync/service-core'; @@ -49,11 +49,11 @@ bucket_definitions: const bucketStorage = factory.getInstance(syncRules); const { checkpoint } = await populate(bucketStorage); - return { bucketStorage, checkpoint, factory }; + return { bucketStorage, checkpoint, factory, syncRules }; }; test('full compact', async () => { - const { bucketStorage, checkpoint, factory } = await setup(); + const { bucketStorage, checkpoint, factory, syncRules } = await setup(); // Simulate bucket_state from old version not being available await factory.db.bucket_state.deleteMany({}); @@ -68,14 +68,17 @@ bucket_definitions: signal: null as any }); - const checksumAfter = await bucketStorage.getChecksums(checkpoint, ['by_user["u1"]', 'by_user["u2"]']); - expect(checksumAfter.get('by_user["u1"]')).toEqual({ - bucket: 'by_user["u1"]', + const checksumAfter = await bucketStorage.getChecksums( + checkpoint, + bucketRequests(syncRules, ['by_user["u1"]', 'by_user["u2"]']) + ); + expect(checksumAfter.get(bucketRequest(syncRules, 'by_user["u1"]'))).toEqual({ + bucket: bucketRequest(syncRules, 'by_user["u1"]'), checksum: -659469718, count: 1 }); - expect(checksumAfter.get('by_user["u2"]')).toEqual({ - bucket: 'by_user["u2"]', + expect(checksumAfter.get(bucketRequest(syncRules, 'by_user["u2"]'))).toEqual({ + bucket: bucketRequest(syncRules, 'by_user["u2"]'), checksum: 430217650, count: 1 }); @@ -119,14 +122,17 @@ bucket_definitions: }); expect(result2.buckets).toEqual(0); - const checksumAfter = await bucketStorage.getChecksums(checkpoint, ['by_user2["u1"]', 'by_user2["u2"]']); - expect(checksumAfter.get('by_user2["u1"]')).toEqual({ - bucket: 'by_user2["u1"]', + const checksumAfter = await bucketStorage.getChecksums( + checkpoint, + bucketRequests(syncRules, ['by_user2["u1"]', 'by_user2["u2"]']) + ); + expect(checksumAfter.get(bucketRequest(syncRules, 'by_user2["u1"]'))).toEqual({ + bucket: bucketRequest(syncRules, 'by_user2["u1"]'), checksum: -659469718, count: 1 }); - 
expect(checksumAfter.get('by_user2["u2"]')).toEqual({ - bucket: 'by_user2["u2"]', + expect(checksumAfter.get(bucketRequest(syncRules, 'by_user2["u2"]'))).toEqual({ + bucket: bucketRequest(syncRules, 'by_user2["u2"]'), checksum: 430217650, count: 1 }); diff --git a/modules/module-mongodb-storage/test/src/storage_sync.test.ts b/modules/module-mongodb-storage/test/src/storage_sync.test.ts index f49d595bf..4d61429b4 100644 --- a/modules/module-mongodb-storage/test/src/storage_sync.test.ts +++ b/modules/module-mongodb-storage/test/src/storage_sync.test.ts @@ -1,26 +1,28 @@ import { storage } from '@powersync/service-core'; -import { register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; +import { bucketRequest, register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; import { describe, expect, test } from 'vitest'; -import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; +import { INITIALIZED_MONGO_STORAGE_FACTORY, TEST_STORAGE_VERSIONS } from './util.js'; -describe('sync - mongodb', () => { - register.registerSyncTests(INITIALIZED_MONGO_STORAGE_FACTORY); +function registerSyncStorageTests(storageFactory: storage.TestStorageFactory, storageVersion: number) { + register.registerSyncTests(storageFactory, { storageVersion }); // The split of returned results can vary depending on storage drivers test('large batch (2)', async () => { // Test syncing a batch of data that is small in count, // but large enough in size to be split over multiple returned chunks. // Similar to the above test, but splits over 1MB chunks. - const sync_rules = test_utils.testRules( - ` + await using factory = await storageFactory(); + const syncRules = await factory.updateSyncRules({ + content: ` bucket_definitions: global: data: - SELECT id, description FROM "%" - ` - ); - await using factory = await INITIALIZED_MONGO_STORAGE_FACTORY(); - const bucketStorage = factory.getInstance(sync_rules); + `, + storageVersion + }); + const bucketStorage = factory.getInstance(syncRules); + const globalBucket = bucketRequest(syncRules, 'global[]'); const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; @@ -74,7 +76,7 @@ describe('sync - mongodb', () => { const options: storage.BucketDataBatchOptions = {}; const batch1 = await test_utils.fromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]), options) + bucketStorage.getBucketDataBatch(checkpoint, new Map([[globalBucket, 0n]]), options) ); expect(test_utils.getBatchData(batch1)).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }, @@ -89,7 +91,7 @@ describe('sync - mongodb', () => { const batch2 = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([['global[]', BigInt(batch1[0].chunkData.next_after)]]), + new Map([[globalBucket, BigInt(batch1[0].chunkData.next_after)]]), options ) ); @@ -105,7 +107,7 @@ describe('sync - mongodb', () => { const batch3 = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([['global[]', BigInt(batch2[0].chunkData.next_after)]]), + new Map([[globalBucket, BigInt(batch2[0].chunkData.next_after)]]), options ) ); @@ -120,9 +122,18 @@ describe('sync - mongodb', () => { // Test that the checksum type is correct. // Specifically, test that it never persisted as double. 
- const checksumTypes = await factory.db.bucket_data + const mongoFactory = factory as any; + const checksumTypes = await mongoFactory.db.bucket_data .aggregate([{ $group: { _id: { $type: '$checksum' }, count: { $sum: 1 } } }]) .toArray(); expect(checksumTypes).toEqual([{ _id: 'long', count: 4 }]); }); +} + +describe('sync - mongodb', () => { + for (const storageVersion of TEST_STORAGE_VERSIONS) { + describe(`storage v${storageVersion}`, () => { + registerSyncStorageTests(INITIALIZED_MONGO_STORAGE_FACTORY, storageVersion); + }); + } }); diff --git a/modules/module-mongodb-storage/test/src/util.ts b/modules/module-mongodb-storage/test/src/util.ts index 4a7174056..5b4de59b4 100644 --- a/modules/module-mongodb-storage/test/src/util.ts +++ b/modules/module-mongodb-storage/test/src/util.ts @@ -1,5 +1,6 @@ import { env } from './env.js'; import { mongoTestReportStorageFactoryGenerator, mongoTestStorageFactoryGenerator } from '@module/utils/test-utils.js'; +import { CURRENT_STORAGE_VERSION, LEGACY_STORAGE_VERSION } from '@powersync/service-core'; export const INITIALIZED_MONGO_STORAGE_FACTORY = mongoTestStorageFactoryGenerator({ url: env.MONGO_TEST_URL, @@ -10,3 +11,5 @@ export const INITIALIZED_MONGO_REPORT_STORAGE_FACTORY = mongoTestReportStorageFa url: env.MONGO_TEST_URL, isCI: env.CI }); + +export const TEST_STORAGE_VERSIONS = [LEGACY_STORAGE_VERSION, CURRENT_STORAGE_VERSION]; diff --git a/modules/module-mongodb/test/src/change_stream.test.ts b/modules/module-mongodb/test/src/change_stream.test.ts index f833ddca4..1e5d266ec 100644 --- a/modules/module-mongodb/test/src/change_stream.test.ts +++ b/modules/module-mongodb/test/src/change_stream.test.ts @@ -3,12 +3,11 @@ import { setTimeout } from 'node:timers/promises'; import { describe, expect, test, vi } from 'vitest'; import { mongo } from '@powersync/lib-service-mongodb'; -import { storage } from '@powersync/service-core'; import { test_utils } from '@powersync/service-core-tests'; import { PostImagesOption } from '@module/types/types.js'; import { ChangeStreamTestContext } from './change_stream_utils.js'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; const BASIC_SYNC_RULES = ` bucket_definitions: @@ -21,9 +20,12 @@ describe('change stream', () => { describeWithStorage({ timeout: 20_000 }, defineChangeStreamTests); }); -function defineChangeStreamTests(factory: storage.TestStorageFactory) { +function defineChangeStreamTests({ factory, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return ChangeStreamTestContext.open(factory, { ...options, storageVersion }); + }; test('replicating basic values', async () => { - await using context = await ChangeStreamTestContext.open(factory, { + await using context = await openContext({ mongoOptions: { postImages: PostImagesOption.READ_ONLY } }); const { db } = context; @@ -59,7 +61,7 @@ bucket_definitions: }); test('replicating wildcard', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(` bucket_definitions: @@ -91,7 +93,7 @@ bucket_definitions: }); test('updateLookup - no fullDocument available', async () => { - await using context = await ChangeStreamTestContext.open(factory, { + await using context = await openContext({ mongoOptions: { postImages: PostImagesOption.OFF } }); const { db, client } = context; @@ -137,7 +139,7 @@ 
bucket_definitions: test('postImages - autoConfigure', async () => { // Similar to the above test, but with postImages enabled. // This resolves the consistency issue. - await using context = await ChangeStreamTestContext.open(factory, { + await using context = await openContext({ mongoOptions: { postImages: PostImagesOption.AUTO_CONFIGURE } }); const { db, client } = context; @@ -185,7 +187,7 @@ bucket_definitions: test('postImages - on', async () => { // Similar to postImages - autoConfigure, but does not auto-configure. // changeStreamPreAndPostImages must be manually configured. - await using context = await ChangeStreamTestContext.open(factory, { + await using context = await openContext({ mongoOptions: { postImages: PostImagesOption.READ_ONLY } }); const { db, client } = context; @@ -230,7 +232,7 @@ bucket_definitions: }); test('replicating case sensitive table', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(` bucket_definitions: @@ -254,7 +256,7 @@ bucket_definitions: }); test('replicating large values', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(` bucket_definitions: @@ -285,7 +287,7 @@ bucket_definitions: }); test('replicating dropCollection', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; const syncRuleContent = ` bucket_definitions: @@ -317,7 +319,7 @@ bucket_definitions: }); test('replicating renameCollection', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; const syncRuleContent = ` bucket_definitions: @@ -348,7 +350,7 @@ bucket_definitions: }); test('initial sync', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(BASIC_SYNC_RULES); @@ -373,7 +375,7 @@ bucket_definitions: // MongoServerError: PlanExecutor error during aggregation :: caused by :: BSONObj size: 33554925 (0x20001ED) is invalid. 
// Size must be between 0 and 16793600(16MB) - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(`bucket_definitions: global: data: @@ -422,7 +424,7 @@ bucket_definitions: }); test('collection not in sync rules', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(BASIC_SYNC_RULES); @@ -439,7 +441,7 @@ bucket_definitions: }); test('postImages - new collection with postImages enabled', async () => { - await using context = await ChangeStreamTestContext.open(factory, { + await using context = await openContext({ mongoOptions: { postImages: PostImagesOption.AUTO_CONFIGURE } }); const { db } = context; @@ -472,7 +474,7 @@ bucket_definitions: }); test('postImages - new collection with postImages disabled', async () => { - await using context = await ChangeStreamTestContext.open(factory, { + await using context = await openContext({ mongoOptions: { postImages: PostImagesOption.AUTO_CONFIGURE } }); const { db } = context; @@ -502,7 +504,7 @@ bucket_definitions: }); test('recover from error', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(` bucket_definitions: diff --git a/modules/module-mongodb/test/src/change_stream_utils.ts b/modules/module-mongodb/test/src/change_stream_utils.ts index 122435640..cd9e94056 100644 --- a/modules/module-mongodb/test/src/change_stream_utils.ts +++ b/modules/module-mongodb/test/src/change_stream_utils.ts @@ -4,11 +4,14 @@ import { createCoreReplicationMetrics, initializeCoreReplicationMetrics, InternalOpId, + LEGACY_STORAGE_VERSION, OplogEntry, ProtocolOpId, ReplicationCheckpoint, + STORAGE_VERSION_CONFIG, SyncRulesBucketStorage, - TestStorageOptions + TestStorageOptions, + utils } from '@powersync/service-core'; import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests'; @@ -23,6 +26,7 @@ export class ChangeStreamTestContext { private _walStream?: ChangeStream; private abortController = new AbortController(); private streamPromise?: Promise; + private syncRulesId?: number; public storage?: SyncRulesBucketStorage; /** @@ -35,6 +39,7 @@ export class ChangeStreamTestContext { factory: (options: TestStorageOptions) => Promise, options?: { doNotClear?: boolean; + storageVersion?: number; mongoOptions?: Partial; streamOptions?: Partial; } @@ -45,13 +50,19 @@ export class ChangeStreamTestContext { if (!options?.doNotClear) { await clearTestDb(connectionManager.db); } - return new ChangeStreamTestContext(f, connectionManager, options?.streamOptions); + + const storageVersion = options?.storageVersion ?? LEGACY_STORAGE_VERSION; + const versionedBuckets = STORAGE_VERSION_CONFIG[storageVersion]?.versionedBuckets ?? false; + + return new ChangeStreamTestContext(f, connectionManager, options?.streamOptions, storageVersion, versionedBuckets); } constructor( public factory: BucketStorageFactory, public connectionManager: MongoManager, - private streamOptions?: Partial + private streamOptions: Partial = {}, + private storageVersion: number = LEGACY_STORAGE_VERSION, + private versionedBuckets: boolean = STORAGE_VERSION_CONFIG[storageVersion]?.versionedBuckets ?? 
false ) { createCoreReplicationMetrics(METRICS_HELPER.metricsEngine); initializeCoreReplicationMetrics(METRICS_HELPER.metricsEngine); @@ -88,7 +99,12 @@ export class ChangeStreamTestContext { } async updateSyncRules(content: string) { - const syncRules = await this.factory.updateSyncRules({ content: content, validate: true }); + const syncRules = await this.factory.updateSyncRules({ + content: content, + validate: true, + storageVersion: this.storageVersion + }); + this.syncRulesId = syncRules.id; this.storage = this.factory.getInstance(syncRules); return this.storage!; } @@ -99,6 +115,7 @@ export class ChangeStreamTestContext { throw new Error(`Next sync rules not available`); } + this.syncRulesId = syncRules.id; this.storage = this.factory.getInstance(syncRules); return this.storage!; } @@ -159,9 +176,21 @@ export class ChangeStreamTestContext { return checkpoint; } + private resolveBucketName(bucket: string) { + if (!this.versionedBuckets || /^\d+#/.test(bucket)) { + return bucket; + } + if (this.syncRulesId == null) { + throw new Error('Sync rules not configured - call updateSyncRules() first'); + } + return `${this.syncRulesId}#${bucket}`; + } + async getBucketsDataBatch(buckets: Record, options?: { timeout?: number }) { let checkpoint = await this.getCheckpoint(options); - const map = new Map(Object.entries(buckets)); + const map = new Map( + Object.entries(buckets).map(([bucket, opId]) => [this.resolveBucketName(bucket), opId]) + ); return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); } @@ -170,8 +199,9 @@ export class ChangeStreamTestContext { if (typeof start == 'string') { start = BigInt(start); } + const resolvedBucket = this.resolveBucketName(bucket); const checkpoint = await this.getCheckpoint(options); - const map = new Map([[bucket, start]]); + const map = new Map([[resolvedBucket, start]]); let data: OplogEntry[] = []; while (true) { const batch = this.storage!.getBucketDataBatch(checkpoint, map); @@ -181,20 +211,26 @@ export class ChangeStreamTestContext { if (batches.length == 0 || !batches[0]!.chunkData.has_more) { break; } - map.set(bucket, BigInt(batches[0]!.chunkData.next_after)); + map.set(resolvedBucket, BigInt(batches[0]!.chunkData.next_after)); } return data; } - async getChecksums(buckets: string[], options?: { timeout?: number }) { + async getChecksums(buckets: string[], options?: { timeout?: number }): Promise { let checkpoint = await this.getCheckpoint(options); - return this.storage!.getChecksums(checkpoint, buckets); + const versionedBuckets = buckets.map((bucket) => this.resolveBucketName(bucket)); + const checksums = await this.storage!.getChecksums(checkpoint, versionedBuckets); + + const unversioned: utils.ChecksumMap = new Map(); + for (let i = 0; i < buckets.length; i++) { + unversioned.set(buckets[i], checksums.get(versionedBuckets[i])!); + } + return unversioned; } async getChecksum(bucket: string, options?: { timeout?: number }) { - let checkpoint = await this.getCheckpoint(options); - const map = await this.storage!.getChecksums(checkpoint, [bucket]); - return map.get(bucket); + const checksums = await this.getChecksums([bucket], options); + return checksums.get(bucket); } } diff --git a/modules/module-mongodb/test/src/chunked_snapshot.test.ts b/modules/module-mongodb/test/src/chunked_snapshot.test.ts index c7c3d653d..145c54f5e 100644 --- a/modules/module-mongodb/test/src/chunked_snapshot.test.ts +++ b/modules/module-mongodb/test/src/chunked_snapshot.test.ts @@ -1,18 +1,22 @@ import { mongo } from 
'@powersync/lib-service-mongodb'; -import { reduceBucket, TestStorageFactory } from '@powersync/service-core'; +import { reduceBucket } from '@powersync/service-core'; import { METRICS_HELPER } from '@powersync/service-core-tests'; import { JSONBig } from '@powersync/service-jsonbig'; import { SqliteJsonValue } from '@powersync/service-sync-rules'; import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; import { ChangeStreamTestContext } from './change_stream_utils.js'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; describe('chunked snapshots', () => { describeWithStorage({ timeout: 120_000 }, defineBatchTests); }); -function defineBatchTests(factory: TestStorageFactory) { +function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return ChangeStreamTestContext.open(factory, { ...options, storageVersion }); + }; + // This is not as sensitive to the id type as postgres, but we still test a couple of cases test('chunked snapshot (int32)', async () => { await testChunkedSnapshot({ @@ -93,7 +97,7 @@ function defineBatchTests(factory: TestStorageFactory) { const idToSqlite = options.idToSqlite ?? ((n) => n); const idToString = (id: any) => String(idToSqlite(id)); - await using context = await ChangeStreamTestContext.open(factory, { + await using context = await openContext({ // We need to use a smaller chunk size here, so that we can run a query in between chunks streamOptions: { snapshotChunkLength: 100 } }); diff --git a/modules/module-mongodb/test/src/resume.test.ts b/modules/module-mongodb/test/src/resume.test.ts index 68e9b2540..c3133d523 100644 --- a/modules/module-mongodb/test/src/resume.test.ts +++ b/modules/module-mongodb/test/src/resume.test.ts @@ -1,19 +1,22 @@ import { ChangeStreamInvalidatedError } from '@module/replication/ChangeStream.js'; import { MongoManager } from '@module/replication/MongoManager.js'; import { normalizeConnectionConfig } from '@module/types/types.js'; -import { BucketStorageFactory, TestStorageOptions } from '@powersync/service-core'; import { describe, expect, test } from 'vitest'; import { ChangeStreamTestContext } from './change_stream_utils.js'; import { env } from './env.js'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; describe('mongodb resuming replication', () => { describeWithStorage({}, defineResumeTest); }); -function defineResumeTest(factoryGenerator: (options?: TestStorageOptions) => Promise) { +function defineResumeTest({ factory: factoryGenerator, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return ChangeStreamTestContext.open(factoryGenerator, { ...options, storageVersion }); + }; + test('resuming with a different source database', async () => { - await using context = await ChangeStreamTestContext.open(factoryGenerator); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(/* yaml */ @@ -53,7 +56,7 @@ function defineResumeTest(factoryGenerator: (options?: TestStorageOptions) => Pr const factory = await factoryGenerator({ doNotClear: true }); // Create a new context without updating the sync rules - await using context2 = new ChangeStreamTestContext(factory, connectionManager); + await using context2 = new ChangeStreamTestContext(factory, connectionManager, {}, 
storageVersion); const activeContent = await factory.getActiveSyncRulesContent(); context2.storage = factory.getInstance(activeContent!); diff --git a/modules/module-mongodb/test/src/resuming_snapshots.test.ts b/modules/module-mongodb/test/src/resuming_snapshots.test.ts index ff06f6d3f..81737e63a 100644 --- a/modules/module-mongodb/test/src/resuming_snapshots.test.ts +++ b/modules/module-mongodb/test/src/resuming_snapshots.test.ts @@ -8,19 +8,19 @@ import { env } from './env.js'; import { describeWithStorage } from './util.js'; describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () { - describeWithStorage({ timeout: 240_000 }, function (factory) { + describeWithStorage({ timeout: 240_000 }, function ({ factory, storageVersion }) { test('resuming initial replication (1)', async () => { // Stop early - likely to not include deleted row in first replication attempt. - await testResumingReplication(factory, 2000); + await testResumingReplication(factory, storageVersion, 2000); }); test('resuming initial replication (2)', async () => { // Stop late - likely to include deleted row in first replication attempt. - await testResumingReplication(factory, 8000); + await testResumingReplication(factory, storageVersion, 8000); }); }); }); -async function testResumingReplication(factory: TestStorageFactory, stopAfter: number) { +async function testResumingReplication(factory: TestStorageFactory, storageVersion: number, stopAfter: number) { // This tests interrupting and then resuming initial replication. // We interrupt replication after test_data1 has fully replicated, and // test_data2 has partially replicated. @@ -35,7 +35,10 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n let startRowCount: number; { - await using context = await ChangeStreamTestContext.open(factory, { streamOptions: { snapshotChunkLength: 1000 } }); + await using context = await ChangeStreamTestContext.open(factory, { + storageVersion, + streamOptions: { snapshotChunkLength: 1000 } + }); await context.updateSyncRules(`bucket_definitions: global: @@ -87,6 +90,7 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n // Bypass the usual "clear db on factory open" step. 
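
A minimal sketch of the bucket-name mapping used by the test helpers above, assuming only what this diff shows: when a storage version enables versioned buckets, the ChangeStreamTestContext helper prefixes requested bucket names with the sync rules id before querying storage. The standalone function name and the example values below are illustrative, not part of the change.

const VERSIONED_BUCKET_PREFIX = /^\d+#/;

// Mirrors resolveBucketName() in change_stream_utils.ts: pass names through for
// legacy (unversioned) storage, or when the caller already supplied a prefixed
// name; otherwise prepend the sync rules id.
function resolveBucketName(bucket: string, syncRulesId: number, versionedBuckets: boolean): string {
  if (!versionedBuckets || VERSIONED_BUCKET_PREFIX.test(bucket)) {
    return bucket;
  }
  return `${syncRulesId}#${bucket}`;
}

// resolveBucketName('global[]', 1, false) -> 'global[]'    (legacy storage version)
// resolveBucketName('global[]', 1, true)  -> '1#global[]'  (versioned buckets)
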
await using context2 = await ChangeStreamTestContext.open(factory, { doNotClear: true, + storageVersion, streamOptions: { snapshotChunkLength: 1000 } }); diff --git a/modules/module-mongodb/test/src/slow_tests.test.ts b/modules/module-mongodb/test/src/slow_tests.test.ts index 9e21aaf47..abad39ec0 100644 --- a/modules/module-mongodb/test/src/slow_tests.test.ts +++ b/modules/module-mongodb/test/src/slow_tests.test.ts @@ -2,19 +2,21 @@ import { setTimeout } from 'node:timers/promises'; import { describe, expect, test } from 'vitest'; import { mongo } from '@powersync/lib-service-mongodb'; -import { storage } from '@powersync/service-core'; - import { ChangeStreamTestContext, setSnapshotHistorySeconds } from './change_stream_utils.js'; import { env } from './env.js'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; describe.runIf(env.CI || env.SLOW_TESTS)('change stream slow tests', { timeout: 60_000 }, function () { describeWithStorage({}, defineSlowTests); }); -function defineSlowTests(factory: storage.TestStorageFactory) { +function defineSlowTests({ factory, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return ChangeStreamTestContext.open(factory, { ...options, storageVersion }); + }; + test('replicating snapshot with lots of data', async () => { - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); // Test with low minSnapshotHistoryWindowInSeconds, to trigger: // > Read timestamp .. is older than the oldest available timestamp. // This happened when we had {snapshot: true} in the initial @@ -52,7 +54,7 @@ bucket_definitions: // changestream), we may miss updates, which this test would // hopefully catch. - await using context = await ChangeStreamTestContext.open(factory); + await using context = await openContext(); const { db } = context; await context.updateSyncRules(` bucket_definitions: diff --git a/modules/module-mongodb/test/src/util.ts b/modules/module-mongodb/test/src/util.ts index cda52142e..15cf8b495 100644 --- a/modules/module-mongodb/test/src/util.ts +++ b/modules/module-mongodb/test/src/util.ts @@ -3,9 +3,14 @@ import * as mongo_storage from '@powersync/service-module-mongodb-storage'; import * as postgres_storage from '@powersync/service-module-postgres-storage'; import * as types from '@module/types/types.js'; -import { env } from './env.js'; -import { BSON_DESERIALIZE_DATA_OPTIONS, TestStorageFactory } from '@powersync/service-core'; +import { + BSON_DESERIALIZE_DATA_OPTIONS, + CURRENT_STORAGE_VERSION, + LEGACY_STORAGE_VERSION, + TestStorageFactory +} from '@powersync/service-core'; import { describe, TestOptions } from 'vitest'; +import { env } from './env.js'; export const TEST_URI = env.MONGO_TEST_DATA_URL; @@ -23,14 +28,34 @@ export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.test_utils. 
url: env.PG_STORAGE_TEST_URL }); -export function describeWithStorage(options: TestOptions, fn: (factory: TestStorageFactory) => void) { - describe.skipIf(!env.TEST_MONGO_STORAGE)(`mongodb storage`, options, function () { - fn(INITIALIZED_MONGO_STORAGE_FACTORY); - }); +const TEST_STORAGE_VERSIONS = [LEGACY_STORAGE_VERSION, CURRENT_STORAGE_VERSION]; - describe.skipIf(!env.TEST_POSTGRES_STORAGE)(`postgres storage`, options, function () { - fn(INITIALIZED_POSTGRES_STORAGE_FACTORY); - }); +export interface StorageVersionTestContext { + factory: TestStorageFactory; + storageVersion: number; +} + +export function describeWithStorage(options: TestOptions, fn: (context: StorageVersionTestContext) => void) { + const describeFactory = (storageName: string, factory: TestStorageFactory) => { + describe(`${storageName} storage`, options, function () { + for (const storageVersion of TEST_STORAGE_VERSIONS) { + describe(`storage v${storageVersion}`, function () { + fn({ + factory, + storageVersion + }); + }); + } + }); + }; + + if (env.TEST_MONGO_STORAGE) { + describeFactory('mongodb', INITIALIZED_MONGO_STORAGE_FACTORY); + } + + if (env.TEST_POSTGRES_STORAGE) { + describeFactory('postgres', INITIALIZED_POSTGRES_STORAGE_FACTORY); + } } export async function clearTestDb(db: mongo.Db) { diff --git a/modules/module-mssql/test/src/CDCStreamTestContext.ts b/modules/module-mssql/test/src/CDCStreamTestContext.ts index 6b674befc..6f7ed523c 100644 --- a/modules/module-mssql/test/src/CDCStreamTestContext.ts +++ b/modules/module-mssql/test/src/CDCStreamTestContext.ts @@ -3,6 +3,7 @@ import { createCoreReplicationMetrics, initializeCoreReplicationMetrics, InternalOpId, + LEGACY_STORAGE_VERSION, OplogEntry, storage, SyncRulesBucketStorage @@ -72,7 +73,11 @@ export class CDCStreamTestContext implements AsyncDisposable { } async updateSyncRules(content: string) { - const syncRules = await this.factory.updateSyncRules({ content: content, validate: true }); + const syncRules = await this.factory.updateSyncRules({ + content: content, + validate: true, + storageVersion: LEGACY_STORAGE_VERSION + }); this.storage = this.factory.getInstance(syncRules); return this.storage!; } diff --git a/modules/module-mysql/test/src/BinlogStreamUtils.ts b/modules/module-mysql/test/src/BinlogStreamUtils.ts index 665be6c21..8633fe5f1 100644 --- a/modules/module-mysql/test/src/BinlogStreamUtils.ts +++ b/modules/module-mysql/test/src/BinlogStreamUtils.ts @@ -7,6 +7,7 @@ import { createCoreReplicationMetrics, initializeCoreReplicationMetrics, InternalOpId, + LEGACY_STORAGE_VERSION, OplogEntry, ProtocolOpId, ReplicationCheckpoint, @@ -68,7 +69,11 @@ export class BinlogStreamTestContext { } async updateSyncRules(content: string): Promise { - const syncRules = await this.factory.updateSyncRules({ content: content, validate: true }); + const syncRules = await this.factory.updateSyncRules({ + content: content, + validate: true, + storageVersion: LEGACY_STORAGE_VERSION + }); this.storage = this.factory.getInstance(syncRules); return this.storage!; } diff --git a/modules/module-postgres-storage/src/migrations/scripts/1771232439485-storage-version.ts b/modules/module-postgres-storage/src/migrations/scripts/1771232439485-storage-version.ts new file mode 100644 index 000000000..2b6f30003 --- /dev/null +++ b/modules/module-postgres-storage/src/migrations/scripts/1771232439485-storage-version.ts @@ -0,0 +1,44 @@ +import { migrations, storage } from '@powersync/service-core'; +import { openMigrationDB } from '../migration-utils.js'; + +export const up: 
migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + await using client = openMigrationDB(configuration.storage); + await client.transaction(async (db) => { + await db.sql` + ALTER TABLE sync_rules + ADD COLUMN storage_version integer NOT NULL DEFAULT 1 + `.execute(); + }); +}; + +export const down: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + await using client = openMigrationDB(configuration.storage); + await client.transaction(async (db) => { + const newRules = await db.sql` + SELECT + id, + storage_version + FROM + sync_rules + WHERE + storage_version > ${{ type: 'int4', value: storage.LEGACY_STORAGE_VERSION }} + `.rows<{ id: number | bigint; storage_version: number | bigint }>(); + + if (newRules.length > 0) { + throw new Error( + `Cannot revert migration due to newer storage versions in use: ${newRules.map((r) => `${r.id}: v${r.storage_version}`).join(', ')}` + ); + } + + await db.sql` + ALTER TABLE sync_rules + DROP COLUMN storage_version + `.execute(); + }); +}; diff --git a/modules/module-postgres-storage/src/storage/PostgresBucketStorageFactory.ts b/modules/module-postgres-storage/src/storage/PostgresBucketStorageFactory.ts index 85fbe4dad..c1d3d4611 100644 --- a/modules/module-postgres-storage/src/storage/PostgresBucketStorageFactory.ts +++ b/modules/module-postgres-storage/src/storage/PostgresBucketStorageFactory.ts @@ -181,6 +181,7 @@ export class PostgresBucketStorageFactory // Apply unconditionally. Any errors will be reported via the diagnostics API. } + const storageVersion = options.storageVersion ?? storage.CURRENT_STORAGE_VERSION; return this.db.transaction(async (db) => { await db.sql` UPDATE sync_rules @@ -197,7 +198,7 @@ export class PostgresBucketStorageFactory nextval('sync_rules_id_sequence') AS id ) INSERT INTO - sync_rules (id, content, state, slot_name) + sync_rules (id, content, state, slot_name, storage_version) VALUES ( ( @@ -218,7 +219,8 @@ export class PostgresBucketStorageFactory ), '_', ${{ type: 'varchar', value: crypto.randomBytes(2).toString('hex') }} - ) + ), + ${{ type: 'int4', value: storageVersion }} ) RETURNING * diff --git a/modules/module-postgres-storage/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts b/modules/module-postgres-storage/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts index d7e98e970..39854e618 100644 --- a/modules/module-postgres-storage/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +++ b/modules/module-postgres-storage/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts @@ -1,8 +1,13 @@ import * as lib_postgres from '@powersync/lib-service-postgres'; import { ErrorCode, logger, ServiceError } from '@powersync/lib-services-framework'; import { storage } from '@powersync/service-core'; -import { SqlSyncRules, versionedHydrationState } from '@powersync/service-sync-rules'; - +import { + CompatibilityOption, + DEFAULT_HYDRATION_STATE, + HydrationState, + SqlSyncRules, + versionedHydrationState +} from '@powersync/service-sync-rules'; import { models } from '../../types/types.js'; export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncRulesContent { @@ -15,6 +20,7 @@ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncR public readonly last_keepalive_ts: Date | null; public readonly last_checkpoint_ts: Date | null; public readonly active: boolean; + public readonly storageVersion: number; 
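
A condensed sketch of the version lookup performed by the PostgresPersistedSyncRulesContent hunk that continues below, assuming the STORAGE_VERSION_CONFIG and LEGACY_STORAGE_VERSION exports used throughout this diff; the helper name and the plain Error are illustrative (the real code raises ErrorCode.PSYNC_S1005 via ServiceError).

import { storage } from '@powersync/service-core';

// Resolve the per-version storage behaviour for a persisted sync_rules row.
// Rows written before this migration have no storage_version value, so they
// fall back to the legacy storage version.
function resolveStorageConfig(row: { storage_version?: number | null }) {
  const version = row.storage_version ?? storage.LEGACY_STORAGE_VERSION;
  const config = storage.STORAGE_VERSION_CONFIG[version];
  if (config == null) {
    throw new Error(`Unsupported storage version ${version}`);
  }
  // Exposes versionedBuckets, which drives the hydration state choice in parsed().
  return config;
}
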
current_lock: storage.ReplicationLock | null = null; constructor( @@ -29,16 +35,44 @@ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncR this.last_checkpoint_ts = row.last_checkpoint_ts ? new Date(row.last_checkpoint_ts) : null; this.last_keepalive_ts = row.last_keepalive_ts ? new Date(row.last_keepalive_ts) : null; this.active = row.state == 'ACTIVE'; + this.storageVersion = row.storage_version ?? storage.LEGACY_STORAGE_VERSION; + } + + /** + * Load the storage config. + * + * This may throw if the persisted storage version is not supported. + */ + getStorageConfig() { + const storageConfig = storage.STORAGE_VERSION_CONFIG[this.storageVersion]; + if (storageConfig == null) { + throw new ServiceError( + ErrorCode.PSYNC_S1005, + `Unsupported storage version ${this.storageVersion} for sync rules ${this.id}` + ); + } + return storageConfig; } parsed(options: storage.ParseSyncRulesOptions): storage.PersistedSyncRules { + let hydrationState: HydrationState; + const syncRules = SqlSyncRules.fromYaml(this.sync_rules_content, options); + const storageConfig = this.getStorageConfig(); + if ( + storageConfig.versionedBuckets || + syncRules.config.compatibility.isEnabled(CompatibilityOption.versionedBucketIds) + ) { + hydrationState = versionedHydrationState(this.id); + } else { + hydrationState = DEFAULT_HYDRATION_STATE; + } return { id: this.id, slot_name: this.slot_name, - sync_rules: SqlSyncRules.fromYaml(this.sync_rules_content, options), + sync_rules: syncRules, hydratedSyncRules() { return this.sync_rules.config.hydrate({ - hydrationState: versionedHydrationState(this.id) + hydrationState }); } }; diff --git a/modules/module-postgres-storage/src/types/models/SyncRules.ts b/modules/module-postgres-storage/src/types/models/SyncRules.ts index 8a6ca1a35..8f94e45e9 100644 --- a/modules/module-postgres-storage/src/types/models/SyncRules.ts +++ b/modules/module-postgres-storage/src/types/models/SyncRules.ts @@ -47,6 +47,7 @@ export const SyncRules = t.object({ */ last_fatal_error: t.Null.or(t.string), keepalive_op: t.Null.or(bigint), + storage_version: t.Null.or(pgwire_number).optional(), content: t.string }); diff --git a/modules/module-postgres-storage/src/utils/db.ts b/modules/module-postgres-storage/src/utils/db.ts index 500cb3aa8..b3a529164 100644 --- a/modules/module-postgres-storage/src/utils/db.ts +++ b/modules/module-postgres-storage/src/utils/db.ts @@ -9,6 +9,9 @@ export const NOTIFICATION_CHANNEL = 'powersynccheckpoints'; */ export const sql = lib_postgres.sql; +/** + * Drop all Postgres storage tables used by the service, including migrations. + */ export const dropTables = async (client: lib_postgres.DatabaseClient) => { // Lock a connection for automatic schema search paths await client.lockConnection(async (db) => { @@ -23,5 +26,39 @@ export const dropTables = async (client: lib_postgres.DatabaseClient) => { await db.sql`DROP TABLE IF EXISTS custom_write_checkpoints`.execute(); await db.sql`DROP SEQUENCE IF EXISTS op_id_sequence`.execute(); await db.sql`DROP SEQUENCE IF EXISTS sync_rules_id_sequence`.execute(); + await db.sql`DROP TABLE IF EXISTS migrations`.execute(); }); }; + +/** + * Clear all Postgres storage tables and reset sequences. + * + * Does not clear migration state. 
+ */ +export const truncateTables = async (db: lib_postgres.DatabaseClient) => { + // Lock a connection for automatic schema search paths + await db.query( + { + statement: `TRUNCATE TABLE bucket_data, + bucket_parameters, + sync_rules, + instance, + current_data, + source_tables, + write_checkpoints, + custom_write_checkpoints, + connection_report_events RESTART IDENTITY CASCADE + ` + }, + { + statement: `ALTER SEQUENCE IF EXISTS op_id_sequence RESTART + WITH + 1` + }, + { + statement: `ALTER SEQUENCE IF EXISTS sync_rules_id_sequence RESTART + WITH + 1` + } + ); +}; diff --git a/modules/module-postgres-storage/src/utils/test-utils.ts b/modules/module-postgres-storage/src/utils/test-utils.ts index c0ba7c2b0..3f01716fc 100644 --- a/modules/module-postgres-storage/src/utils/test-utils.ts +++ b/modules/module-postgres-storage/src/utils/test-utils.ts @@ -3,6 +3,7 @@ import { PostgresMigrationAgent } from '../migrations/PostgresMigrationAgent.js' import { normalizePostgresStorageConfig, PostgresStorageConfigDecoded } from '../types/types.js'; import { PostgresReportStorage } from '../storage/PostgresReportStorage.js'; import { PostgresBucketStorageFactory } from '../storage/PostgresBucketStorageFactory.js'; +import { truncateTables } from './db.js'; export type PostgresTestStorageOptions = { url: string; @@ -22,7 +23,7 @@ export function postgresTestSetup(factoryOptions: PostgresTestStorageOptions) { const TEST_CONNECTION_OPTIONS = normalizePostgresStorageConfig(BASE_CONFIG); - const migrate = async (direction: framework.migrations.Direction) => { + const runMigrations = async (options: { down: boolean; up: boolean }) => { await using migrationManager: PowerSyncMigrationManager = new framework.MigrationManager(); await using migrationAgent = factoryOptions.migrationAgent ? 
factoryOptions.migrationAgent(BASE_CONFIG) @@ -31,14 +32,16 @@ export function postgresTestSetup(factoryOptions: PostgresTestStorageOptions) { const mockServiceContext = { configuration: { storage: BASE_CONFIG } } as unknown as ServiceContext; - await migrationManager.migrate({ - direction: framework.migrations.Direction.Down, - migrationContext: { - service_context: mockServiceContext - } - }); + if (options.down) { + await migrationManager.migrate({ + direction: framework.migrations.Direction.Down, + migrationContext: { + service_context: mockServiceContext + } + }); + } - if (direction == framework.migrations.Direction.Up) { + if (options.up) { await migrationManager.migrate({ direction: framework.migrations.Direction.Up, migrationContext: { @@ -48,11 +51,28 @@ export function postgresTestSetup(factoryOptions: PostgresTestStorageOptions) { } }; + const migrate = async (direction: framework.migrations.Direction) => { + await runMigrations({ + down: true, + up: direction == framework.migrations.Direction.Up + }); + }; + + const clearStorage = async () => { + await runMigrations({ down: false, up: true }); + + await using storageFactory = new PostgresBucketStorageFactory({ + config: TEST_CONNECTION_OPTIONS, + slot_name_prefix: 'test_' + }); + await truncateTables(storageFactory.db); + }; + return { reportFactory: async (options?: TestStorageOptions) => { try { if (!options?.doNotClear) { - await migrate(framework.migrations.Direction.Up); + await clearStorage(); } return new PostgresReportStorage({ @@ -67,7 +87,7 @@ export function postgresTestSetup(factoryOptions: PostgresTestStorageOptions) { factory: async (options?: TestStorageOptions) => { try { if (!options?.doNotClear) { - await migrate(framework.migrations.Direction.Up); + await clearStorage(); } return new PostgresBucketStorageFactory({ diff --git a/modules/module-postgres-storage/test/src/__snapshots__/storage.test.ts.snap b/modules/module-postgres-storage/test/src/__snapshots__/storage.test.ts.snap deleted file mode 100644 index d1b24f45b..000000000 --- a/modules/module-postgres-storage/test/src/__snapshots__/storage.test.ts.snap +++ /dev/null @@ -1,9 +0,0 @@ -// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html - -exports[`Postgres Sync Bucket Storage - Data > empty storage metrics 1`] = ` -{ - "operations_size_bytes": 16384, - "parameters_size_bytes": 32768, - "replication_size_bytes": 16384, -} -`; diff --git a/modules/module-postgres-storage/test/src/__snapshots__/storage_sync.test.ts.snap b/modules/module-postgres-storage/test/src/__snapshots__/storage_sync.test.ts.snap index 08a73c8c1..39fe6c1bb 100644 --- a/modules/module-postgres-storage/test/src/__snapshots__/storage_sync.test.ts.snap +++ b/modules/module-postgres-storage/test/src/__snapshots__/storage_sync.test.ts.snap @@ -1,6 +1,6 @@ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html -exports[`sync - postgres > compacting data - invalidate checkpoint 1`] = ` +exports[`sync - postgres > storage v1 > compacting data - invalidate checkpoint 1`] = ` [ { "checkpoint": { @@ -31,7 +31,7 @@ exports[`sync - postgres > compacting data - invalidate checkpoint 1`] = ` ] `; -exports[`sync - postgres > compacting data - invalidate checkpoint 2`] = ` +exports[`sync - postgres > storage v1 > compacting data - invalidate checkpoint 2`] = ` [ { "data": { @@ -104,7 +104,7 @@ exports[`sync - postgres > compacting data - invalidate checkpoint 2`] = ` ] `; -exports[`sync - postgres > encodes sync rules id in buckes for streams 1`] = ` +exports[`sync - postgres > storage 
v1 > encodes sync rules id in buckes for streams 1`] = ` [ { "checkpoint": { @@ -159,7 +159,7 @@ exports[`sync - postgres > encodes sync rules id in buckes for streams 1`] = ` ] `; -exports[`sync - postgres > encodes sync rules id in buckes for streams 2`] = ` +exports[`sync - postgres > storage v1 > encodes sync rules id in buckes for streams 2`] = ` [ { "checkpoint": { @@ -214,7 +214,7 @@ exports[`sync - postgres > encodes sync rules id in buckes for streams 2`] = ` ] `; -exports[`sync - postgres > expired token 1`] = ` +exports[`sync - postgres > storage v1 > expired token 1`] = ` [ { "token_expires_in": 0, @@ -222,7 +222,7 @@ exports[`sync - postgres > expired token 1`] = ` ] `; -exports[`sync - postgres > expiring token 1`] = ` +exports[`sync - postgres > storage v1 > expiring token 1`] = ` [ { "checkpoint": { @@ -258,7 +258,7 @@ exports[`sync - postgres > expiring token 1`] = ` ] `; -exports[`sync - postgres > expiring token 2`] = ` +exports[`sync - postgres > storage v1 > expiring token 2`] = ` [ { "token_expires_in": 0, @@ -266,7 +266,7 @@ exports[`sync - postgres > expiring token 2`] = ` ] `; -exports[`sync - postgres > sends checkpoint complete line for empty checkpoint 1`] = ` +exports[`sync - postgres > storage v1 > sends checkpoint complete line for empty checkpoint 1`] = ` [ { "checkpoint": { @@ -335,7 +335,7 @@ exports[`sync - postgres > sends checkpoint complete line for empty checkpoint 1 ] `; -exports[`sync - postgres > sync buckets in order 1`] = ` +exports[`sync - postgres > storage v1 > sync buckets in order 1`] = ` [ { "checkpoint": { @@ -431,7 +431,7 @@ exports[`sync - postgres > sync buckets in order 1`] = ` ] `; -exports[`sync - postgres > sync global data 1`] = ` +exports[`sync - postgres > storage v1 > sync global data 1`] = ` [ { "checkpoint": { @@ -495,7 +495,7 @@ exports[`sync - postgres > sync global data 1`] = ` ] `; -exports[`sync - postgres > sync interrupts low-priority buckets on new checkpoints (2) 1`] = ` +exports[`sync - postgres > storage v1 > sync interrupts low-priority buckets on new checkpoints (2) 1`] = ` [ { "checkpoint": { @@ -689,7 +689,7 @@ exports[`sync - postgres > sync interrupts low-priority buckets on new checkpoin ] `; -exports[`sync - postgres > sync legacy non-raw data 1`] = ` +exports[`sync - postgres > storage v1 > sync legacy non-raw data 1`] = ` [ { "checkpoint": { @@ -749,7 +749,7 @@ exports[`sync - postgres > sync legacy non-raw data 1`] = ` ] `; -exports[`sync - postgres > sync updates to data query only 1`] = ` +exports[`sync - postgres > storage v1 > sync updates to data query only 1`] = ` [ { "checkpoint": { @@ -785,7 +785,7 @@ exports[`sync - postgres > sync updates to data query only 1`] = ` ] `; -exports[`sync - postgres > sync updates to data query only 2`] = ` +exports[`sync - postgres > storage v1 > sync updates to data query only 2`] = ` [ { "checkpoint_diff": { @@ -834,7 +834,7 @@ exports[`sync - postgres > sync updates to data query only 2`] = ` ] `; -exports[`sync - postgres > sync updates to global data 1`] = ` +exports[`sync - postgres > storage v1 > sync updates to global data 1`] = ` [ { "checkpoint": { @@ -870,7 +870,7 @@ exports[`sync - postgres > sync updates to global data 1`] = ` ] `; -exports[`sync - postgres > sync updates to global data 2`] = ` +exports[`sync - postgres > storage v1 > sync updates to global data 2`] = ` [ { "checkpoint_diff": { @@ -919,7 +919,7 @@ exports[`sync - postgres > sync updates to global data 2`] = ` ] `; -exports[`sync - postgres > sync updates to global data 3`] = ` 
+exports[`sync - postgres > storage v1 > sync updates to global data 3`] = ` [ { "checkpoint_diff": { @@ -968,7 +968,7 @@ exports[`sync - postgres > sync updates to global data 3`] = ` ] `; -exports[`sync - postgres > sync updates to parameter query + data 1`] = ` +exports[`sync - postgres > storage v1 > sync updates to parameter query + data 1`] = ` [ { "checkpoint": { @@ -992,7 +992,7 @@ exports[`sync - postgres > sync updates to parameter query + data 1`] = ` ] `; -exports[`sync - postgres > sync updates to parameter query + data 2`] = ` +exports[`sync - postgres > storage v1 > sync updates to parameter query + data 2`] = ` [ { "checkpoint_diff": { @@ -1041,7 +1041,7 @@ exports[`sync - postgres > sync updates to parameter query + data 2`] = ` ] `; -exports[`sync - postgres > sync updates to parameter query only 1`] = ` +exports[`sync - postgres > storage v1 > sync updates to parameter query only 1`] = ` [ { "checkpoint": { @@ -1065,7 +1065,7 @@ exports[`sync - postgres > sync updates to parameter query only 1`] = ` ] `; -exports[`sync - postgres > sync updates to parameter query only 2`] = ` +exports[`sync - postgres > storage v1 > sync updates to parameter query only 2`] = ` [ { "checkpoint_diff": { @@ -1094,3 +1094,1098 @@ exports[`sync - postgres > sync updates to parameter query only 2`] = ` }, ] `; + +exports[`sync - postgres > storage v2 > compacting data - invalidate checkpoint 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -93886621, + "count": 2, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > compacting data - invalidate checkpoint 2`] = ` +[ + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": -93886621, + "op": "CLEAR", + "op_id": "2", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_diff": { + "last_op_id": "4", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 499012468, + "count": 4, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "2", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 1859363232, + "data": "{"id":"t1","description":"Test 1b"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "3", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + { + "checksum": 3028503153, + "data": "{"id":"t2","description":"Test 2b"}", + "object_id": "t2", + "object_type": "test", + "op": "PUT", + "op_id": "4", + "subkey": "a17e6883-d5d2-599d-a805-d60528127dbd", + }, + ], + "has_more": false, + "next_after": "4", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "4", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > encodes sync rules id in buckes for streams 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#test|0[]", + "checksum": 920318466, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "test", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#test|0[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + 
"object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > encodes sync rules id in buckes for streams 2`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "2#test|0[]", + "checksum": 920318466, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "test", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "2#test|0[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > expired token 1`] = ` +[ + { + "token_expires_in": 0, + }, +] +`; + +exports[`sync - postgres > storage v2 > expiring token 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > expiring token 2`] = ` +[ + { + "token_expires_in": 0, + }, +] +`; + +exports[`sync - postgres > storage v2 > sends checkpoint complete line for empty checkpoint 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -1221282404, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 3073684892, + "data": "{"id":"t1","description":"sync"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + null, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, + { + "checkpoint_diff": { + "last_op_id": "1", + "removed_buckets": [], + "updated_buckets": [], + "write_checkpoint": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync buckets in order 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#b0[]", + "checksum": 920318466, + "count": 1, + "priority": 2, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + { + "bucket": "1#b1[]", + "checksum": -1382098757, + "count": 1, + "priority": 1, + "subscriptions": [ + { + "default": 1, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "b0", + }, + { + "errors": [], + "is_default": true, + "name": "b1", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b1[]", + "data": [ + { + "checksum": 2912868539, + 
"data": "{"id":"earlier","description":"Test 2"}", + "object_id": "earlier", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "243b0e26-87b2-578a-993c-5ac5b6f7fd64", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "partial_checkpoint_complete": { + "last_op_id": "2", + "priority": 1, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b0[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync global data 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -93886621, + "count": 2, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "2", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + { + "checksum": 3280762209, + "data": "{"id":"t2","description":"Test 2"}", + "object_id": "t2", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "a17e6883-d5d2-599d-a805-d60528127dbd", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync interrupts low-priority buckets on new checkpoints (2) 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#b0a[]", + "checksum": -659831575, + "count": 2000, + "priority": 2, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + { + "bucket": "1#b0b[]", + "checksum": -659831575, + "count": 2000, + "priority": 2, + "subscriptions": [ + { + "default": 1, + }, + ], + }, + { + "bucket": "1#b1[]", + "checksum": -1096116670, + "count": 1, + "priority": 1, + "subscriptions": [ + { + "default": 2, + }, + ], + }, + ], + "last_op_id": "4001", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "b0a", + }, + { + "errors": [], + "is_default": true, + "name": "b0b", + }, + { + "errors": [], + "is_default": true, + "name": "b1", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b1[]", + "data": undefined, + "has_more": false, + "next_after": "1", + }, + }, + { + "partial_checkpoint_complete": { + "last_op_id": "4001", + "priority": 1, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b0a[]", + "data": undefined, + "has_more": true, + "next_after": "2000", + }, + }, + { + "data": { + "after": "2000", + "bucket": "1#b0a[]", + "data": undefined, + "has_more": true, + "next_after": "4000", + }, + }, + { + "checkpoint_diff": { + "last_op_id": "4004", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#b0a[]", + "checksum": 883076828, + "count": 2001, + "priority": 2, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + { + "bucket": "1#b0b[]", + "checksum": 883076828, + "count": 2001, + "priority": 2, + "subscriptions": [ + { + "default": 1, + }, + ], + }, + { + "bucket": "1#b1[]", + "checksum": 1841937527, + "count": 
2, + "priority": 1, + "subscriptions": [ + { + "default": 2, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "1", + "bucket": "1#b1[]", + "data": undefined, + "has_more": false, + "next_after": "4002", + }, + }, + { + "partial_checkpoint_complete": { + "last_op_id": "4004", + "priority": 1, + }, + }, + { + "data": { + "after": "4000", + "bucket": "1#b0a[]", + "data": undefined, + "has_more": false, + "next_after": "4003", + }, + }, + { + "data": { + "after": "0", + "bucket": "1#b0b[]", + "data": undefined, + "has_more": true, + "next_after": "1999", + }, + }, + { + "data": { + "after": "1999", + "bucket": "1#b0b[]", + "data": undefined, + "has_more": true, + "next_after": "3999", + }, + }, + { + "data": { + "after": "3999", + "bucket": "1#b0b[]", + "data": undefined, + "has_more": false, + "next_after": "4004", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "4004", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync legacy non-raw data 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -852817836, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 3442149460n, + "data": { + "description": "Test +"string"", + "id": "t1", + "large_num": 12345678901234567890n, + }, + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to data query only 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "1", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "by_user", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to data query only 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "2", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 1418351250, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#by_user["user1"]", + "data": [ + { + "checksum": 1418351250, + "data": "{"id":"list1","user_id":"user1","name":"User 1"}", + "object_id": "list1", + "object_type": "lists", + "op": "PUT", + "op_id": "2", + "subkey": "5ad0aa14-3d5e-5428-ad5b-2c33927d991c", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to global data 1`] = ` +[ + { + "checkpoint": { + "buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "mybucket", + }, + ], + "write_checkpoint": undefined, + }, + }, + 
{ + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to global data 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "1", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": 920318466, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 920318466, + "data": "{"id":"t1","description":"Test 1"}", + "object_id": "t1", + "object_type": "test", + "op": "PUT", + "op_id": "1", + "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to global data 3`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "2", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#mybucket[]", + "checksum": -93886621, + "count": 2, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "1", + "bucket": "1#mybucket[]", + "data": [ + { + "checksum": 3280762209, + "data": "{"id":"t2","description":"Test 2"}", + "object_id": "t2", + "object_type": "test", + "op": "PUT", + "op_id": "2", + "subkey": "a17e6883-d5d2-599d-a805-d60528127dbd", + }, + ], + "has_more": false, + "next_after": "2", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to parameter query + data 1`] = ` +[ + { + "checkpoint": { + "buckets": [], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "by_user", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to parameter query + data 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "2", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 1418351250, + "count": 1, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "data": { + "after": "0", + "bucket": "1#by_user["user1"]", + "data": [ + { + "checksum": 1418351250, + "data": "{"id":"list1","user_id":"user1","name":"User 1"}", + "object_id": "list1", + "object_type": "lists", + "op": "PUT", + "op_id": "1", + "subkey": "5ad0aa14-3d5e-5428-ad5b-2c33927d991c", + }, + ], + "has_more": false, + "next_after": "1", + }, + }, + { + "checkpoint_complete": { + "last_op_id": "2", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to parameter query only 1`] = ` +[ + { + "checkpoint": { + "buckets": [], + "last_op_id": "0", + "streams": [ + { + "errors": [], + "is_default": true, + "name": "by_user", + }, + ], + "write_checkpoint": undefined, + }, + }, + { + "checkpoint_complete": { + "last_op_id": "0", + }, + }, +] +`; + +exports[`sync - postgres > storage v2 > sync updates to parameter query only 2`] = ` +[ + { + "checkpoint_diff": { + "last_op_id": "1", + "removed_buckets": [], + "updated_buckets": [ + { + "bucket": "1#by_user["user1"]", + "checksum": 0, + "count": 0, + "priority": 3, + "subscriptions": [ + { + "default": 0, + }, + ], + }, + ], + "write_checkpoint": undefined, + }, + }, + { + 
"checkpoint_complete": { + "last_op_id": "1", + }, + }, +] +`; diff --git a/modules/module-postgres-storage/test/src/migrations.test.ts b/modules/module-postgres-storage/test/src/migrations.test.ts index 1f2e12a64..58386736d 100644 --- a/modules/module-postgres-storage/test/src/migrations.test.ts +++ b/modules/module-postgres-storage/test/src/migrations.test.ts @@ -2,9 +2,10 @@ import { beforeEach, describe, expect, it } from 'vitest'; import { Direction } from '@powersync/lib-services-framework'; import { register } from '@powersync/service-core-tests'; +import { dropTables, PostgresBucketStorageFactory } from '../../src/index.js'; import { PostgresMigrationAgent } from '../../src/migrations/PostgresMigrationAgent.js'; import { env } from './env.js'; -import { POSTGRES_STORAGE_FACTORY, POSTGRES_STORAGE_SETUP } from './util.js'; +import { POSTGRES_STORAGE_FACTORY, POSTGRES_STORAGE_SETUP, TEST_CONNECTION_OPTIONS } from './util.js'; const MIGRATION_AGENT_FACTORY = () => { return new PostgresMigrationAgent({ type: 'postgresql', uri: env.PG_STORAGE_TEST_URL, sslmode: 'disable' }); @@ -15,6 +16,12 @@ describe('Migrations', () => { // The migration tests clear the migration store, without running the down migrations. // This ensures all the down migrations have been run before. const setup = POSTGRES_STORAGE_SETUP; + await using factory = new PostgresBucketStorageFactory({ + config: TEST_CONNECTION_OPTIONS, + slot_name_prefix: 'test_' + }); + + await dropTables(factory.db); await setup.migrate(Direction.Down); }); diff --git a/modules/module-postgres-storage/test/src/storage.test.ts b/modules/module-postgres-storage/test/src/storage.test.ts index 9fb1ce197..b8a82f4ce 100644 --- a/modules/module-postgres-storage/test/src/storage.test.ts +++ b/modules/module-postgres-storage/test/src/storage.test.ts @@ -1,5 +1,5 @@ import { storage } from '@powersync/service-core'; -import { register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; +import { bucketRequestMap, register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; import { describe, expect, test } from 'vitest'; import { POSTGRES_STORAGE_FACTORY } from './util.js'; @@ -24,16 +24,16 @@ describe('Postgres Sync Bucket Storage - pg-specific', () => { // Test syncing a batch of data that is small in count, // but large enough in size to be split over multiple returned chunks. // Similar to the above test, but splits over 1MB chunks. 
- const sync_rules = test_utils.testRules( - ` + await using factory = await POSTGRES_STORAGE_FACTORY(); + const syncRules = await factory.updateSyncRules({ + content: ` bucket_definitions: global: data: - SELECT id, description FROM "%" ` - ); - await using factory = await POSTGRES_STORAGE_FACTORY(); - const bucketStorage = factory.getInstance(sync_rules); + }); + const bucketStorage = factory.getInstance(syncRules); const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; @@ -87,7 +87,7 @@ describe('Postgres Sync Bucket Storage - pg-specific', () => { const options: storage.BucketDataBatchOptions = {}; const batch1 = await test_utils.fromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]), options) + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]), options) ); expect(test_utils.getBatchData(batch1)).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 } @@ -101,7 +101,7 @@ describe('Postgres Sync Bucket Storage - pg-specific', () => { const batch2 = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([['global[]', BigInt(batch1[0].chunkData.next_after)]]), + bucketRequestMap(syncRules, [['global[]', BigInt(batch1[0].chunkData.next_after)]]), options ) ); @@ -117,7 +117,7 @@ describe('Postgres Sync Bucket Storage - pg-specific', () => { const batch3 = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([['global[]', BigInt(batch2[0].chunkData.next_after)]]), + bucketRequestMap(syncRules, [['global[]', BigInt(batch2[0].chunkData.next_after)]]), options ) ); @@ -133,7 +133,7 @@ describe('Postgres Sync Bucket Storage - pg-specific', () => { const batch4 = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([['global[]', BigInt(batch3[0].chunkData.next_after)]]), + bucketRequestMap(syncRules, [['global[]', BigInt(batch3[0].chunkData.next_after)]]), options ) ); diff --git a/modules/module-postgres-storage/test/src/storage_sync.test.ts b/modules/module-postgres-storage/test/src/storage_sync.test.ts index d7aae902a..62eeb479f 100644 --- a/modules/module-postgres-storage/test/src/storage_sync.test.ts +++ b/modules/module-postgres-storage/test/src/storage_sync.test.ts @@ -1,12 +1,152 @@ -import { register } from '@powersync/service-core-tests'; -import { describe } from 'vitest'; -import { POSTGRES_STORAGE_FACTORY } from './util.js'; +import { storage } from '@powersync/service-core'; +import { bucketRequest, register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; +import { describe, expect, test } from 'vitest'; +import { POSTGRES_STORAGE_FACTORY, TEST_STORAGE_VERSIONS } from './util.js'; /** * Bucket compacting is not yet implemented. * This causes the internal compacting test to fail. * Other tests have been verified manually. */ +function registerStorageVersionTests(storageVersion: number) { + describe(`storage v${storageVersion}`, () => { + const storageFactory = POSTGRES_STORAGE_FACTORY; + + register.registerSyncTests(storageFactory, { storageVersion }); + + test('large batch (2)', async () => { + // Test syncing a batch of data that is small in count, + // but large enough in size to be split over multiple returned chunks. + // Similar to the above test, but splits over 1MB chunks. 
+ await using factory = await storageFactory(); + const syncRules = await factory.updateSyncRules({ + content: ` + bucket_definitions: + global: + data: + - SELECT id, description FROM "%" + `, + storageVersion + }); + const bucketStorage = factory.getInstance(syncRules); + const globalBucket = bucketRequest(syncRules, 'global[]'); + + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { + const sourceTable = TEST_TABLE; + + const largeDescription = '0123456789'.repeat(2_000_00); + + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'test1', + description: 'test1' + }, + afterReplicaId: test_utils.rid('test1') + }); + + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'large1', + description: largeDescription + }, + afterReplicaId: test_utils.rid('large1') + }); + + // Large enough to split the returned batch + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'large2', + description: largeDescription + }, + afterReplicaId: test_utils.rid('large2') + }); + + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'test3', + description: 'test3' + }, + afterReplicaId: test_utils.rid('test3') + }); + }); + + const checkpoint = result!.flushed_op; + + const options: storage.BucketDataBatchOptions = {}; + + const batch1 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([[globalBucket, 0n]]), options) + ); + expect(test_utils.getBatchData(batch1)).toEqual([ + { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 } + ]); + expect(test_utils.getBatchMeta(batch1)).toEqual({ + after: '0', + has_more: true, + next_after: '1' + }); + + const batch2 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch( + checkpoint, + new Map([[globalBucket, BigInt(batch1[0].chunkData.next_after)]]), + options + ) + ); + expect(test_utils.getBatchData(batch2)).toEqual([ + { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 } + ]); + expect(test_utils.getBatchMeta(batch2)).toEqual({ + after: '1', + has_more: true, + next_after: '2' + }); + + const batch3 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch( + checkpoint, + new Map([[globalBucket, BigInt(batch2[0].chunkData.next_after)]]), + options + ) + ); + expect(test_utils.getBatchData(batch3)).toEqual([ + { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 } + ]); + expect(test_utils.getBatchMeta(batch3)).toEqual({ + after: '2', + has_more: true, + next_after: '3' + }); + + const batch4 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch( + checkpoint, + new Map([[globalBucket, BigInt(batch3[0].chunkData.next_after)]]), + options + ) + ); + expect(test_utils.getBatchData(batch4)).toEqual([ + { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 } + ]); + expect(test_utils.getBatchMeta(batch4)).toEqual({ + after: '3', + has_more: false, + next_after: '4' + }); + }); + }); +} + describe('sync - postgres', () => { - register.registerSyncTests(POSTGRES_STORAGE_FACTORY); + for (const storageVersion of TEST_STORAGE_VERSIONS) { + registerStorageVersionTests(storageVersion); + } }); diff --git a/modules/module-postgres-storage/test/src/util.ts b/modules/module-postgres-storage/test/src/util.ts index d055dc343..44ab64637 100644 --- a/modules/module-postgres-storage/test/src/util.ts +++ b/modules/module-postgres-storage/test/src/util.ts @@ -3,6 +3,7 @@ 
import { fileURLToPath } from 'url'; import { normalizePostgresStorageConfig, PostgresMigrationAgent } from '../../src/index.js'; import { env } from './env.js'; import { postgresTestSetup } from '../../src/utils/test-utils.js'; +import { CURRENT_STORAGE_VERSION, LEGACY_STORAGE_VERSION } from '@powersync/service-core'; const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); @@ -34,3 +35,5 @@ export const POSTGRES_STORAGE_SETUP = postgresTestSetup({ export const POSTGRES_STORAGE_FACTORY = POSTGRES_STORAGE_SETUP.factory; export const POSTGRES_REPORT_STORAGE_FACTORY = POSTGRES_STORAGE_SETUP.reportFactory; + +export const TEST_STORAGE_VERSIONS = [LEGACY_STORAGE_VERSION, CURRENT_STORAGE_VERSION]; diff --git a/modules/module-postgres/test/src/checkpoints.test.ts b/modules/module-postgres/test/src/checkpoints.test.ts index 6657a12e8..57ce4f070 100644 --- a/modules/module-postgres/test/src/checkpoints.test.ts +++ b/modules/module-postgres/test/src/checkpoints.test.ts @@ -1,7 +1,7 @@ import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js'; -import { checkpointUserId, createWriteCheckpoint, TestStorageFactory } from '@powersync/service-core'; +import { checkpointUserId, createWriteCheckpoint } from '@powersync/service-core'; import { describe, test } from 'vitest'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; import timers from 'node:timers/promises'; @@ -15,9 +15,9 @@ describe('checkpoint tests', () => { describeWithStorage({}, checkpointTests); }); -const checkpointTests = (factory: TestStorageFactory) => { +const checkpointTests = ({ factory, storageVersion }: StorageVersionTestContext) => { test('write checkpoints', { timeout: 50_000 }, async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await WalStreamTestContext.open(factory, { storageVersion }); await context.updateSyncRules(BASIC_SYNC_RULES); const { pool } = context; diff --git a/modules/module-postgres/test/src/chunked_snapshots.test.ts b/modules/module-postgres/test/src/chunked_snapshots.test.ts index dffb8aa1e..2b60cc7af 100644 --- a/modules/module-postgres/test/src/chunked_snapshots.test.ts +++ b/modules/module-postgres/test/src/chunked_snapshots.test.ts @@ -1,17 +1,21 @@ -import { reduceBucket, TestStorageFactory } from '@powersync/service-core'; +import { reduceBucket } from '@powersync/service-core'; import { METRICS_HELPER } from '@powersync/service-core-tests'; import { SqliteJsonValue } from '@powersync/service-sync-rules'; import * as crypto from 'node:crypto'; import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; describe('chunked snapshots', () => { describeWithStorage({ timeout: 120_000 }, defineBatchTests); }); -function defineBatchTests(factory: TestStorageFactory) { +function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return WalStreamTestContext.open(factory, { ...options, storageVersion }); + }; + // We need to test every supported type, since chunking could be quite sensitive to // how each specific type is handled. 
test('chunked snapshot edge case (int2)', async () => { @@ -89,7 +93,7 @@ function defineBatchTests(factory: TestStorageFactory) { // 5. Logical replication picks up the UPDATE above, but it is missing the TOAST column. // 6. We end up with a row that has a missing TOAST column. - await using context = await WalStreamTestContext.open(factory, { + await using context = await openContext({ // We need to use a smaller chunk size here, so that we can run a query in between chunks walStreamOptions: { snapshotChunkLength: 100 } }); diff --git a/modules/module-postgres/test/src/large_batch.test.ts b/modules/module-postgres/test/src/large_batch.test.ts index 777662f12..90ea9ec93 100644 --- a/modules/module-postgres/test/src/large_batch.test.ts +++ b/modules/module-postgres/test/src/large_batch.test.ts @@ -1,14 +1,11 @@ -import { storage } from '@powersync/service-core'; import { describe, expect, test } from 'vitest'; import { populateData } from '../../dist/utils/populate_test_data.js'; import { env } from './env.js'; -import { describeWithStorage, TEST_CONNECTION_OPTIONS } from './util.js'; +import { describeWithStorage, StorageVersionTestContext, TEST_CONNECTION_OPTIONS } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () { - describeWithStorage({ timeout: 240_000 }, function (factory) { - defineBatchTests(factory); - }); + describeWithStorage({ timeout: 240_000 }, defineBatchTests); }); const BASIC_SYNC_RULES = `bucket_definitions: @@ -16,9 +13,13 @@ const BASIC_SYNC_RULES = `bucket_definitions: data: - SELECT id, description, other FROM "test_data"`; -function defineBatchTests(factory: storage.TestStorageFactory) { +function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return WalStreamTestContext.open(factory, { ...options, storageVersion }); + }; + test('update large record', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // This test generates a large transaction in MongoDB, despite the replicated data // not being that large. // If we don't limit transaction size, we could run into this error: @@ -41,17 +42,16 @@ function defineBatchTests(factory: storage.TestStorageFactory) { context.startStreaming(); - const checkpoint = await context.getCheckpoint({ timeout: 100_000 }); + const checksum = await context.getChecksums(['global[]'], { timeout: 50_000 }); const duration = Date.now() - start; const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024); - const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']); expect(checksum.get('global[]')!.count).toEqual(operation_count); const perSecond = Math.round((operation_count / duration) * 1000); console.log(`${operation_count} ops in ${duration}ms ${perSecond} ops/s. 
${used}MB heap`); }); test('initial replication performance', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Manual test to check initial replication performance and memory usage await context.updateSyncRules(BASIC_SYNC_RULES); const { pool } = context; @@ -89,9 +89,8 @@ function defineBatchTests(factory: storage.TestStorageFactory) { await context.replicateSnapshot(); context.startStreaming(); - const checkpoint = await context.getCheckpoint({ timeout: 100_000 }); + const checksum = await context.getChecksums(['global[]'], { timeout: 100_000 }); const duration = Date.now() - start; - const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']); expect(checksum.get('global[]')!.count).toEqual(operation_count); const perSecond = Math.round((operation_count / duration) * 1000); console.log(`${operation_count} ops in ${duration}ms ${perSecond} ops/s.`); @@ -102,7 +101,7 @@ function defineBatchTests(factory: storage.TestStorageFactory) { }); test('large number of operations', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // This just tests performance of a large number of operations inside a transaction. await context.updateSyncRules(BASIC_SYNC_RULES); const { pool } = context; @@ -141,10 +140,9 @@ function defineBatchTests(factory: storage.TestStorageFactory) { context.startStreaming(); - const checkpoint = await context.getCheckpoint({ timeout: 50_000 }); + const checksum = await context.getChecksums(['global[]']); const duration = Date.now() - start; const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024); - const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']); expect(checksum.get('global[]')!.count).toEqual(operationCount); const perSecond = Math.round((operationCount / duration) * 1000); // This number depends on the test machine, so we keep the test significantly @@ -158,10 +156,8 @@ function defineBatchTests(factory: storage.TestStorageFactory) { const truncateStart = Date.now(); await pool.query(`TRUNCATE test_data`); - const checkpoint2 = await context.getCheckpoint({ timeout: 20_000 }); + const checksum2 = await context.getChecksums(['global[]'], { timeout: 20_000 }); const truncateDuration = Date.now() - truncateStart; - - const checksum2 = await context.storage!.getChecksums(checkpoint2, ['global[]']); const truncateCount = checksum2.get('global[]')!.count - checksum.get('global[]')!.count; expect(truncateCount).toEqual(numTransactions * perTransaction); const truncatePerSecond = Math.round((truncateCount / truncateDuration) * 1000); @@ -183,7 +179,7 @@ function defineBatchTests(factory: storage.TestStorageFactory) { // 4. Another document to make sure the internal batching overflows // to a second batch. 
- await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(`bucket_definitions: global: data: @@ -227,9 +223,7 @@ function defineBatchTests(factory: storage.TestStorageFactory) { await context.replicateSnapshot(); context.startStreaming(); - - const checkpoint = await context.getCheckpoint({ timeout: 50_000 }); - const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']); + const checksum = await context.getChecksums(['global[]'], { timeout: 50_000 }); expect(checksum.get('global[]')!.count).toEqual((numDocs + 2) * 4); }); diff --git a/modules/module-postgres/test/src/resuming_snapshots.test.ts b/modules/module-postgres/test/src/resuming_snapshots.test.ts index b06c36714..c2cb61b62 100644 --- a/modules/module-postgres/test/src/resuming_snapshots.test.ts +++ b/modules/module-postgres/test/src/resuming_snapshots.test.ts @@ -1,27 +1,30 @@ import { describe, expect, test } from 'vitest'; import { env } from './env.js'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; -import { TestStorageFactory } from '@powersync/service-core'; import { METRICS_HELPER } from '@powersync/service-core-tests'; import { ReplicationMetric } from '@powersync/service-types'; import * as timers from 'node:timers/promises'; import { ReplicationAbortedError } from '@powersync/lib-services-framework'; describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () { - describeWithStorage({ timeout: 240_000 }, function (factory) { + describeWithStorage({ timeout: 240_000 }, function ({ factory, storageVersion }) { test('resuming initial replication (1)', async () => { // Stop early - likely to not include deleted row in first replication attempt. - await testResumingReplication(factory, 2000); + await testResumingReplication(factory, storageVersion, 2000); }); test('resuming initial replication (2)', async () => { // Stop late - likely to include deleted row in first replication attempt. - await testResumingReplication(factory, 8000); + await testResumingReplication(factory, storageVersion, 8000); }); }); }); -async function testResumingReplication(factory: TestStorageFactory, stopAfter: number) { +async function testResumingReplication( + factory: StorageVersionTestContext['factory'], + storageVersion: number, + stopAfter: number +) { // This tests interrupting and then resuming initial replication. // We interrupt replication after test_data1 has fully replicated, and // test_data2 has partially replicated. @@ -33,7 +36,10 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n // have been / have not been replicated at that point is not deterministic. // We do allow for some variation in the test results to account for this. - await using context = await WalStreamTestContext.open(factory, { walStreamOptions: { snapshotChunkLength: 1000 } }); + await using context = await WalStreamTestContext.open(factory, { + storageVersion, + walStreamOptions: { snapshotChunkLength: 1000 } + }); await context.updateSyncRules(`bucket_definitions: global: @@ -84,6 +90,7 @@ async function testResumingReplication(factory: TestStorageFactory, stopAfter: n // Bypass the usual "clear db on factory open" step. 
await using context2 = await WalStreamTestContext.open(factory, { doNotClear: true, + storageVersion, walStreamOptions: { snapshotChunkLength: 1000 } }); diff --git a/modules/module-postgres/test/src/route_api_adapter.test.ts b/modules/module-postgres/test/src/route_api_adapter.test.ts index 98f16930c..904740ab8 100644 --- a/modules/module-postgres/test/src/route_api_adapter.test.ts +++ b/modules/module-postgres/test/src/route_api_adapter.test.ts @@ -20,7 +20,9 @@ describe('PostgresRouteAPIAdapter tests', () => { `); const schema = await api.getConnectionSchema(); - expect(schema).toStrictEqual([ + // Ignore any other potential schemas in the test database, for example the 'powersync' schema. + const filtered = schema.filter((s) => s.name == 'public'); + expect(filtered).toStrictEqual([ { name: 'public', tables: [ diff --git a/modules/module-postgres/test/src/schema_changes.test.ts b/modules/module-postgres/test/src/schema_changes.test.ts index 4372fb258..5235df929 100644 --- a/modules/module-postgres/test/src/schema_changes.test.ts +++ b/modules/module-postgres/test/src/schema_changes.test.ts @@ -2,8 +2,7 @@ import { compareIds, putOp, reduceBucket, removeOp, test_utils } from '@powersyn import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; -import { storage } from '@powersync/service-core'; -import { describeWithStorage } from './util.js'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; describe('schema changes', { timeout: 20_000 }, function () { @@ -24,9 +23,12 @@ const PUT_T3 = test_utils.putOp('test_data', { id: 't3', description: 'test3' }) const REMOVE_T1 = test_utils.removeOp('test_data', 't1'); const REMOVE_T2 = test_utils.removeOp('test_data', 't2'); -function defineTests(factory: storage.TestStorageFactory) { +function defineTests({ factory, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return WalStreamTestContext.open(factory, { ...options, storageVersion }); + }; test('re-create table', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Drop a table and re-create it. 
await context.updateSyncRules(BASIC_SYNC_RULES); @@ -70,7 +72,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('add table', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Add table after initial replication await context.updateSyncRules(BASIC_SYNC_RULES); const { pool } = context; @@ -98,7 +100,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('rename table (1)', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(BASIC_SYNC_RULES); @@ -136,7 +138,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('rename table (2)', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Rename table in sync rules -> in sync rules const { pool } = context; @@ -189,7 +191,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('rename table (3)', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Rename table in sync rules -> not in sync rules const { pool } = context; @@ -224,7 +226,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('change replica id', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Change replica id from default to full // Causes a re-import of the table. @@ -268,7 +270,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('change full replica id by adding column', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Change replica id from full by adding column // Causes a re-import of the table. // Other changes such as renaming column would have the same effect @@ -311,7 +313,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('change default replica id by changing column type', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Change default replica id by changing column type // Causes a re-import of the table. const { pool } = context; @@ -348,7 +350,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('change index id by changing column type', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Change index replica id by changing column type // Causes a re-import of the table. // Secondary functionality tested here is that replica id column order stays @@ -404,7 +406,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('add to publication', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Add table to publication after initial replication const { pool } = context; @@ -447,7 +449,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('add to publication (not in sync rules)', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Add table to publication after initial replication // Since the table is not in sync rules, it should not be replicated. 
const { pool } = context; @@ -474,7 +476,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('replica identity nothing', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Technically not a schema change, but fits here. // Replica ID works a little differently here - the table doesn't have // one defined, but we generate a unique one for each replicated row. @@ -521,7 +523,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('replica identity default without PK', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); // Same as no replica identity const { pool } = context; await context.updateSyncRules(BASIC_SYNC_RULES); @@ -573,7 +575,7 @@ function defineTests(factory: storage.TestStorageFactory) { // await new Promise((resolve) => setTimeout(resolve, 100)); // await this.snapshotTable(batch, db, result.table); test('table snapshot consistency', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(BASIC_SYNC_RULES); @@ -640,7 +642,7 @@ function defineTests(factory: storage.TestStorageFactory) { }); test('custom types', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(` streams: diff --git a/modules/module-postgres/test/src/slow_tests.test.ts b/modules/module-postgres/test/src/slow_tests.test.ts index 5747f6ed5..112a49a39 100644 --- a/modules/module-postgres/test/src/slow_tests.test.ts +++ b/modules/module-postgres/test/src/slow_tests.test.ts @@ -7,6 +7,7 @@ import { connectPgPool, describeWithStorage, getClientCheckpoint, + StorageVersionTestContext, TEST_CONNECTION_OPTIONS } from './util.js'; @@ -14,7 +15,7 @@ import * as pgwire from '@powersync/service-jpgwire'; import { SqliteRow } from '@powersync/service-sync-rules'; import { PgManager } from '@module/replication/PgManager.js'; -import { createCoreReplicationMetrics, initializeCoreReplicationMetrics, storage } from '@powersync/service-core'; +import { createCoreReplicationMetrics, initializeCoreReplicationMetrics } from '@powersync/service-core'; import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests'; import * as mongo_storage from '@powersync/service-module-mongodb-storage'; import * as postgres_storage from '@powersync/service-module-postgres-storage'; @@ -22,12 +23,12 @@ import * as timers from 'node:timers/promises'; import { CustomTypeRegistry } from '@module/types/registry.js'; describe.skipIf(!(env.CI || env.SLOW_TESTS))('slow tests', function () { - describeWithStorage({ timeout: 120_000 }, function (factory) { - defineSlowTests(factory); + describeWithStorage({ timeout: 120_000 }, function ({ factory, storageVersion }) { + defineSlowTests({ factory, storageVersion }); }); }); -function defineSlowTests(factory: storage.TestStorageFactory) { +function defineSlowTests({ factory, storageVersion }: StorageVersionTestContext) { let walStream: WalStream | undefined; let connections: PgManager | undefined; let abortController: AbortController | undefined; @@ -81,7 +82,7 @@ bucket_definitions: data: - SELECT * FROM "test_data" `; - const syncRules = await f.updateSyncRules({ content: syncRuleContent }); + const syncRules = await f.updateSyncRules({ content: syncRuleContent, storageVersion }); const 
storage = f.getInstance(syncRules); abortController = new AbortController(); const options: WalStreamOptions = { @@ -306,7 +307,7 @@ bucket_definitions: data: - SELECT id, description FROM "test_data" `; - const syncRules = await f.updateSyncRules({ content: syncRuleContent }); + const syncRules = await f.updateSyncRules({ content: syncRuleContent, storageVersion }); const storage = f.getInstance(syncRules); // 1. Setup some base data that will be replicated in initial replication diff --git a/modules/module-postgres/test/src/util.ts b/modules/module-postgres/test/src/util.ts index f0516f6b8..fde09f63a 100644 --- a/modules/module-postgres/test/src/util.ts +++ b/modules/module-postgres/test/src/util.ts @@ -2,12 +2,18 @@ import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js' import * as types from '@module/types/types.js'; import * as lib_postgres from '@powersync/lib-service-postgres'; import { logger } from '@powersync/lib-services-framework'; -import { BucketStorageFactory, InternalOpId, TestStorageFactory } from '@powersync/service-core'; +import { + BucketStorageFactory, + CURRENT_STORAGE_VERSION, + InternalOpId, + LEGACY_STORAGE_VERSION, + TestStorageFactory +} from '@powersync/service-core'; import * as pgwire from '@powersync/service-jpgwire'; import * as mongo_storage from '@powersync/service-module-mongodb-storage'; import * as postgres_storage from '@powersync/service-module-postgres-storage'; -import { env } from './env.js'; import { describe, TestOptions } from 'vitest'; +import { env } from './env.js'; export const TEST_URI = env.PG_TEST_URL; @@ -20,14 +26,34 @@ export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.test_utils. url: env.PG_STORAGE_TEST_URL }); -export function describeWithStorage(options: TestOptions, fn: (factory: TestStorageFactory) => void) { - describe.skipIf(!env.TEST_MONGO_STORAGE)(`mongodb storage`, options, function () { - fn(INITIALIZED_MONGO_STORAGE_FACTORY); - }); +const TEST_STORAGE_VERSIONS = [LEGACY_STORAGE_VERSION, CURRENT_STORAGE_VERSION]; + +export interface StorageVersionTestContext { + factory: TestStorageFactory; + storageVersion: number; +} - describe.skipIf(!env.TEST_POSTGRES_STORAGE)(`postgres storage`, options, function () { - fn(INITIALIZED_POSTGRES_STORAGE_FACTORY); - }); +export function describeWithStorage(options: TestOptions, fn: (context: StorageVersionTestContext) => void) { + const describeFactory = (storageName: string, factory: TestStorageFactory) => { + describe(`${storageName} storage`, options, function () { + for (const storageVersion of TEST_STORAGE_VERSIONS) { + describe(`storage v${storageVersion}`, function () { + fn({ + factory, + storageVersion + }); + }); + } + }); + }; + + if (env.TEST_MONGO_STORAGE) { + describeFactory('mongodb', INITIALIZED_MONGO_STORAGE_FACTORY); + } + + if (env.TEST_POSTGRES_STORAGE) { + describeFactory('postgres', INITIALIZED_POSTGRES_STORAGE_FACTORY); + } } export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ diff --git a/modules/module-postgres/test/src/wal_stream.test.ts b/modules/module-postgres/test/src/wal_stream.test.ts index bd7440809..44a915bf3 100644 --- a/modules/module-postgres/test/src/wal_stream.test.ts +++ b/modules/module-postgres/test/src/wal_stream.test.ts @@ -1,11 +1,10 @@ import { MissingReplicationSlotError } from '@module/replication/WalStream.js'; -import { storage } from '@powersync/service-core'; import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests'; import { pgwireRows } from 
'@powersync/service-jpgwire'; import { ReplicationMetric } from '@powersync/service-types'; import * as crypto from 'crypto'; -import { afterAll, beforeAll, describe, expect, test } from 'vitest'; -import { describeWithStorage } from './util.js'; +import { describe, expect, test } from 'vitest'; +import { describeWithStorage, StorageVersionTestContext } from './util.js'; import { WalStreamTestContext, withMaxWalSize } from './wal_stream_utils.js'; import { JSONBig } from '@powersync/service-jsonbig'; @@ -20,9 +19,12 @@ describe('wal stream', () => { describeWithStorage({ timeout: 20_000 }, defineWalStreamTests); }); -function defineWalStreamTests(factory: storage.TestStorageFactory) { +function defineWalStreamTests({ factory, storageVersion }: StorageVersionTestContext) { + const openContext = (options?: Parameters[1]) => { + return WalStreamTestContext.open(factory, { ...options, storageVersion }); + }; test('replicating basic values', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(` bucket_definitions: @@ -57,7 +59,7 @@ bucket_definitions: }); test('replicating case sensitive table', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(` bucket_definitions: @@ -88,7 +90,7 @@ bucket_definitions: }); test('replicating TOAST values', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(` bucket_definitions: @@ -126,7 +128,7 @@ bucket_definitions: }); test('replicating TRUNCATE', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; const syncRuleContent = ` bucket_definitions: @@ -157,7 +159,7 @@ bucket_definitions: }); test('replicating changing primary key', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(BASIC_SYNC_RULES); await pool.query(`DROP TABLE IF EXISTS test_data`); @@ -198,7 +200,7 @@ bucket_definitions: }); test('initial sync', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(BASIC_SYNC_RULES); @@ -217,7 +219,7 @@ bucket_definitions: }); test('record too large', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(`bucket_definitions: global: data: @@ -254,7 +256,7 @@ bucket_definitions: }); test('table not in sync rules', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(BASIC_SYNC_RULES); @@ -280,7 +282,7 @@ bucket_definitions: test('reporting slot issues', async () => { { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(` bucket_definitions: @@ -310,7 +312,7 @@ bucket_definitions: } { - await using context = await WalStreamTestContext.open(factory, { doNotClear: true }); + await using context 
= await openContext({ doNotClear: true }); const { pool } = context; await pool.query('DROP PUBLICATION powersync'); await pool.query(`UPDATE test_data SET description = 'updated'`); @@ -345,7 +347,7 @@ bucket_definitions: test('dropped replication slot', async () => { { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(` bucket_definitions: @@ -375,7 +377,7 @@ bucket_definitions: } { - await using context = await WalStreamTestContext.open(factory, { doNotClear: true }); + await using context = await openContext({ doNotClear: true }); const { pool } = context; const storage = await context.factory.getActiveStorage(); @@ -396,7 +398,7 @@ bucket_definitions: }); test('replication slot lost', async () => { - await using baseContext = await WalStreamTestContext.open(factory, { doNotClear: true }); + await using baseContext = await openContext({ doNotClear: true }); const serverVersion = await baseContext.connectionManager.getServerVersion(); if (serverVersion!.compareMain('13.0.0') < 0) { @@ -408,7 +410,7 @@ bucket_definitions: await using s = await withMaxWalSize(baseContext.pool, '100MB'); { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(` bucket_definitions: @@ -438,7 +440,7 @@ bucket_definitions: } { - await using context = await WalStreamTestContext.open(factory, { doNotClear: true }); + await using context = await openContext({ doNotClear: true }); const { pool } = context; const storage = await context.factory.getActiveStorage(); const slotName = storage?.slot_name!; @@ -479,7 +481,7 @@ bucket_definitions: }); test('old date format', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(BASIC_SYNC_RULES); const { pool } = context; @@ -494,7 +496,7 @@ bucket_definitions: }); test('new date format', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(` streams: stream: @@ -515,7 +517,7 @@ config: }); test('custom types', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(` streams: @@ -550,7 +552,7 @@ config: }); test('custom types in primary key', async () => { - await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); await context.updateSyncRules(` streams: @@ -576,7 +578,7 @@ config: test('replica identity handling', async () => { // This specifically test a case of timestamps being used as part of the replica identity. // There was a regression in versions 1.15.0-1.15.5, which this tests for. 
- await using context = await WalStreamTestContext.open(factory); + await using context = await openContext(); const { pool } = context; await context.updateSyncRules(BASIC_SYNC_RULES); diff --git a/modules/module-postgres/test/src/wal_stream_utils.ts b/modules/module-postgres/test/src/wal_stream_utils.ts index e58230c58..d5a0ac2dc 100644 --- a/modules/module-postgres/test/src/wal_stream_utils.ts +++ b/modules/module-postgres/test/src/wal_stream_utils.ts @@ -1,23 +1,26 @@ import { PgManager } from '@module/replication/PgManager.js'; import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js'; +import { CustomTypeRegistry } from '@module/types/registry.js'; import { BucketStorageFactory, createCoreReplicationMetrics, initializeCoreReplicationMetrics, InternalOpId, + LEGACY_STORAGE_VERSION, OplogEntry, + STORAGE_VERSION_CONFIG, storage, SyncRulesBucketStorage } from '@powersync/service-core'; import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests'; import * as pgwire from '@powersync/service-jpgwire'; import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; -import { CustomTypeRegistry } from '@module/types/registry.js'; export class WalStreamTestContext implements AsyncDisposable { private _walStream?: WalStream; private abortController = new AbortController(); private streamPromise?: Promise; + private syncRulesId?: number; public storage?: SyncRulesBucketStorage; private replicationConnection?: pgwire.PgConnection; private snapshotPromise?: Promise; @@ -30,7 +33,7 @@ export class WalStreamTestContext implements AsyncDisposable { */ static async open( factory: (options: storage.TestStorageOptions) => Promise, - options?: { doNotClear?: boolean; walStreamOptions?: Partial } + options?: { doNotClear?: boolean; storageVersion?: number; walStreamOptions?: Partial } ) { const f = await factory({ doNotClear: options?.doNotClear }); const connectionManager = new PgManager(TEST_CONNECTION_OPTIONS, {}); @@ -39,13 +42,18 @@ export class WalStreamTestContext implements AsyncDisposable { await clearTestDb(connectionManager.pool); } - return new WalStreamTestContext(f, connectionManager, options?.walStreamOptions); + const storageVersion = options?.storageVersion ?? LEGACY_STORAGE_VERSION; + const versionedBuckets = STORAGE_VERSION_CONFIG[storageVersion]?.versionedBuckets ?? false; + + return new WalStreamTestContext(f, connectionManager, options?.walStreamOptions, storageVersion, versionedBuckets); } constructor( public factory: BucketStorageFactory, public connectionManager: PgManager, - private walStreamOptions?: Partial + private walStreamOptions?: Partial, + private storageVersion: number = LEGACY_STORAGE_VERSION, + private versionedBuckets: boolean = STORAGE_VERSION_CONFIG[storageVersion]?.versionedBuckets ?? 
false ) { createCoreReplicationMetrics(METRICS_HELPER.metricsEngine); initializeCoreReplicationMetrics(METRICS_HELPER.metricsEngine); @@ -95,7 +103,12 @@ export class WalStreamTestContext implements AsyncDisposable { } async updateSyncRules(content: string) { - const syncRules = await this.factory.updateSyncRules({ content: content, validate: true }); + const syncRules = await this.factory.updateSyncRules({ + content: content, + validate: true, + storageVersion: this.storageVersion + }); + this.syncRulesId = syncRules.id; this.storage = this.factory.getInstance(syncRules); return this.storage!; } @@ -106,6 +119,7 @@ export class WalStreamTestContext implements AsyncDisposable { throw new Error(`Next sync rules not available`); } + this.syncRulesId = syncRules.id; this.storage = this.factory.getInstance(syncRules); return this.storage!; } @@ -116,6 +130,7 @@ export class WalStreamTestContext implements AsyncDisposable { throw new Error(`Active sync rules not available`); } + this.syncRulesId = syncRules.id; this.storage = this.factory.getInstance(syncRules); return this.storage!; } @@ -177,9 +192,21 @@ export class WalStreamTestContext implements AsyncDisposable { return checkpoint; } + private resolveBucketName(bucket: string) { + if (!this.versionedBuckets || /^\d+#/.test(bucket)) { + return bucket; + } + if (this.syncRulesId == null) { + throw new Error('Sync rules not configured - call updateSyncRules() first'); + } + return `${this.syncRulesId}#${bucket}`; + } + async getBucketsDataBatch(buckets: Record, options?: { timeout?: number }) { let checkpoint = await this.getCheckpoint(options); - const map = new Map(Object.entries(buckets)); + const map = new Map( + Object.entries(buckets).map(([bucket, opId]) => [this.resolveBucketName(bucket), opId]) + ); return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); } @@ -191,8 +218,9 @@ export class WalStreamTestContext implements AsyncDisposable { if (typeof start == 'string') { start = BigInt(start); } + const resolvedBucket = this.resolveBucketName(bucket); const checkpoint = await this.getCheckpoint(options); - const map = new Map([[bucket, start]]); + const map = new Map([[resolvedBucket, start]]); let data: OplogEntry[] = []; while (true) { const batch = this.storage!.getBucketDataBatch(checkpoint, map); @@ -202,11 +230,29 @@ export class WalStreamTestContext implements AsyncDisposable { if (batches.length == 0 || !batches[0]!.chunkData.has_more) { break; } - map.set(bucket, BigInt(batches[0]!.chunkData.next_after)); + map.set(resolvedBucket, BigInt(batches[0]!.chunkData.next_after)); } return data; } + async getChecksums(buckets: string[], options?: { timeout?: number }) { + const checkpoint = await this.getCheckpoint(options); + const versionedBuckets = buckets.map((bucket) => this.resolveBucketName(bucket)); + const checksums = await this.storage!.getChecksums(checkpoint, versionedBuckets); + + const unversioned = new Map(); + for (let i = 0; i < buckets.length; i++) { + unversioned.set(buckets[i], checksums.get(versionedBuckets[i])!); + } + + return unversioned; + } + + async getChecksum(bucket: string, options?: { timeout?: number }) { + const checksums = await this.getChecksums([bucket], options); + return checksums.get(bucket); + } + /** * This does not wait for a client checkpoint. 
*/ @@ -215,8 +261,9 @@ export class WalStreamTestContext implements AsyncDisposable { if (typeof start == 'string') { start = BigInt(start); } + const resolvedBucket = this.resolveBucketName(bucket); const { checkpoint } = await this.storage!.getCheckpoint(); - const map = new Map([[bucket, start]]); + const map = new Map([[resolvedBucket, start]]); const batch = this.storage!.getBucketDataBatch(checkpoint, map); const batches = await test_utils.fromAsync(batch); return batches[0]?.chunkData.data ?? []; diff --git a/packages/service-core-tests/src/test-utils/general-utils.ts b/packages/service-core-tests/src/test-utils/general-utils.ts index 0c6128909..1deab5e85 100644 --- a/packages/service-core-tests/src/test-utils/general-utils.ts +++ b/packages/service-core-tests/src/test-utils/general-utils.ts @@ -1,6 +1,5 @@ import { storage, utils } from '@powersync/service-core'; -import { GetQuerierOptions, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules'; -import { versionedHydrationState } from '@powersync/service-sync-rules/src/HydrationState.js'; +import { GetQuerierOptions, RequestParameters } from '@powersync/service-sync-rules'; import * as bson from 'bson'; export const ZERO_LSN = '0/0'; @@ -15,29 +14,6 @@ export const BATCH_OPTIONS: storage.StartBatchOptions = { storeCurrentData: true }; -export function testRules(content: string): storage.PersistedSyncRulesContent { - return { - id: 1, - sync_rules_content: content, - slot_name: 'test', - active: true, - last_checkpoint_lsn: '', - parsed(options) { - return { - id: 1, - sync_rules: SqlSyncRules.fromYaml(content, options), - slot_name: 'test', - hydratedSyncRules() { - return this.sync_rules.config.hydrate({ hydrationState: versionedHydrationState(1) }); - } - }; - }, - lock() { - throw new Error('Not implemented'); - } - }; -} - export function makeTestTable(name: string, replicaIdColumns?: string[] | undefined) { const relId = utils.hashData('table', name, (replicaIdColumns ?? 
['id']).join(',')); const id = new bson.ObjectId('6544e3899293153fa7b38331'); diff --git a/packages/service-core-tests/src/tests/register-compacting-tests.ts b/packages/service-core-tests/src/tests/register-compacting-tests.ts index 4c3683712..bac08de74 100644 --- a/packages/service-core-tests/src/tests/register-compacting-tests.ts +++ b/packages/service-core-tests/src/tests/register-compacting-tests.ts @@ -1,6 +1,7 @@ import { storage } from '@powersync/service-core'; import { expect, test } from 'vitest'; import * as test_utils from '../test-utils/test-utils-index.js'; +import { bucketRequest, bucketRequestMap, bucketRequests } from './util.js'; const TEST_TABLE = test_utils.makeTestTable('test', ['id']); @@ -50,10 +51,10 @@ bucket_definitions: const checkpoint = result!.flushed_op; const batchBefore = await test_utils.oneFromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]])) + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) ); const dataBefore = batchBefore.chunkData.data; - const checksumBefore = await bucketStorage.getChecksums(checkpoint, ['global[]']); + const checksumBefore = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]'])); expect(dataBefore).toMatchObject([ { @@ -86,12 +87,12 @@ bucket_definitions: }); const batchAfter = await test_utils.oneFromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]])) + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) ); const dataAfter = batchAfter.chunkData.data; - const checksumAfter = await bucketStorage.getChecksums(checkpoint, ['global[]']); + const checksumAfter = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]'])); bucketStorage.clearChecksumCache(); - const checksumAfter2 = await bucketStorage.getChecksums(checkpoint, ['global[]']); + const checksumAfter2 = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]'])); expect(batchAfter.targetOp).toEqual(3n); expect(dataAfter).toMatchObject([ @@ -114,8 +115,12 @@ bucket_definitions: } ]); - expect(checksumAfter.get('global[]')).toEqual(checksumBefore.get('global[]')); - expect(checksumAfter2.get('global[]')).toEqual(checksumBefore.get('global[]')); + expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual( + checksumBefore.get(bucketRequest(syncRules, 'global[]')) + ); + expect(checksumAfter2.get(bucketRequest(syncRules, 'global[]'))).toEqual( + checksumBefore.get(bucketRequest(syncRules, 'global[]')) + ); test_utils.validateCompactedBucket(dataBefore, dataAfter); }); @@ -174,10 +179,10 @@ bucket_definitions: const checkpoint = result!.flushed_op; const batchBefore = await test_utils.oneFromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]])) + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) ); const dataBefore = batchBefore.chunkData.data; - const checksumBefore = await bucketStorage.getChecksums(checkpoint, ['global[]']); + const checksumBefore = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]'])); expect(dataBefore).toMatchObject([ { @@ -215,11 +220,11 @@ bucket_definitions: }); const batchAfter = await test_utils.oneFromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]])) + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) ); const dataAfter = 
batchAfter.chunkData.data; bucketStorage.clearChecksumCache(); - const checksumAfter = await bucketStorage.getChecksums(checkpoint, ['global[]']); + const checksumAfter = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]'])); expect(batchAfter.targetOp).toEqual(4n); expect(dataAfter).toMatchObject([ @@ -235,8 +240,8 @@ bucket_definitions: op_id: '4' } ]); - expect(checksumAfter.get('global[]')).toEqual({ - ...checksumBefore.get('global[]'), + expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual({ + ...checksumBefore.get(bucketRequest(syncRules, 'global[]')), count: 2 }); @@ -286,7 +291,7 @@ bucket_definitions: }); const checkpoint1 = result!.flushed_op; - const checksumBefore = await bucketStorage.getChecksums(checkpoint1, ['global[]']); + const checksumBefore = await bucketStorage.getChecksums(checkpoint1, bucketRequests(syncRules, ['global[]'])); const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ @@ -310,11 +315,11 @@ bucket_definitions: }); const batchAfter = await test_utils.oneFromAsync( - bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', 0n]])) + bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', 0n]])) ); const dataAfter = batchAfter.chunkData.data; await bucketStorage.clearChecksumCache(); - const checksumAfter = await bucketStorage.getChecksums(checkpoint2, ['global[]']); + const checksumAfter = await bucketStorage.getChecksums(checkpoint2, bucketRequests(syncRules, ['global[]'])); expect(batchAfter.targetOp).toEqual(4n); expect(dataAfter).toMatchObject([ @@ -324,8 +329,8 @@ bucket_definitions: op_id: '4' } ]); - expect(checksumAfter.get('global[]')).toEqual({ - bucket: 'global[]', + expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual({ + bucket: bucketRequest(syncRules, 'global[]'), count: 1, checksum: 1874612650 }); @@ -426,7 +431,7 @@ bucket_definitions: const batchAfter = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([ + bucketRequestMap(syncRules, [ ['grouped["b1"]', 0n], ['grouped["b2"]', 0n] ]) @@ -524,9 +529,9 @@ bucket_definitions: }); const checkpoint2 = result2!.flushed_op; await bucketStorage.clearChecksumCache(); - const checksumAfter = await bucketStorage.getChecksums(checkpoint2, ['global[]']); - expect(checksumAfter.get('global[]')).toEqual({ - bucket: 'global[]', + const checksumAfter = await bucketStorage.getChecksums(checkpoint2, bucketRequests(syncRules, ['global[]'])); + expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual({ + bucket: bucketRequest(syncRules, 'global[]'), count: 4, checksum: 1874612650 }); @@ -566,7 +571,7 @@ bucket_definitions: }); // Get checksums here just to populate the cache - await bucketStorage.getChecksums(result!.flushed_op, ['global[]']); + await bucketStorage.getChecksums(result!.flushed_op, bucketRequests(syncRules, ['global[]'])); const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, @@ -589,9 +594,9 @@ bucket_definitions: const checkpoint2 = result2!.flushed_op; // Check that the checksum was correctly updated with the clear operation after having a cached checksum - const checksumAfter = await bucketStorage.getChecksums(checkpoint2, ['global[]']); - expect(checksumAfter.get('global[]')).toMatchObject({ - bucket: 'global[]', + const checksumAfter = await bucketStorage.getChecksums(checkpoint2, 
bucketRequests(syncRules, ['global[]'])); + expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toMatchObject({ + bucket: bucketRequest(syncRules, 'global[]'), count: 1, checksum: -1481659821 }); diff --git a/packages/service-core-tests/src/tests/register-data-storage-data-tests.ts b/packages/service-core-tests/src/tests/register-data-storage-data-tests.ts index f9729f21d..26ded434d 100644 --- a/packages/service-core-tests/src/tests/register-data-storage-data-tests.ts +++ b/packages/service-core-tests/src/tests/register-data-storage-data-tests.ts @@ -1,7 +1,7 @@ import { BucketDataBatchOptions, getUuidReplicaIdentityBson, OplogEntry, storage } from '@powersync/service-core'; import { describe, expect, test } from 'vitest'; import * as test_utils from '../test-utils/test-utils-index.js'; -import { TEST_TABLE } from './util.js'; +import { bucketRequest, bucketRequestMap, bucketRequests, TEST_TABLE } from './util.js'; /** * Normalize data from OplogEntries for comparison in tests. @@ -59,7 +59,9 @@ bucket_definitions: const { checkpoint } = await bucketStorage.getCheckpoint(); - const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) + ); const data = batch[0].chunkData.data.map((d) => { return { op: d.op, @@ -76,10 +78,12 @@ bucket_definitions: { op: 'REMOVE', object_id: 'test1', checksum: c2 } ]); - const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()]; + const checksums = [ + ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values() + ]; expect(checksums).toEqual([ { - bucket: 'global[]', + bucket: bucketRequest(syncRules, 'global[]'), checksum: (c1 + c2) & 0xffffffff, count: 2 } @@ -135,7 +139,9 @@ bucket_definitions: await batch.commit('1/1'); }); const { checkpoint } = await bucketStorage.getCheckpoint(); - const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) + ); const data = batch[0].chunkData.data.map((d) => { return { op: d.op, @@ -201,7 +207,9 @@ bucket_definitions: const { checkpoint } = await bucketStorage.getCheckpoint(); - const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) + ); const data = batch[0].chunkData.data.map((d) => { return { op: d.op, @@ -218,10 +226,12 @@ bucket_definitions: { op: 'REMOVE', object_id: 'test1', checksum: c2 } ]); - const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()]; + const checksums = [ + ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values() + ]; expect(checksums).toEqual([ { - bucket: 'global[]', + bucket: bucketRequest(syncRules, 'global[]'), checksum: (c1 + c2) & 0xffffffff, count: 2 } @@ -320,7 +330,9 @@ bucket_definitions: const { checkpoint } = await bucketStorage.getCheckpoint(); - const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]))); + const batch = await test_utils.fromAsync( + 
bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])) + ); const data = batch[0].chunkData.data.map((d) => { return { @@ -340,10 +352,12 @@ bucket_definitions: { op: 'REMOVE', object_id: 'test1', checksum: c2 } ]); - const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()]; + const checksums = [ + ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values() + ]; expect(checksums).toEqual([ { - bucket: 'global[]', + bucket: bucketRequest(syncRules, 'global[]'), checksum: (c1 + c1 + c1 + c2) & 0xffffffff, count: 4 } @@ -480,7 +494,7 @@ bucket_definitions: const checkpoint2 = result2!.flushed_op; const batch = await test_utils.fromAsync( - bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', checkpoint1]])) + bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', checkpoint1]])) ); const data = batch[0].chunkData.data.map((d) => { @@ -580,7 +594,7 @@ bucket_definitions: const checkpoint3 = result3!.flushed_op; const batch = await test_utils.fromAsync( - bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])) + bucketStorage.getBucketDataBatch(checkpoint3, bucketRequestMap(syncRules, [['global[]', checkpoint1]])) ); const data = batch[0].chunkData.data.map((d) => { return { @@ -688,7 +702,7 @@ bucket_definitions: const checkpoint3 = result3!.flushed_op; const batch = await test_utils.fromAsync( - bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])) + bucketStorage.getBucketDataBatch(checkpoint3, bucketRequestMap(syncRules, [['global[]', checkpoint1]])) ); const data = batch[0].chunkData.data.map((d) => { return { @@ -790,7 +804,7 @@ bucket_definitions: }; const batch1 = await test_utils.fromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]), options) + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]), options) ); expect(test_utils.getBatchData(batch1)).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }, @@ -805,7 +819,7 @@ bucket_definitions: const batch2 = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([['global[]', BigInt(batch1[0].chunkData.next_after)]]), + bucketRequestMap(syncRules, [['global[]', BigInt(batch1[0].chunkData.next_after)]]), options ) ); @@ -822,7 +836,7 @@ bucket_definitions: const batch3 = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([['global[]', BigInt(batch2[0].chunkData.next_after)]]), + bucketRequestMap(syncRules, [['global[]', BigInt(batch2[0].chunkData.next_after)]]), options ) ); @@ -864,7 +878,7 @@ bucket_definitions: const { checkpoint } = await bucketStorage.getCheckpoint(); const batch1 = await test_utils.oneFromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]), { limit: 4 }) + bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]), { limit: 4 }) ); expect(test_utils.getBatchData(batch1)).toEqual([ @@ -881,9 +895,13 @@ bucket_definitions: }); const batch2 = await test_utils.oneFromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch1.chunkData.next_after)]]), { - limit: 4 - }) + bucketStorage.getBucketDataBatch( + checkpoint, + bucketRequestMap(syncRules, [['global[]', BigInt(batch1.chunkData.next_after)]]), + { + limit: 4 + } + ) ); 
expect(test_utils.getBatchData(batch2)).toEqual([ { op_id: '5', op: 'PUT', object_id: 'test5', checksum: 3686902721 }, @@ -897,9 +915,13 @@ bucket_definitions: }); const batch3 = await test_utils.fromAsync( - bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch2.chunkData.next_after)]]), { - limit: 4 - }) + bucketStorage.getBucketDataBatch( + checkpoint, + bucketRequestMap(syncRules, [['global[]', BigInt(batch2.chunkData.next_after)]]), + { + limit: 4 + } + ) ); expect(test_utils.getBatchData(batch3)).toEqual([]); @@ -942,24 +964,26 @@ bucket_definitions: }); const { checkpoint } = await bucketStorage.getCheckpoint(); - return await test_utils.fromAsync( + const batch = await test_utils.fromAsync( bucketStorage.getBucketDataBatch( checkpoint, - new Map([ + bucketRequestMap(syncRules, [ ['global1[]', 0n], ['global2[]', 0n] ]), options ) ); + + return { syncRules, batch }; }; test('batch has_more (1)', async () => { - const batch = await setup({ limit: 5 }); + const { batch, syncRules } = await setup({ limit: 5 }); expect(batch.length).toEqual(2); - expect(batch[0].chunkData.bucket).toEqual('global1[]'); - expect(batch[1].chunkData.bucket).toEqual('global2[]'); + expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]')); + expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]')); expect(test_utils.getBatchData(batch[0])).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 } @@ -986,11 +1010,11 @@ bucket_definitions: }); test('batch has_more (2)', async () => { - const batch = await setup({ limit: 11 }); + const { batch, syncRules } = await setup({ limit: 11 }); expect(batch.length).toEqual(2); - expect(batch[0].chunkData.bucket).toEqual('global1[]'); - expect(batch[1].chunkData.bucket).toEqual('global2[]'); + expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]')); + expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]')); expect(test_utils.getBatchData(batch[0])).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 } @@ -1023,12 +1047,12 @@ bucket_definitions: test('batch has_more (3)', async () => { // 50 bytes is more than 1 row, less than 2 rows - const batch = await setup({ limit: 3, chunkLimitBytes: 50 }); + const { batch, syncRules } = await setup({ limit: 3, chunkLimitBytes: 50 }); expect(batch.length).toEqual(3); - expect(batch[0].chunkData.bucket).toEqual('global1[]'); - expect(batch[1].chunkData.bucket).toEqual('global2[]'); - expect(batch[2].chunkData.bucket).toEqual('global2[]'); + expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]')); + expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]')); + expect(batch[2].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]')); expect(test_utils.getBatchData(batch[0])).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 } @@ -1076,8 +1100,8 @@ bucket_definitions: await batch.keepalive('1/0'); }); - const metrics2 = await f.getStorageMetrics(); - expect(metrics2).toMatchSnapshot(); + await f.getStorageMetrics(); + // We don't care about the specific values here }); test('op_id initialization edge case', async () => { @@ -1157,10 +1181,14 @@ bucket_definitions: }); const { checkpoint } = await bucketStorage.getCheckpoint(); - const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()]; - expect(checksums).toEqual([{ bucket: 'global[]', checksum: 1917136889, 
count: 1 }]); - const checksums2 = [...(await bucketStorage.getChecksums(checkpoint + 1n, ['global[]'])).values()]; - expect(checksums2).toEqual([{ bucket: 'global[]', checksum: 1917136889, count: 1 }]); + const checksums = [ + ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values() + ]; + expect(checksums).toEqual([{ bucket: bucketRequest(syncRules, 'global[]'), checksum: 1917136889, count: 1 }]); + const checksums2 = [ + ...(await bucketStorage.getChecksums(checkpoint + 1n, bucketRequests(syncRules, ['global[]']))).values() + ]; + expect(checksums2).toEqual([{ bucket: bucketRequest(syncRules, 'global[]'), checksum: 1917136889, count: 1 }]); }); testChecksumBatching(generateStorageFactory); @@ -1207,14 +1235,14 @@ bucket_definitions: const { checkpoint } = await bucketStorage.getCheckpoint(); bucketStorage.clearChecksumCache(); - const buckets = ['user["u1"]', 'user["u2"]', 'user["u3"]', 'user["u4"]']; + const buckets = bucketRequests(syncRules, ['user["u1"]', 'user["u2"]', 'user["u3"]', 'user["u4"]']); const checksums = [...(await bucketStorage.getChecksums(checkpoint, buckets)).values()]; checksums.sort((a, b) => a.bucket.localeCompare(b.bucket)); expect(checksums).toEqual([ - { bucket: 'user["u1"]', count: 4, checksum: 346204588 }, - { bucket: 'user["u2"]', count: 4, checksum: 5261081 }, - { bucket: 'user["u3"]', count: 4, checksum: 134760718 }, - { bucket: 'user["u4"]', count: 4, checksum: -302639724 } + { bucket: bucketRequest(syncRules, 'user["u1"]'), count: 4, checksum: 346204588 }, + { bucket: bucketRequest(syncRules, 'user["u2"]'), count: 4, checksum: 5261081 }, + { bucket: bucketRequest(syncRules, 'user["u3"]'), count: 4, checksum: 134760718 }, + { bucket: bucketRequest(syncRules, 'user["u4"]'), count: 4, checksum: -302639724 } ]); }); } diff --git a/packages/service-core-tests/src/tests/register-data-storage-parameter-tests.ts b/packages/service-core-tests/src/tests/register-data-storage-parameter-tests.ts index b031457d6..5618110a6 100644 --- a/packages/service-core-tests/src/tests/register-data-storage-parameter-tests.ts +++ b/packages/service-core-tests/src/tests/register-data-storage-parameter-tests.ts @@ -2,8 +2,8 @@ import { JwtPayload, storage } from '@powersync/service-core'; import { RequestParameters, ScopedParameterLookup, SqliteJsonRow } from '@powersync/service-sync-rules'; import { expect, test } from 'vitest'; import * as test_utils from '../test-utils/test-utils-index.js'; -import { TEST_TABLE } from './util.js'; -import { ParameterLookupScope } from '@powersync/service-sync-rules/src/HydrationState.js'; +import { bucketRequest, TEST_TABLE } from './util.js'; +import { ParameterLookupScope } from '@powersync/service-sync-rules'; /** * @example @@ -345,7 +345,12 @@ bucket_definitions: } }); expect(buckets).toEqual([ - { bucket: 'by_workspace["workspace1"]', priority: 3, definition: 'by_workspace', inclusion_reasons: ['default'] } + { + bucket: bucketRequest(syncRules, 'by_workspace["workspace1"]'), + priority: 3, + definition: 'by_workspace', + inclusion_reasons: ['default'] + } ]); }); @@ -421,13 +426,13 @@ bucket_definitions: buckets.sort((a, b) => a.bucket.localeCompare(b.bucket)); expect(buckets).toEqual([ { - bucket: 'by_public_workspace["workspace1"]', + bucket: bucketRequest(syncRules, 'by_public_workspace["workspace1"]'), priority: 3, definition: 'by_public_workspace', inclusion_reasons: ['default'] }, { - bucket: 'by_public_workspace["workspace3"]', + bucket: bucketRequest(syncRules, 
'by_public_workspace["workspace3"]'), priority: 3, definition: 'by_public_workspace', inclusion_reasons: ['default'] @@ -528,7 +533,10 @@ bucket_definitions: expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]); buckets.sort(); - expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']); + expect(buckets).toEqual([ + bucketRequest(syncRules, 'by_workspace["workspace1"]'), + bucketRequest(syncRules, 'by_workspace["workspace3"]') + ]); }); test('truncate parameters', async () => { diff --git a/packages/service-core-tests/src/tests/register-sync-tests.ts b/packages/service-core-tests/src/tests/register-sync-tests.ts index 404850b7f..3a693ba85 100644 --- a/packages/service-core-tests/src/tests/register-sync-tests.ts +++ b/packages/service-core-tests/src/tests/register-sync-tests.ts @@ -1,6 +1,7 @@ import { createCoreAPIMetrics, JwtPayload, + LEGACY_STORAGE_VERSION, storage, StreamingSyncCheckpoint, StreamingSyncCheckpointDiff, @@ -15,6 +16,7 @@ import { fileURLToPath } from 'url'; import { expect, test } from 'vitest'; import * as test_utils from '../test-utils/test-utils-index.js'; import { METRICS_HELPER } from '../test-utils/test-utils-index.js'; +import { bucketRequest } from './util.js'; const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); @@ -38,7 +40,7 @@ export const SYNC_SNAPSHOT_PATH = path.resolve(__dirname, '../__snapshots/sync.t * }); * ``` */ -export function registerSyncTests(factory: storage.TestStorageFactory) { +export function registerSyncTests(factory: storage.TestStorageFactory, options: { storageVersion?: number } = {}) { createCoreAPIMetrics(METRICS_HELPER.metricsEngine); const tracker = new sync.RequestTracker(METRICS_HELPER.metricsEngine); const syncContext = new sync.SyncContext({ @@ -47,10 +49,20 @@ export function registerSyncTests(factory: storage.TestStorageFactory) { maxDataFetchConcurrency: 2 }); + const updateSyncRules = ( + bucketStorageFactory: storage.BucketStorageFactory, + updateOptions: storage.UpdateSyncRulesOptions + ) => { + return bucketStorageFactory.updateSyncRules({ + ...updateOptions, + storageVersion: updateOptions.storageVersion ?? 
options.storageVersion + }); + }; + test('sync global data', async () => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); @@ -101,7 +113,7 @@ export function registerSyncTests(factory: storage.TestStorageFactory) { test('sync buckets in order', async () => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: ` bucket_definitions: b0: @@ -162,7 +174,7 @@ bucket_definitions: test('sync interrupts low-priority buckets on new checkpoints', async () => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: ` bucket_definitions: b0: @@ -271,7 +283,7 @@ bucket_definitions: test('sync interruptions with unrelated data', async () => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: ` bucket_definitions: b0: @@ -409,7 +421,7 @@ bucket_definitions: // then interrupt checkpoint with new data for all buckets // -> data for all buckets should be sent in the new checkpoint - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: ` bucket_definitions: b0a: @@ -553,7 +565,7 @@ bucket_definitions: test('sends checkpoint complete line for empty checkpoint', async () => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); const bucketStorage = f.getInstance(syncRules); @@ -616,7 +628,7 @@ bucket_definitions: test('sync legacy non-raw data', async () => { const f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); @@ -660,7 +672,7 @@ bucket_definitions: test('expired token', async () => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); @@ -687,7 +699,7 @@ bucket_definitions: test('sync updates to global data', async (context) => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); @@ -753,7 +765,7 @@ bucket_definitions: test('sync updates to parameter query only', async (context) => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: `bucket_definitions: by_user: parameters: select users.id as user_id from users where users.id = request.user_id() @@ -812,14 +824,14 @@ bucket_definitions: const checkpoint2 = await getCheckpointLines(iter); expect( (checkpoint2[0] as StreamingSyncCheckpointDiff).checkpoint_diff?.updated_buckets?.map((b) => b.bucket) - ).toEqual(['by_user["user1"]']); + ).toEqual([bucketRequest(syncRules, 'by_user["user1"]')]); expect(checkpoint2).toMatchSnapshot(); }); test('sync updates to data query only', async (context) => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: `bucket_definitions: by_user: parameters: select users.id as user_id from users where users.id = request.user_id() @@ -867,7 +879,7 @@ bucket_definitions: const checkpoint1 = await getCheckpointLines(iter); expect((checkpoint1[0] as 
StreamingSyncCheckpoint).checkpoint?.buckets?.map((b) => b.bucket)).toEqual([ - 'by_user["user1"]' + bucketRequest(syncRules, 'by_user["user1"]') ]); expect(checkpoint1).toMatchSnapshot(); @@ -889,14 +901,14 @@ bucket_definitions: const checkpoint2 = await getCheckpointLines(iter); expect( (checkpoint2[0] as StreamingSyncCheckpointDiff).checkpoint_diff?.updated_buckets?.map((b) => b.bucket) - ).toEqual(['by_user["user1"]']); + ).toEqual([bucketRequest(syncRules, 'by_user["user1"]')]); expect(checkpoint2).toMatchSnapshot(); }); test('sync updates to parameter query + data', async (context) => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: `bucket_definitions: by_user: parameters: select users.id as user_id from users where users.id = request.user_id() @@ -963,14 +975,14 @@ bucket_definitions: const checkpoint2 = await getCheckpointLines(iter); expect( (checkpoint2[0] as StreamingSyncCheckpointDiff).checkpoint_diff?.updated_buckets?.map((b) => b.bucket) - ).toEqual(['by_user["user1"]']); + ).toEqual([bucketRequest(syncRules, 'by_user["user1"]')]); expect(checkpoint2).toMatchSnapshot(); }); test('expiring token', async (context) => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); @@ -1015,7 +1027,7 @@ bucket_definitions: await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); @@ -1158,7 +1170,7 @@ bucket_definitions: test('write checkpoint', async () => { await using f = await factory(); - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: BASIC_SYNC_RULES }); @@ -1229,7 +1241,7 @@ config: `; for (let i = 0; i < 2; i++) { - const syncRules = await f.updateSyncRules({ + const syncRules = await updateSyncRules(f, { content: rules }); const bucketStorage = f.getInstance(syncRules); diff --git a/packages/service-core-tests/src/tests/util.ts b/packages/service-core-tests/src/tests/util.ts index 67e90f11c..188d566da 100644 --- a/packages/service-core-tests/src/tests/util.ts +++ b/packages/service-core-tests/src/tests/util.ts @@ -1,3 +1,24 @@ +import { storage } from '@powersync/service-core'; import { test_utils } from '../index.js'; export const TEST_TABLE = test_utils.makeTestTable('test', ['id']); + +export function bucketRequest(syncRules: storage.PersistedSyncRulesContent, bucketName: string): string { + if (/^\d+#/.test(bucketName)) { + return bucketName; + } + + const versionedBuckets = storage.STORAGE_VERSION_CONFIG[syncRules.storageVersion]?.versionedBuckets ?? false; + return versionedBuckets ? 
`${syncRules.id}#${bucketName}` : bucketName;
+}
+
+export function bucketRequests(syncRules: storage.PersistedSyncRulesContent, bucketNames: string[]): string[] {
+  return bucketNames.map((bucketName) => bucketRequest(syncRules, bucketName));
+}
+
+export function bucketRequestMap(
+  syncRules: storage.PersistedSyncRulesContent,
+  buckets: Iterable<[string, bigint]>
+): Map<string, bigint> {
+  return new Map(Array.from(buckets, ([bucketName, opId]) => [bucketRequest(syncRules, bucketName), opId]));
+}
diff --git a/packages/service-core/src/routes/endpoints/admin.ts b/packages/service-core/src/routes/endpoints/admin.ts
index 04aba5e8e..c84f2264f 100644
--- a/packages/service-core/src/routes/endpoints/admin.ts
+++ b/packages/service-core/src/routes/endpoints/admin.ts
@@ -2,6 +2,7 @@ import { ErrorCode, errors, router, schema } from '@powersync/lib-services-frame
 import { SqlSyncRules, StaticSchema } from '@powersync/service-sync-rules';
 import { internal_routes } from '@powersync/service-types';
 
+import { DEFAULT_HYDRATION_STATE } from '@powersync/service-sync-rules';
 import * as api from '../../api/api-index.js';
 import * as storage from '../../storage/storage-index.js';
 import { authApi } from '../auth.js';
@@ -173,6 +174,7 @@ export const validate = routeDefinition({
         slot_name: '',
         active: false,
         last_checkpoint_lsn: '',
+        storageVersion: storage.LEGACY_STORAGE_VERSION,
 
         parsed() {
           return {
@@ -182,7 +184,7 @@ export const validate = routeDefinition({
               schema
             }),
             hydratedSyncRules() {
-              return this.sync_rules.config.hydrate();
+              return this.sync_rules.config.hydrate({ hydrationState: DEFAULT_HYDRATION_STATE });
             }
           };
         },
diff --git a/packages/service-core/src/storage/BucketStorageFactory.ts b/packages/service-core/src/storage/BucketStorageFactory.ts
index 355efec84..bc2f9bde5 100644
--- a/packages/service-core/src/storage/BucketStorageFactory.ts
+++ b/packages/service-core/src/storage/BucketStorageFactory.ts
@@ -122,6 +122,7 @@ export interface UpdateSyncRulesOptions {
   content: string;
   lock?: boolean;
   validate?: boolean;
+  storageVersion?: number;
 }
 
 export interface GetIntanceOptions {
diff --git a/packages/service-core/src/storage/PersistedSyncRulesContent.ts b/packages/service-core/src/storage/PersistedSyncRulesContent.ts
index b8b40f7f9..66b0d4f7a 100644
--- a/packages/service-core/src/storage/PersistedSyncRulesContent.ts
+++ b/packages/service-core/src/storage/PersistedSyncRulesContent.ts
@@ -14,6 +14,8 @@ export interface PersistedSyncRulesContent {
    */
   readonly active: boolean;
 
+  readonly storageVersion: number;
+
   readonly last_checkpoint_lsn: string | null;
 
   readonly last_fatal_error?: string | null;
diff --git a/packages/service-core/src/storage/StorageVersionConfig.ts b/packages/service-core/src/storage/StorageVersionConfig.ts
new file mode 100644
index 000000000..a9f171459
--- /dev/null
+++ b/packages/service-core/src/storage/StorageVersionConfig.ts
@@ -0,0 +1,30 @@
+export interface StorageVersionConfig {
+  /**
+   * Whether versioned bucket names are automatically enabled.
+   *
+   * If this is false, bucket names may still be versioned depending on the sync config.
+   */
+  versionedBuckets: boolean;
+}
+
+/**
+ * Oldest supported storage version.
+ */
+export const LEGACY_STORAGE_VERSION = 1;
+
+/**
+ * Default storage version for newly persisted sync rules.
+ */
+export const CURRENT_STORAGE_VERSION = 2;
+
+/**
+ * Shared storage-version behavior across storage implementations.
+ */
+export const STORAGE_VERSION_CONFIG: Record<number, StorageVersionConfig> = {
+  [LEGACY_STORAGE_VERSION]: {
+    versionedBuckets: false
+  },
+  [CURRENT_STORAGE_VERSION]: {
+    versionedBuckets: true
+  }
+};
diff --git a/packages/service-core/src/storage/storage-index.ts b/packages/service-core/src/storage/storage-index.ts
index b83a2fb2f..7c9845550 100644
--- a/packages/service-core/src/storage/storage-index.ts
+++ b/packages/service-core/src/storage/storage-index.ts
@@ -14,3 +14,4 @@ export * from './SyncRulesBucketStorage.js';
 export * from './PersistedSyncRulesContent.js';
 export * from './ReplicationLock.js';
 export * from './ReportStorage.js';
+export * from './StorageVersionConfig.js';
diff --git a/packages/service-core/src/sync/BucketChecksumState.ts b/packages/service-core/src/sync/BucketChecksumState.ts
index 0130ab85c..253c5dabd 100644
--- a/packages/service-core/src/sync/BucketChecksumState.ts
+++ b/packages/service-core/src/sync/BucketChecksumState.ts
@@ -1,8 +1,10 @@
 import {
   BucketDescription,
+  BucketParameterQuerier,
   BucketPriority,
   BucketSource,
   HydratedSyncRules,
+  QuerierError,
   RequestedStream,
   RequestParameters,
   ResolvedBucket
@@ -18,7 +20,6 @@ import {
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { BucketParameterQuerier, QuerierError } from '@powersync/service-sync-rules/src/BucketParameterQuerier.js';
 import { JwtPayload } from '../auth/JwtPayload.js';
 import { SyncContext } from './SyncContext.js';
 import { getIntersection, hasIntersection } from './util.js';
diff --git a/packages/service-core/test/src/routes/stream.test.ts b/packages/service-core/test/src/routes/stream.test.ts
index b59a2764a..dce17e55b 100644
--- a/packages/service-core/test/src/routes/stream.test.ts
+++ b/packages/service-core/test/src/routes/stream.test.ts
@@ -8,6 +8,7 @@ import winston from 'winston';
 import { syncStreamed } from '../../../src/routes/endpoints/sync-stream.js';
 import { DEFAULT_PARAM_LOGGING_FORMAT_OPTIONS, limitParamsForLogging } from '../../../src/util/param-logging.js';
 import { mockServiceContext } from './mocks.js';
+import { DEFAULT_HYDRATION_STATE } from '@powersync/service-sync-rules';
 
 describe('Stream Route', () => {
   describe('compressed stream', () => {
@@ -45,7 +46,7 @@ describe('Stream Route', () => {
       const storage = {
         getParsedSyncRules() {
-          return new SqlSyncRules('bucket_definitions: {}').hydrate();
+          return new SqlSyncRules('bucket_definitions: {}').hydrate({ hydrationState: DEFAULT_HYDRATION_STATE });
         },
         watchCheckpointChanges: async function* (options) {
           throw new Error('Simulated storage error');
@@ -83,7 +84,7 @@ describe('Stream Route', () => {
     it('logs the application metadata', async () => {
       const storage = {
         getParsedSyncRules() {
-          return new SqlSyncRules('bucket_definitions: {}').hydrate();
+          return new SqlSyncRules('bucket_definitions: {}').hydrate({ hydrationState: DEFAULT_HYDRATION_STATE });
         },
         watchCheckpointChanges: async function* (options) {
           throw new Error('Simulated storage error');
diff --git a/packages/service-core/test/src/sync/BucketChecksumState.test.ts b/packages/service-core/test/src/sync/BucketChecksumState.test.ts
index 95d36df88..ed4211c0e 100644
--- a/packages/service-core/test/src/sync/BucketChecksumState.test.ts
+++ b/packages/service-core/test/src/sync/BucketChecksumState.test.ts
@@ -13,8 +13,13 @@ import {
   WatchFilterEvent
 } from '@/index.js';
 import { JSONBig } from '@powersync/service-jsonbig';
-import { RequestJwtPayload, ScopedParameterLookup, SqliteJsonRow, SqlSyncRules } from '@powersync/service-sync-rules';
-import { versionedHydrationState } from '@powersync/service-sync-rules/src/HydrationState.js'; +import { + RequestJwtPayload, + ScopedParameterLookup, + SqliteJsonRow, + SqlSyncRules, + versionedHydrationState +} from '@powersync/service-sync-rules'; import { beforeEach, describe, expect, test } from 'vitest'; describe('BucketChecksumState', () => { @@ -65,7 +70,7 @@ bucket_definitions: test('global bucket with update', async () => { const storage = new MockBucketChecksumStateStorage(); // Set intial state - storage.updateTestChecksum({ bucket: 'global[]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '1#global[]', checksum: 1, count: 1 }); const state = new BucketChecksumState({ syncContext, @@ -83,7 +88,7 @@ bucket_definitions: line.advance(); expect(line.checkpointLine).toEqual({ checkpoint: { - buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }], + buckets: [{ bucket: '1#global[]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }], last_op_id: '1', write_checkpoint: undefined, streams: [{ name: 'global', is_default: true, errors: [] }] @@ -91,26 +96,26 @@ bucket_definitions: }); expect(line.bucketsToFetch).toEqual([ { - bucket: 'global[]', + bucket: '1#global[]', priority: 3 } ]); // This is the bucket data to be fetched - expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 0n]])); + expect(line.getFilteredBucketPositions()).toEqual(new Map([['1#global[]', 0n]])); // This similuates the bucket data being sent line.advance(); - line.updateBucketPosition({ bucket: 'global[]', nextAfter: 1n, hasMore: false }); + line.updateBucketPosition({ bucket: '1#global[]', nextAfter: 1n, hasMore: false }); // Update bucket storage state - storage.updateTestChecksum({ bucket: 'global[]', checksum: 2, count: 2 }); + storage.updateTestChecksum({ bucket: '1#global[]', checksum: 2, count: 2 }); // Now we get a new line const line2 = (await state.buildNextCheckpointLine({ base: storage.makeCheckpoint(2n), writeCheckpoint: null, update: { - updatedDataBuckets: new Set(['global[]']), + updatedDataBuckets: new Set(['1#global[]']), invalidateDataBuckets: false, updatedParameterLookups: new Set(), invalidateParameterBuckets: false @@ -120,12 +125,14 @@ bucket_definitions: expect(line2.checkpointLine).toEqual({ checkpoint_diff: { removed_buckets: [], - updated_buckets: [{ bucket: 'global[]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] }], + updated_buckets: [ + { bucket: '1#global[]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] } + ], last_op_id: '2', write_checkpoint: undefined } }); - expect(line2.getFilteredBucketPositions()).toEqual(new Map([['global[]', 1n]])); + expect(line2.getFilteredBucketPositions()).toEqual(new Map([['1#global[]', 1n]])); }); test('global bucket with initial state', async () => { @@ -134,13 +141,13 @@ bucket_definitions: /// (getFilteredBucketStates) const storage = new MockBucketChecksumStateStorage(); // Set intial state - storage.updateTestChecksum({ bucket: 'global[]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '1#global[]', checksum: 1, count: 1 }); const state = new BucketChecksumState({ syncContext, tokenPayload, // Client sets the initial state here - syncRequest: { buckets: [{ name: 'global[]', after: '1' }] }, + syncRequest: { buckets: [{ name: '1#global[]', after: '1' }] }, syncRules: SYNC_RULES_GLOBAL, bucketStorage: storage }); @@ -153,7 +160,7 @@ bucket_definitions: line.advance(); 
expect(line.checkpointLine).toEqual({ checkpoint: { - buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }], + buckets: [{ bucket: '1#global[]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }], last_op_id: '1', write_checkpoint: undefined, streams: [{ name: 'global', is_default: true, errors: [] }] @@ -161,19 +168,19 @@ bucket_definitions: }); expect(line.bucketsToFetch).toEqual([ { - bucket: 'global[]', + bucket: '1#global[]', priority: 3 } ]); // This is the main difference between this and the previous test - expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 1n]])); + expect(line.getFilteredBucketPositions()).toEqual(new Map([['1#global[]', 1n]])); }); test('multiple static buckets', async () => { const storage = new MockBucketChecksumStateStorage(); // Set intial state - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 1, count: 1 }); - storage.updateTestChecksum({ bucket: 'global[2]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '2#global[2]', checksum: 1, count: 1 }); const state = new BucketChecksumState({ syncContext, @@ -191,8 +198,8 @@ bucket_definitions: expect(line.checkpointLine).toEqual({ checkpoint: { buckets: [ - { bucket: 'global[1]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }, - { bucket: 'global[2]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] } + { bucket: '2#global[1]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }, + { bucket: '2#global[2]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] } ], last_op_id: '1', write_checkpoint: undefined, @@ -201,25 +208,25 @@ bucket_definitions: }); expect(line.bucketsToFetch).toEqual([ { - bucket: 'global[1]', + bucket: '2#global[1]', priority: 3 }, { - bucket: 'global[2]', + bucket: '2#global[2]', priority: 3 } ]); line.advance(); - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 }); - storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 2, count: 2 }); + storage.updateTestChecksum({ bucket: '2#global[2]', checksum: 2, count: 2 }); const line2 = (await state.buildNextCheckpointLine({ base: storage.makeCheckpoint(2n), writeCheckpoint: null, update: { ...CHECKPOINT_INVALIDATE_ALL, - updatedDataBuckets: new Set(['global[1]', 'global[2]']), + updatedDataBuckets: new Set(['2#global[1]', '2#global[2]']), invalidateDataBuckets: false } }))!; @@ -227,8 +234,8 @@ bucket_definitions: checkpoint_diff: { removed_buckets: [], updated_buckets: [ - { bucket: 'global[1]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] }, - { bucket: 'global[2]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] } + { bucket: '2#global[1]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] }, + { bucket: '2#global[2]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] } ], last_op_id: '2', write_checkpoint: undefined @@ -251,7 +258,7 @@ bucket_definitions: bucketStorage: storage }); - storage.updateTestChecksum({ bucket: 'global[]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '1#global[]', checksum: 1, count: 1 }); const line = (await state.buildNextCheckpointLine({ base: storage.makeCheckpoint(1n), @@ -261,7 +268,7 @@ bucket_definitions: line.advance(); 
expect(line.checkpointLine).toEqual({ checkpoint: { - buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }], + buckets: [{ bucket: '1#global[]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }], last_op_id: '1', write_checkpoint: undefined, streams: [{ name: 'global', is_default: true, errors: [] }] @@ -269,11 +276,11 @@ bucket_definitions: }); expect(line.bucketsToFetch).toEqual([ { - bucket: 'global[]', + bucket: '1#global[]', priority: 3 } ]); - expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 0n]])); + expect(line.getFilteredBucketPositions()).toEqual(new Map([['1#global[]', 0n]])); }); test('invalidating individual bucket', async () => { @@ -281,8 +288,8 @@ bucket_definitions: const storage = new MockBucketChecksumStateStorage(); // Set initial state - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 1, count: 1 }); - storage.updateTestChecksum({ bucket: 'global[2]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '2#global[2]', checksum: 1, count: 1 }); const state = new BucketChecksumState({ syncContext, @@ -301,11 +308,11 @@ bucket_definitions: update: CHECKPOINT_INVALIDATE_ALL }); line!.advance(); - line!.updateBucketPosition({ bucket: 'global[1]', nextAfter: 1n, hasMore: false }); - line!.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: false }); + line!.updateBucketPosition({ bucket: '2#global[1]', nextAfter: 1n, hasMore: false }); + line!.updateBucketPosition({ bucket: '2#global[2]', nextAfter: 1n, hasMore: false }); - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 }); - storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 2, count: 2 }); + storage.updateTestChecksum({ bucket: '2#global[2]', checksum: 2, count: 2 }); const line2 = (await state.buildNextCheckpointLine({ base: storage.makeCheckpoint(2n), @@ -315,7 +322,7 @@ bucket_definitions: // Invalidate the state for global[1] - will only re-check the single bucket. // This is essentially inconsistent state, but is the simplest way to test that // the filter is working. - updatedDataBuckets: new Set(['global[1]']), + updatedDataBuckets: new Set(['2#global[1]']), invalidateDataBuckets: false } }))!; @@ -324,13 +331,13 @@ bucket_definitions: removed_buckets: [], updated_buckets: [ // This does not include global[2], since it was not invalidated. 
- { bucket: 'global[1]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] } + { bucket: '2#global[1]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] } ], last_op_id: '2', write_checkpoint: undefined } }); - expect(line2.bucketsToFetch).toEqual([{ bucket: 'global[1]', priority: 3 }]); + expect(line2.bucketsToFetch).toEqual([{ bucket: '2#global[1]', priority: 3 }]); }); test('invalidating all buckets', async () => { @@ -349,8 +356,8 @@ bucket_definitions: // storage.filter = state.checkpointFilter; // Set initial state - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 1, count: 1 }); - storage.updateTestChecksum({ bucket: 'global[2]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '2#global[2]', checksum: 1, count: 1 }); const line = await state.buildNextCheckpointLine({ base: storage.makeCheckpoint(1n), @@ -360,8 +367,8 @@ bucket_definitions: line!.advance(); - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 }); - storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 2, count: 2 }); + storage.updateTestChecksum({ bucket: '2#global[2]', checksum: 2, count: 2 }); const line2 = (await state.buildNextCheckpointLine({ base: storage.makeCheckpoint(2n), @@ -373,24 +380,24 @@ bucket_definitions: checkpoint_diff: { removed_buckets: [], updated_buckets: [ - { bucket: 'global[1]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] }, - { bucket: 'global[2]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] } + { bucket: '2#global[1]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] }, + { bucket: '2#global[2]', checksum: 2, count: 2, priority: 3, subscriptions: [{ default: 0 }] } ], last_op_id: '2', write_checkpoint: undefined } }); expect(line2.bucketsToFetch).toEqual([ - { bucket: 'global[1]', priority: 3 }, - { bucket: 'global[2]', priority: 3 } + { bucket: '2#global[1]', priority: 3 }, + { bucket: '2#global[2]', priority: 3 } ]); }); test('interrupt and resume static buckets checkpoint', async () => { const storage = new MockBucketChecksumStateStorage(); // Set intial state - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 3, count: 3 }); - storage.updateTestChecksum({ bucket: 'global[2]', checksum: 3, count: 3 }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 3, count: 3 }); + storage.updateTestChecksum({ bucket: '2#global[2]', checksum: 3, count: 3 }); const state = new BucketChecksumState({ syncContext, @@ -409,8 +416,8 @@ bucket_definitions: expect(line.checkpointLine).toEqual({ checkpoint: { buckets: [ - { bucket: 'global[1]', checksum: 3, count: 3, priority: 3, subscriptions: [{ default: 0 }] }, - { bucket: 'global[2]', checksum: 3, count: 3, priority: 3, subscriptions: [{ default: 0 }] } + { bucket: '2#global[1]', checksum: 3, count: 3, priority: 3, subscriptions: [{ default: 0 }] }, + { bucket: '2#global[2]', checksum: 3, count: 3, priority: 3, subscriptions: [{ default: 0 }] } ], last_op_id: '3', write_checkpoint: undefined, @@ -419,11 +426,11 @@ bucket_definitions: }); expect(line.bucketsToFetch).toEqual([ { - bucket: 'global[1]', + bucket: '2#global[1]', priority: 3 }, { - bucket: 'global[2]', + bucket: '2#global[2]', priority: 3 } ]); @@ -431,17 +438,17 @@ bucket_definitions: // This is the bucket data to be fetched 
expect(line.getFilteredBucketPositions()).toEqual( new Map([ - ['global[1]', 0n], - ['global[2]', 0n] + ['2#global[1]', 0n], + ['2#global[2]', 0n] ]) ); // No data changes here. // We simulate partial data sent, before a checkpoint is interrupted. line.advance(); - line.updateBucketPosition({ bucket: 'global[1]', nextAfter: 3n, hasMore: false }); - line.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: true }); - storage.updateTestChecksum({ bucket: 'global[1]', checksum: 4, count: 4 }); + line.updateBucketPosition({ bucket: '2#global[1]', nextAfter: 3n, hasMore: false }); + line.updateBucketPosition({ bucket: '2#global[2]', nextAfter: 1n, hasMore: true }); + storage.updateTestChecksum({ bucket: '2#global[1]', checksum: 4, count: 4 }); const line2 = (await state.buildNextCheckpointLine({ base: storage.makeCheckpoint(4n), @@ -449,7 +456,7 @@ bucket_definitions: update: { ...CHECKPOINT_INVALIDATE_ALL, invalidateDataBuckets: false, - updatedDataBuckets: new Set(['global[1]']) + updatedDataBuckets: new Set(['2#global[1]']) } }))!; line2.advance(); @@ -458,7 +465,7 @@ bucket_definitions: removed_buckets: [], updated_buckets: [ { - bucket: 'global[1]', + bucket: '2#global[1]', checksum: 4, count: 4, priority: 3, @@ -472,19 +479,19 @@ bucket_definitions: // This should contain both buckets, even though only one changed. expect(line2.bucketsToFetch).toEqual([ { - bucket: 'global[1]', + bucket: '2#global[1]', priority: 3 }, { - bucket: 'global[2]', + bucket: '2#global[2]', priority: 3 } ]); expect(line2.getFilteredBucketPositions()).toEqual( new Map([ - ['global[1]', 3n], - ['global[2]', 1n] + ['2#global[1]', 3n], + ['2#global[2]', 1n] ]) ); }); @@ -492,9 +499,9 @@ bucket_definitions: test('dynamic buckets with updates', async () => { const storage = new MockBucketChecksumStateStorage(); // Set intial state - storage.updateTestChecksum({ bucket: 'by_project[1]', checksum: 1, count: 1 }); - storage.updateTestChecksum({ bucket: 'by_project[2]', checksum: 1, count: 1 }); - storage.updateTestChecksum({ bucket: 'by_project[3]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '3#by_project[1]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '3#by_project[2]', checksum: 1, count: 1 }); + storage.updateTestChecksum({ bucket: '3#by_project[3]', checksum: 1, count: 1 }); const state = new BucketChecksumState({ syncContext, @@ -516,14 +523,14 @@ bucket_definitions: checkpoint: { buckets: [ { - bucket: 'by_project[1]', + bucket: '3#by_project[1]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }, { - bucket: 'by_project[2]', + bucket: '3#by_project[2]', checksum: 1, count: 1, priority: 3, @@ -543,11 +550,11 @@ bucket_definitions: }); expect(line.bucketsToFetch).toEqual([ { - bucket: 'by_project[1]', + bucket: '3#by_project[1]', priority: 3 }, { - bucket: 'by_project[2]', + bucket: '3#by_project[2]', priority: 3 } ]); @@ -555,14 +562,14 @@ bucket_definitions: // This is the bucket data to be fetched expect(line.getFilteredBucketPositions()).toEqual( new Map([ - ['by_project[1]', 0n], - ['by_project[2]', 0n] + ['3#by_project[1]', 0n], + ['3#by_project[2]', 0n] ]) ); line.advance(); - line.updateBucketPosition({ bucket: 'by_project[1]', nextAfter: 1n, hasMore: false }); - line.updateBucketPosition({ bucket: 'by_project[2]', nextAfter: 1n, hasMore: false }); + line.updateBucketPosition({ bucket: '3#by_project[1]', nextAfter: 1n, hasMore: false }); + line.updateBucketPosition({ bucket: '3#by_project[2]', nextAfter: 1n, hasMore: false }); // 
Now we get a new line const line2 = (await state.buildNextCheckpointLine({ @@ -584,7 +591,7 @@ bucket_definitions: removed_buckets: [], updated_buckets: [ { - bucket: 'by_project[3]', + bucket: '3#by_project[3]', checksum: 1, count: 1, priority: 3, @@ -595,7 +602,7 @@ bucket_definitions: write_checkpoint: undefined } }); - expect(line2.getFilteredBucketPositions()).toEqual(new Map([['by_project[3]', 0n]])); + expect(line2.getFilteredBucketPositions()).toEqual(new Map([['3#by_project[3]', 0n]])); }); describe('streams', () => { diff --git a/packages/service-errors/src/codes.ts b/packages/service-errors/src/codes.ts index 9ca5e2a1f..8a7b1a9e3 100644 --- a/packages/service-errors/src/codes.ts +++ b/packages/service-errors/src/codes.ts @@ -73,6 +73,12 @@ export enum ErrorCode { */ PSYNC_S1004 = 'PSYNC_S1004', + /** + * Storage version not supported. This could be caused by a downgrade to a version + * that does not support the current storage version. + */ + PSYNC_S1005 = 'PSYNC_S1005', + // ## PSYNC_S11xx: Postgres replication issues /** diff --git a/packages/sync-rules/src/SyncConfig.ts b/packages/sync-rules/src/SyncConfig.ts index a8103131d..19f42722f 100644 --- a/packages/sync-rules/src/SyncConfig.ts +++ b/packages/sync-rules/src/SyncConfig.ts @@ -33,17 +33,15 @@ export abstract class SyncConfig { /** * Hydrate the sync rule definitions with persisted state into runnable sync rules. * - * @param params.hydrationState Transforms bucket ids based on persisted state. May omit for tests. + * Note: versionedBucketIds is not checked here: It is set at a higher level based + * on the storage version of the persisted sync rules, and used in hydrationState. + * + * @param params.hydrationState Transforms bucket ids based on persisted state. */ - hydrate(params?: CreateSourceParams): HydratedSyncRules { - let hydrationState = params?.hydrationState; - if (hydrationState == null || !this.compatibility.isEnabled(CompatibilityOption.versionedBucketIds)) { - hydrationState = DEFAULT_HYDRATION_STATE; - } - const resolvedParams = { hydrationState }; + hydrate(params: CreateSourceParams): HydratedSyncRules { return new HydratedSyncRules({ definition: this, - createParams: resolvedParams, + createParams: params, bucketDataSources: this.bucketDataSources, bucketParameterIndexLookupCreators: this.bucketParameterLookupSources, eventDescriptors: this.eventDescriptors, diff --git a/packages/sync-rules/src/index.ts b/packages/sync-rules/src/index.ts index 7e37329e6..3c68e85e2 100644 --- a/packages/sync-rules/src/index.ts +++ b/packages/sync-rules/src/index.ts @@ -28,7 +28,7 @@ export * from './types.js'; export * from './types/custom_sqlite_value.js'; export * from './types/time.js'; export * from './utils.js'; -export { versionedHydrationState } from './HydrationState.js'; +export * from './HydrationState.js'; export * from './HydratedSyncRules.js'; export * from './compiler/compiler.js'; diff --git a/packages/sync-rules/test/src/compatibility.test.ts b/packages/sync-rules/test/src/compatibility.test.ts index 16973cf33..a7880dd31 100644 --- a/packages/sync-rules/test/src/compatibility.test.ts +++ b/packages/sync-rules/test/src/compatibility.test.ts @@ -9,7 +9,7 @@ import { toSyncRulesValue } from '../../src/index.js'; -import { versionedHydrationState } from '../../src/HydrationState.js'; +import { DEFAULT_HYDRATION_STATE, versionedHydrationState } from '../../src/HydrationState.js'; import { ASSETS, normalizeQuerierOptions, PARSE_OPTIONS } from './util.js'; describe('compatibility options', () => { @@ -28,7 
+28,7 @@ bucket_definitions: - SELECT id, description FROM assets `, PARSE_OPTIONS - ).config.hydrate(); + ).config.hydrate({ hydrationState: DEFAULT_HYDRATION_STATE }); expect( rules.evaluateRow({ @@ -55,7 +55,7 @@ config: timestamps_iso8601: true `, PARSE_OPTIONS - ).config.hydrate(); + ).config.hydrate({ hydrationState: DEFAULT_HYDRATION_STATE }); expect( rules.evaluateRow({ @@ -120,7 +120,12 @@ config: versioned_bucket_ids: false `, PARSE_OPTIONS - ).config.hydrate({ hydrationState: versionedHydrationState(1) }); + ).config.hydrate({ hydrationState: DEFAULT_HYDRATION_STATE }); + + expect(rules.compatibility.isEnabled(CompatibilityOption.timestampsIso8601)).toStrictEqual(false); + // This does not have a direct effect on the hydration here anymore - we now do that one level higher. + // We do still check that the option is parsed correctly. + expect(rules.compatibility.isEnabled(CompatibilityOption.versionedBucketIds)).toStrictEqual(false); expect( rules.evaluateRow({ @@ -144,32 +149,6 @@ config: }); }); - test('can use versioned bucket ids', () => { - const rules = SqlSyncRules.fromYaml( - ` -bucket_definitions: - mybucket: - data: - - SELECT id, description FROM assets - -config: - edition: 1 - versioned_bucket_ids: true - `, - PARSE_OPTIONS - ).config.hydrate({ hydrationState: versionedHydrationState(1) }); - - expect( - rules.evaluateRow({ - sourceTable: ASSETS, - record: { - id: 'id', - description: 'desc' - } - }) - ).toStrictEqual([{ bucket: '1#mybucket[]', data: { description: 'desc', id: 'id' }, id: 'id', table: 'assets' }]); - }); - test('streams use new options by default', () => { const rules = SqlSyncRules.fromYaml( ` @@ -211,7 +190,7 @@ bucket_definitions: - SELECT id, description ->> 'foo.bar' AS "desc" FROM assets `, PARSE_OPTIONS - ).config.hydrate(); + ).config.hydrate({ hydrationState: DEFAULT_HYDRATION_STATE }); expect( rules.evaluateRow({ @@ -235,7 +214,7 @@ config: fixed_json_extract: true `, PARSE_OPTIONS - ).config.hydrate(); + ).config.hydrate({ hydrationState: DEFAULT_HYDRATION_STATE }); expect( rules.evaluateRow({ @@ -291,7 +270,7 @@ config: } const rules = SqlSyncRules.fromYaml(syncRules, PARSE_OPTIONS).config.hydrate({ - hydrationState: versionedHydrationState(1) + hydrationState: DEFAULT_HYDRATION_STATE }); expect( rules.evaluateRow({ @@ -303,7 +282,7 @@ config: }) ).toStrictEqual([ { - bucket: withFixedQuirk ? '1#mybucket[]' : 'mybucket[]', + bucket: 'mybucket[]', data: { description: withFixedQuirk ? '["static value","2025-08-19T09:21:00Z"]' @@ -317,7 +296,7 @@ config: expect(rules.getBucketParameterQuerier(normalizeQuerierOptions({}, {}, {})).querier.staticBuckets).toStrictEqual([ { - bucket: withFixedQuirk ? 
'1#mybucket[]' : 'mybucket[]', + bucket: 'mybucket[]', definition: 'mybucket', inclusion_reasons: ['default'], priority: 3 diff --git a/packages/sync-rules/test/src/sync_plan/evaluator/utils.ts b/packages/sync-rules/test/src/sync_plan/evaluator/utils.ts index 29bc3e31a..2dc6c65d1 100644 --- a/packages/sync-rules/test/src/sync_plan/evaluator/utils.ts +++ b/packages/sync-rules/test/src/sync_plan/evaluator/utils.ts @@ -1,15 +1,16 @@ +import { test } from 'vitest'; import { - HydratedSyncRules, - versionedHydrationState, - javaScriptExpressionEngine, CompatibilityContext, CompatibilityEdition, - SyncConfig, - PrecompiledSyncConfig + CreateSourceParams, + DEFAULT_HYDRATION_STATE, + HydratedSyncRules, + javaScriptExpressionEngine, + PrecompiledSyncConfig, + SyncConfig } from '../../../../src/index.js'; -import { compileToSyncPlanWithoutErrors } from '../../compiler/utils.js'; -import { test } from 'vitest'; import { ScalarExpressionEngine } from '../../../../src/sync_plan/engine/scalar_expression_engine.js'; +import { compileToSyncPlanWithoutErrors } from '../../compiler/utils.js'; interface SyncTest { engine: ScalarExpressionEngine; @@ -27,8 +28,8 @@ export const syncTest = test.extend<{ sync: SyncTest }>({ const plan = compileToSyncPlanWithoutErrors(inputs); return new PrecompiledSyncConfig(plan, { engine, sourceText: '', defaultSchema: 'test_schema' }); }, - prepareSyncStreams(inputs) { - return this.prepareWithoutHydration(inputs).hydrate({ hydrationState: versionedHydrationState(1) }); + prepareSyncStreams(inputs, params?: CreateSourceParams) { + return this.prepareWithoutHydration(inputs).hydrate(params ?? { hydrationState: DEFAULT_HYDRATION_STATE }); } });
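
To illustrate how the storage-version config introduced in this patch drives bucket naming, here is a minimal sketch. It is not part of the patch: the object literals standing in for persisted sync rules rows and the local resolveBucketName helper are hypothetical, mirroring the bucketRequest test utility added in packages/service-core-tests/src/tests/util.ts, and it assumes only the exports added here (STORAGE_VERSION_CONFIG, LEGACY_STORAGE_VERSION, CURRENT_STORAGE_VERSION) re-exported through @powersync/service-core's storage index.

import { storage } from '@powersync/service-core';

// Hypothetical stand-ins for persisted sync rules rows; only the fields used below are shown.
const legacyRules = { id: 1, storageVersion: storage.LEGACY_STORAGE_VERSION };
const currentRules = { id: 2, storageVersion: storage.CURRENT_STORAGE_VERSION };

// Mirrors the bucketRequest() test helper: prefix the bucket name with the sync rules id
// only when the persisted storage version enables versioned bucket names.
function resolveBucketName(rules: { id: number; storageVersion: number }, bucketName: string): string {
  const versionedBuckets = storage.STORAGE_VERSION_CONFIG[rules.storageVersion]?.versionedBuckets ?? false;
  return versionedBuckets ? `${rules.id}#${bucketName}` : bucketName;
}

resolveBucketName(legacyRules, 'global[]'); // => 'global[]'   (legacy storage, version 1)
resolveBucketName(currentRules, 'global[]'); // => '2#global[]' (current storage, version 2)

This is the same split the updated tests exercise: legacy-storage expectations keep plain bucket ids such as 'global[]', while current-storage expectations use ids like '2#global[1]'.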