diff --git a/.changeset/big-guests-hunt.md b/.changeset/big-guests-hunt.md new file mode 100644 index 00000000..eee9c040 --- /dev/null +++ b/.changeset/big-guests-hunt.md @@ -0,0 +1,7 @@ +--- +'@powersync/service-core': patch +'@powersync/lib-services-framework': patch +'@powersync/service-sync-rules': patch +--- + +Updated ts-codec to 1.3.0 for better decode error responses diff --git a/.changeset/hungry-brooms-sniff.md b/.changeset/hungry-brooms-sniff.md new file mode 100644 index 00000000..0c5f7549 --- /dev/null +++ b/.changeset/hungry-brooms-sniff.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-module-postgres': minor +--- + +Added minor typing utilities diff --git a/.changeset/ninety-cycles-accept.md b/.changeset/ninety-cycles-accept.md new file mode 100644 index 00000000..f8d8ea18 --- /dev/null +++ b/.changeset/ninety-cycles-accept.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-core-tests': minor +--- + +Initial release of shared tests for different sync bucket storage providers diff --git a/.changeset/poor-pandas-obey.md b/.changeset/poor-pandas-obey.md new file mode 100644 index 00000000..066af947 --- /dev/null +++ b/.changeset/poor-pandas-obey.md @@ -0,0 +1,9 @@ +--- +'@powersync/service-module-mongodb': minor +'@powersync/service-core': minor +'@powersync/service-types': minor +'@powersync/service-module-mongodb-storage': minor +'@powersync/lib-service-mongodb': minor +--- + +Moved MongoDB sync bucket storage implementation to the MongoDB module. diff --git a/.changeset/wicked-papayas-hide.md b/.changeset/wicked-papayas-hide.md new file mode 100644 index 00000000..5be20687 --- /dev/null +++ b/.changeset/wicked-papayas-hide.md @@ -0,0 +1,5 @@ +--- +'@powersync/lib-services-framework': minor +--- + +Made migrations more pluggable diff --git a/.github/workflows/packages_release.yaml b/.github/workflows/packages_release.yaml index 47c00f7e..ecefe02c 100644 --- a/.github/workflows/packages_release.yaml +++ b/.github/workflows/packages_release.yaml @@ -55,7 +55,6 @@ jobs: # This expects you to have a script called release which does a build for your packages and calls changeset publish publish: pnpm release env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - name: Check if @powersync/service-image Released diff --git a/README.md b/README.md index 1e9ac4fb..109346b9 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,28 @@ The service can be started using the public Docker image. See the image [notes]( - A light-weight set of definitions and utilities for micro services +- [libs/lib-mongodb](./libs/lib-mongodb/README.md) + + - A light-weight set of common logic and types for the MongoDB replication and storage modules. + +## Modules + +- [modules/module-mongodb](./modules/module-mongodb/README.md) + + - MongoDB replication module. + +- [modules/module-mongodb-storage](./modules/module-mongodb-storage/README.md) + + - MongoDB bucket storage module. + +- [modules/module-mysql](./modules/module-mysql/README.md) + + - MySQL replication module. + +- [modules/module-postgres](./modules/module-postgres/README.md) + + - Postgres replication module. 
+ ## Service - [service](./service/README.md) diff --git a/libs/lib-mongodb/CHANGELOG.md b/libs/lib-mongodb/CHANGELOG.md new file mode 100644 index 00000000..ea9b3e1b --- /dev/null +++ b/libs/lib-mongodb/CHANGELOG.md @@ -0,0 +1 @@ +# @powersync/lib-service-mongodb diff --git a/libs/lib-mongodb/LICENSE b/libs/lib-mongodb/LICENSE new file mode 100644 index 00000000..c8efd46c --- /dev/null +++ b/libs/lib-mongodb/LICENSE @@ -0,0 +1,67 @@ +# Functional Source License, Version 1.1, Apache 2.0 Future License + +## Abbreviation + +FSL-1.1-Apache-2.0 + +## Notice + +Copyright 2023-2024 Journey Mobile, Inc. + +## Terms and Conditions + +### Licensor ("We") + +The party offering the Software under these Terms and Conditions. + +### The Software + +The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software. + +### License Grant + +Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below. + +### Permitted Purpose + +A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that: + +1. substitutes for the Software; +2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or +3. offers the same or substantially similar functionality as the Software. + +Permitted Purposes specifically include using the Software: + +1. for your internal use and access; +2. for non-commercial education; +3. for non-commercial research; and +4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. + +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. + +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. 
+ +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/libs/lib-mongodb/README.md b/libs/lib-mongodb/README.md new file mode 100644 index 00000000..cfe412d5 --- /dev/null +++ b/libs/lib-mongodb/README.md @@ -0,0 +1,3 @@ +# PowerSync Service MongoDB + +Library for common MongoDB logic used in the PowerSync service. diff --git a/libs/lib-mongodb/package.json b/libs/lib-mongodb/package.json new file mode 100644 index 00000000..6963d388 --- /dev/null +++ b/libs/lib-mongodb/package.json @@ -0,0 +1,38 @@ +{ + "name": "@powersync/lib-service-mongodb", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "version": "0.1.8", + "main": "dist/index.js", + "license": "FSL-1.1-Apache-2.0", + "type": "module", + "publishConfig": { + "access": "public" + }, + "scripts": { + "build": "tsc -b", + "build:tests": "tsc -b test/tsconfig.json", + "clean": "rm -rf ./dist && tsc -b --clean", + "test": "vitest" + }, + "exports": { + ".": { + "import": "./dist/index.js", + "require": "./dist/index.js", + "default": "./dist/index.js" + }, + "./types": { + "import": "./dist/types/types.js", + "require": "./dist/types/types.js", + "default": "./dist/types/types.js" + } + }, + "dependencies": { + "@powersync/lib-services-framework": "workspace:*", + "bson": "^6.8.0", + "mongodb": "^6.11.0", + "ts-codec": "^1.3.0", + "uri-js": "^4.4.1" + }, + "devDependencies": {} +} diff --git a/libs/lib-mongodb/src/db/db-index.ts b/libs/lib-mongodb/src/db/db-index.ts new file mode 100644 index 00000000..d57b1427 --- /dev/null +++ b/libs/lib-mongodb/src/db/db-index.ts @@ -0,0 +1 @@ +export * from './mongo.js'; diff --git a/packages/service-core/src/db/mongo.ts b/libs/lib-mongodb/src/db/mongo.ts similarity index 91% rename from packages/service-core/src/db/mongo.ts rename to libs/lib-mongodb/src/db/mongo.ts index f687705b..28801408 100644 --- a/packages/service-core/src/db/mongo.ts +++ b/libs/lib-mongodb/src/db/mongo.ts @@ -1,8 +1,6 @@ import * as mongo from 'mongodb'; import * as timers from 'timers/promises'; - -import { configFile } from '@powersync/service-types'; -import { normalizeMongoConfig } from '../storage/storage-index.js'; +import { BaseMongoConfigDecoded, normalizeMongoConfig } from '../types/types.js'; /** * Time for new connection to timeout. 
@@ -30,7 +28,7 @@ export const MONGO_OPERATION_TIMEOUT_MS = 30_000; */ export const MONGO_CLEAR_OPERATION_TIMEOUT_MS = 5_000; -export function createMongoClient(config: configFile.PowerSyncConfig['storage']) { +export function createMongoClient(config: BaseMongoConfigDecoded) { const normalized = normalizeMongoConfig(config); return new mongo.MongoClient(normalized.uri, { auth: { diff --git a/libs/lib-mongodb/src/index.ts b/libs/lib-mongodb/src/index.ts new file mode 100644 index 00000000..6e824c53 --- /dev/null +++ b/libs/lib-mongodb/src/index.ts @@ -0,0 +1,8 @@ +export * from './db/db-index.js'; +export * as db from './db/db-index.js'; + +export * from './locks/locks-index.js'; +export * as locks from './locks/locks-index.js'; + +export * from './types/types.js'; +export * as types from './types/types.js'; diff --git a/libs/lib-mongodb/src/locks/MongoLockManager.ts b/libs/lib-mongodb/src/locks/MongoLockManager.ts new file mode 100644 index 00000000..de9f0c6f --- /dev/null +++ b/libs/lib-mongodb/src/locks/MongoLockManager.ts @@ -0,0 +1,123 @@ +import * as framework from '@powersync/lib-services-framework'; +import * as bson from 'bson'; +import * as mongo from 'mongodb'; + +/** + * Lock Document Schema + */ +export type Lock = { + name: string; + active_lock?: { + lock_id: bson.ObjectId; + ts: Date; + }; +}; + +export type Collection = mongo.Collection; + +export type MongoLockManagerParams = framework.locks.LockManagerParams & { + collection: Collection; +}; + +const DEFAULT_LOCK_TIMEOUT = 60 * 1000; // 1 minute + +export class MongoLockManager extends framework.locks.AbstractLockManager { + collection: Collection; + constructor(params: MongoLockManagerParams) { + super(params); + this.collection = params.collection; + } + + protected async acquireHandle(options?: framework.LockAcquireOptions): Promise { + const lock_id = await this.getHandle(); + if (!lock_id) { + return null; + } + return { + refresh: () => this.refreshHandle(lock_id), + release: () => this.releaseHandle(lock_id) + }; + } + + protected async refreshHandle(lock_id: bson.ObjectId) { + const res = await this.collection.updateOne( + { + 'active_lock.lock_id': lock_id + }, + { + $set: { + 'active_lock.ts': new Date() + } + } + ); + + if (res.modifiedCount === 0) { + throw new Error('Lock not found, could not refresh'); + } + } + + protected async getHandle() { + const now = new Date(); + const lock_timeout = this.params.timeout ?? 
DEFAULT_LOCK_TIMEOUT; + const lock_id = new bson.ObjectId(); + + const { name } = this.params; + await this.collection.updateOne( + { + name + }, + { + $setOnInsert: { + name + } + }, + { + upsert: true + } + ); + + const expired_ts = now.getTime() - lock_timeout; + + const res = await this.collection.updateOne( + { + $and: [ + { name: name }, + { + $or: [{ active_lock: { $exists: false } }, { 'active_lock.ts': { $lte: new Date(expired_ts) } }] + } + ] + }, + { + $set: { + active_lock: { + lock_id: lock_id, + ts: now + } + } + } + ); + + if (res.modifiedCount === 0) { + return null; + } + + return lock_id; + } + + protected async releaseHandle(lock_id: bson.ObjectId) { + const res = await this.collection.updateOne( + { + 'active_lock.lock_id': lock_id + }, + { + $unset: { + active_lock: true + } + } + ); + + if (res.modifiedCount === 0) { + throw new Error('Lock not found, could not release'); + } + } +} diff --git a/libs/lib-mongodb/src/locks/locks-index.ts b/libs/lib-mongodb/src/locks/locks-index.ts new file mode 100644 index 00000000..f64c493c --- /dev/null +++ b/libs/lib-mongodb/src/locks/locks-index.ts @@ -0,0 +1 @@ +export * from './MongoLockManager.js'; diff --git a/libs/lib-mongodb/src/types/types.ts b/libs/lib-mongodb/src/types/types.ts new file mode 100644 index 00000000..3875d108 --- /dev/null +++ b/libs/lib-mongodb/src/types/types.ts @@ -0,0 +1,56 @@ +import * as t from 'ts-codec'; +import * as urijs from 'uri-js'; + +export const MONGO_CONNECTION_TYPE = 'mongodb' as const; + +export const BaseMongoConfig = t.object({ + type: t.literal(MONGO_CONNECTION_TYPE), + uri: t.string, + database: t.string.optional(), + username: t.string.optional(), + password: t.string.optional() +}); + +export type BaseMongoConfig = t.Encoded; +export type BaseMongoConfigDecoded = t.Decoded; + +/** + * Construct a mongodb URI, without username, password or ssl options. + * + * Only contains hostname, port, database. + */ +export function baseUri(options: BaseMongoConfig) { + return options.uri; +} + +/** + * Validate and normalize connection options. + * + * Returns destructured options. + * + * For use by both storage and mongo module. + */ +export function normalizeMongoConfig(options: BaseMongoConfigDecoded) { + let uri = urijs.parse(options.uri); + + const database = options.database ?? uri.path?.substring(1) ?? ''; + + const userInfo = uri.userinfo?.split(':'); + + const username = options.username ?? userInfo?.[0]; + const password = options.password ?? 
userInfo?.[1]; + + if (database == '') { + throw new Error(`database required`); + } + + delete uri.userinfo; + + return { + uri: urijs.serialize(uri), + database, + + username, + password + }; +} diff --git a/libs/lib-mongodb/test/src/config.test.ts b/libs/lib-mongodb/test/src/config.test.ts new file mode 100644 index 00000000..c946cf12 --- /dev/null +++ b/libs/lib-mongodb/test/src/config.test.ts @@ -0,0 +1,12 @@ +import { describe, expect, test } from 'vitest'; +import { normalizeMongoConfig } from '../../src/types/types.js'; + +describe('config', () => { + test('Should resolve database', () => { + const normalized = normalizeMongoConfig({ + type: 'mongodb', + uri: 'mongodb://localhost:27017/powersync_test' + }); + expect(normalized.database).equals('powersync_test'); + }); +}); diff --git a/libs/lib-mongodb/test/tsconfig.json b/libs/lib-mongodb/test/tsconfig.json new file mode 100644 index 00000000..4ce40817 --- /dev/null +++ b/libs/lib-mongodb/test/tsconfig.json @@ -0,0 +1,18 @@ +{ + "extends": "../../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "baseUrl": "./", + "noEmit": true, + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "paths": {} + }, + "include": ["src"], + "references": [ + { + "path": "../" + } + ] +} diff --git a/libs/lib-mongodb/tsconfig.json b/libs/lib-mongodb/tsconfig.json new file mode 100644 index 00000000..a0ae425c --- /dev/null +++ b/libs/lib-mongodb/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true + }, + "include": ["src"], + "references": [] +} diff --git a/libs/lib-mongodb/vitest.config.ts b/libs/lib-mongodb/vitest.config.ts new file mode 100644 index 00000000..94ede10e --- /dev/null +++ b/libs/lib-mongodb/vitest.config.ts @@ -0,0 +1,3 @@ +import { defineConfig } from 'vitest/config'; + +export default defineConfig({}); diff --git a/libs/lib-services/package.json b/libs/lib-services/package.json index c606944b..a37348d6 100644 --- a/libs/lib-services/package.json +++ b/libs/lib-services/package.json @@ -22,10 +22,10 @@ "dependencies": { "ajv": "^8.12.0", "better-ajv-errors": "^1.2.0", - "bson": "^6.6.0", + "bson": "^6.8.0", "dotenv": "^16.4.5", "lodash": "^4.17.21", - "ts-codec": "^1.2.2", + "ts-codec": "^1.3.0", "uuid": "^9.0.1", "winston": "^3.13.0", "zod": "^3.23.8" diff --git a/libs/lib-services/src/container.ts b/libs/lib-services/src/container.ts index 4e015284..20f0f4db 100644 --- a/libs/lib-services/src/container.ts +++ b/libs/lib-services/src/container.ts @@ -1,18 +1,21 @@ import _ from 'lodash'; import { ErrorReporter } from './alerts/definitions.js'; import { NoOpReporter } from './alerts/no-op-reporter.js'; +import { MigrationManager } from './migrations/MigrationManager.js'; import { ProbeModule, TerminationHandler, createFSProbe, createTerminationHandler } from './signals/signals-index.js'; export enum ContainerImplementation { REPORTER = 'reporter', PROBES = 'probes', - TERMINATION_HANDLER = 'termination-handler' + TERMINATION_HANDLER = 'termination-handler', + MIGRATION_MANAGER = 'migration-manager' } export type ContainerImplementationTypes = { [ContainerImplementation.REPORTER]: ErrorReporter; [ContainerImplementation.PROBES]: ProbeModule; [ContainerImplementation.TERMINATION_HANDLER]: TerminationHandler; + [ContainerImplementation.MIGRATION_MANAGER]: MigrationManager; }; export type RegisterDefaultsOptions = { @@ -42,7 +45,8 @@ export 
type ServiceIdentifier = string | symbol | Newable | Abst const DEFAULT_GENERATORS: ContainerImplementationDefaultGenerators = { [ContainerImplementation.REPORTER]: () => NoOpReporter, [ContainerImplementation.PROBES]: () => createFSProbe(), - [ContainerImplementation.TERMINATION_HANDLER]: () => createTerminationHandler() + [ContainerImplementation.TERMINATION_HANDLER]: () => createTerminationHandler(), + [ContainerImplementation.MIGRATION_MANAGER]: () => new MigrationManager() }; /** @@ -73,6 +77,13 @@ export class Container { return this.getImplementation(ContainerImplementation.TERMINATION_HANDLER); } + /** + * Manager for system migrations. + */ + get migrationManager() { + return this.getImplementation(ContainerImplementation.MIGRATION_MANAGER); + } + constructor() { this.implementations = new Map(); } diff --git a/libs/lib-services/src/index.ts b/libs/lib-services/src/index.ts index fcbc2b21..3efc0de1 100644 --- a/libs/lib-services/src/index.ts +++ b/libs/lib-services/src/index.ts @@ -11,6 +11,12 @@ export * as errors from './errors/errors-index.js'; export * from './logger/Logger.js'; +export * from './locks/locks-index.js'; +export * as locks from './locks/locks-index.js'; + +export * from './migrations/migrations-index.js'; +export * as migrations from './migrations/migrations-index.js'; + export * from './schema/schema-index.js'; export * as schema from './schema/schema-index.js'; diff --git a/libs/lib-services/src/locks/AbstractLockManager.ts b/libs/lib-services/src/locks/AbstractLockManager.ts new file mode 100644 index 00000000..09c2dc56 --- /dev/null +++ b/libs/lib-services/src/locks/AbstractLockManager.ts @@ -0,0 +1,49 @@ +import { LockAcquireOptions, LockActiveError, LockCallback, LockHandle, LockManager } from './LockManager.js'; + +export type LockManagerParams = { + /** + * Name of the process/user trying to acquire the lock. + */ + name: string; + /** + * The TTL of the lock (ms). Default: 60000 ms (1 min) + */ + timeout?: number; +}; + +export abstract class AbstractLockManager implements LockManager { + constructor(protected params: LockManagerParams) {} + + /** + * Implementation specific method for acquiring a lock handle. + */ + protected abstract acquireHandle(): Promise; + + async acquire(options?: LockAcquireOptions): Promise { + const { max_wait_ms = 0 } = options ?? 
{}; + let handle: LockHandle | null = null; + const start = new Date(); + do { + handle = await this.acquireHandle(); + if (handle) { + return handle; + } else if (max_wait_ms) { + await new Promise((r) => setTimeout(r, max_wait_ms / 10)); + } + } while (new Date().getTime() - start.getTime() < max_wait_ms); + + return handle; + } + + async lock(handler: LockCallback, options?: LockAcquireOptions): Promise { + const handle = await this.acquire(options); + if (!handle) { + throw new LockActiveError(); + } + try { + await handler(() => handle.refresh()); + } finally { + await handle.release(); + } + } +} diff --git a/libs/lib-services/src/locks/LockManager.ts b/libs/lib-services/src/locks/LockManager.ts new file mode 100644 index 00000000..6323c929 --- /dev/null +++ b/libs/lib-services/src/locks/LockManager.ts @@ -0,0 +1,33 @@ +export class LockActiveError extends Error { + constructor() { + super('Lock is already active'); + this.name = this.constructor.name; + } +} + +export type LockAcquireOptions = { + /** + * Optionally retry and wait for the lock to be acquired + */ + max_wait_ms?: number; +}; + +export type LockHandle = { + refresh(): Promise; + release(): Promise; +}; + +export type LockCallback = (refresh: () => Promise) => Promise; + +export type LockManager = { + init?(): Promise; + /** + * Attempts to acquire a lock handle. + * @returns null if the lock is in use and either no `max_wait_ms` was provided or the timeout has elapsed. + */ + acquire(options?: LockAcquireOptions): Promise; + /** + * Acquires a lock, executes the given handler callback then automatically releases the lock. + */ + lock(handler: LockCallback, options?: LockAcquireOptions): Promise; +}; diff --git a/libs/lib-services/src/locks/locks-index.ts b/libs/lib-services/src/locks/locks-index.ts new file mode 100644 index 00000000..57dd2744 --- /dev/null +++ b/libs/lib-services/src/locks/locks-index.ts @@ -0,0 +1,2 @@ +export * from './AbstractLockManager.js'; +export * from './LockManager.js'; diff --git a/libs/lib-services/src/migrations/AbstractMigrationAgent.ts b/libs/lib-services/src/migrations/AbstractMigrationAgent.ts new file mode 100644 index 00000000..bfb46550 --- /dev/null +++ b/libs/lib-services/src/migrations/AbstractMigrationAgent.ts @@ -0,0 +1,172 @@ +import { LockManager } from '../locks/LockManager.js'; +import { logger } from '../logger/Logger.js'; +import * as defs from './migration-definitions.js'; + +export type MigrationParams = { + count?: number; + direction: defs.Direction; + migrationContext?: Generics['MIGRATION_CONTEXT']; +}; + +type WriteLogsParams = { + state?: defs.MigrationState; + log_stream: Iterable | AsyncIterable; +}; + +export type MigrationAgentGenerics = { + MIGRATION_CONTEXT?: {}; +}; + +export type RunMigrationParams = MigrationParams & { + migrations: defs.Migration[]; + maxLockWaitMs?: number; +}; + +type ExecuteParams = RunMigrationParams & { + state?: defs.MigrationState; +}; + +export const DEFAULT_MAX_LOCK_WAIT_MS = 3 * 60 * 1000; // 3 minutes + +export abstract class AbstractMigrationAgent + implements AsyncDisposable +{ + abstract get store(): defs.MigrationStore; + abstract get locks(): LockManager; + + abstract loadInternalMigrations(): Promise[]>; + + abstract [Symbol.asyncDispose](): Promise; + + protected async init() { + await this.locks.init?.(); + await this.store.init?.(); + } + + async run(params: RunMigrationParams) { + await this.init(); + + const { direction, migrations, migrationContext } = params; + // Only one process should execute this at a time. 
+ logger.info('Acquiring lock for migrations'); + const lockHandle = await this.locks.acquire({ max_wait_ms: params.maxLockWaitMs ?? DEFAULT_MAX_LOCK_WAIT_MS }); + + if (!lockHandle) { + throw new Error('Could not acquire lock'); + } + + let isReleased = false; + const releaseLock = async () => { + if (isReleased) { + return; + } + await lockHandle.release(); + isReleased = true; + }; + + // For the case where the migration is terminated + process.addListener('beforeExit', releaseLock); + + try { + const state = await this.store.load(); + + logger.info('Running migrations'); + const logStream = this.execute({ + direction, + migrations, + state, + migrationContext + }); + + await this.writeLogsToStore({ + log_stream: logStream, + state + }); + } finally { + logger.info('Releasing migration lock'); + await releaseLock(); + process.removeListener('beforeExit', releaseLock); + logger.info('Done with migrations'); + } + } + + protected async *execute(params: ExecuteParams): AsyncGenerator { + const internalMigrations = await this.loadInternalMigrations(); + let migrations = [...internalMigrations, ...params.migrations]; + + if (params.direction === defs.Direction.Down) { + migrations.reverse(); + } + + let index = 0; + + if (params.state) { + // Find the index of the last run + index = migrations.findIndex((migration) => { + return migration.name === params.state!.last_run; + }); + + if (index === -1) { + throw new Error( + `The last run migration ${params.state?.last_run} was not found in the given set of migrations` + ); + } + + // If we are migrating down then we want to include the last run migration, otherwise we want to start at the next one + if (params.direction === defs.Direction.Up) { + index += 1; + } + } + + migrations = migrations.slice(index); + + let i = 0; + const { migrationContext } = params; + for (const migration of migrations) { + if (params.count && params.count === i) { + return; + } + + logger.info(`Executing ${migration.name} (${params.direction})`); + try { + switch (params.direction) { + case defs.Direction.Up: { + await migration.up(migrationContext); + break; + } + case defs.Direction.Down: { + await migration.down(migrationContext); + break; + } + } + logger.debug(`Success`); + } catch (err) { + logger.error(`Failed`, err); + process.exit(1); + } + + yield { + name: migration.name, + direction: params.direction, + timestamp: new Date() + }; + + i++; + } + } + + resetStore() { + return this.store.clear(); + } + + protected writeLogsToStore = async (params: WriteLogsParams): Promise => { + const log = [...(params.state?.log || [])]; + for await (const migration of params.log_stream) { + log.push(migration); + await this.store.save({ + last_run: migration.name, + log: log + }); + } + }; +} diff --git a/libs/lib-services/src/migrations/MigrationManager.ts b/libs/lib-services/src/migrations/MigrationManager.ts new file mode 100644 index 00000000..7d736587 --- /dev/null +++ b/libs/lib-services/src/migrations/MigrationManager.ts @@ -0,0 +1,39 @@ +import { AbstractMigrationAgent, MigrationAgentGenerics, MigrationParams } from './AbstractMigrationAgent.js'; +import * as defs from './migration-definitions.js'; + +export class MigrationManager + implements AsyncDisposable +{ + private migrations: defs.Migration[]; + private _agent: AbstractMigrationAgent | null; + + constructor() { + this.migrations = []; + this._agent = null; + } + + registerMigrationAgent(agent: AbstractMigrationAgent) { + if (this._agent) { + throw new Error(`A migration agent has already been registered. 
Only a single agent is supported.`); + } + this._agent = agent; + } + + registerMigrations(migrations: defs.Migration[]) { + this.migrations.push(...migrations); + } + + async migrate(params: MigrationParams) { + if (!this._agent) { + throw new Error(`A migration agent has not been registered yet.`); + } + return this._agent.run({ + ...params, + migrations: this.migrations + }); + } + + async [Symbol.asyncDispose]() { + return this._agent?.[Symbol.asyncDispose](); + } +} diff --git a/libs/lib-services/src/migrations/migration-definitions.ts b/libs/lib-services/src/migrations/migration-definitions.ts new file mode 100644 index 00000000..41bd8e4f --- /dev/null +++ b/libs/lib-services/src/migrations/migration-definitions.ts @@ -0,0 +1,33 @@ +export type MigrationFunction = (context: Context) => Promise; + +export type Migration = { + name: string; + up: MigrationFunction; + down: MigrationFunction; +}; + +export enum Direction { + Up = 'up', + Down = 'down' +} + +export type ExecutedMigration = { + name: string; + direction: Direction; + timestamp: Date; +}; + +export type MigrationState = { + last_run: string; + log: ExecutedMigration[]; +}; + +export type MigrationStore = { + init?: () => Promise; + load: () => Promise; + save: (state: MigrationState) => Promise; + /** + * Resets the migration store state. Mostly used for tests. + */ + clear: () => Promise; +}; diff --git a/libs/lib-services/src/migrations/migrations-index.ts b/libs/lib-services/src/migrations/migrations-index.ts new file mode 100644 index 00000000..ea9828c6 --- /dev/null +++ b/libs/lib-services/src/migrations/migrations-index.ts @@ -0,0 +1,3 @@ +export * from './AbstractMigrationAgent.js'; +export * from './migration-definitions.js'; +export * from './MigrationManager.js'; diff --git a/libs/lib-services/test/src/__mocks__/MockLockManager.ts b/libs/lib-services/test/src/__mocks__/MockLockManager.ts new file mode 100644 index 00000000..b4d6919d --- /dev/null +++ b/libs/lib-services/test/src/__mocks__/MockLockManager.ts @@ -0,0 +1,68 @@ +import { AbstractLockManager } from '../../../src/locks/AbstractLockManager.js'; +import { LockActiveError, LockHandle } from '../../../src/locks/LockManager.js'; + +export class MockLockManager extends AbstractLockManager { + private currentLockId: string | null = null; + private lockTimeout?: NodeJS.Timeout; + + /** + * Acquires a lock if no other lock is active. + * @returns A promise resolving to the lock ID if successful, or null if a lock is already active. + */ + async acquireHandle(): Promise { + if (this.currentLockId) { + return null; + } + + const id = this.generateLockId(); + this.currentLockId = id; + return { + refresh: () => this.refresh(id), + release: () => this.release(id) + }; + } + + /** + * Refreshes the lock to extend its lifetime. + * @param lock_id The lock ID to refresh. + * @throws LockActiveError if the lock ID does not match the active lock. + */ + protected async refresh(lock_id: string): Promise { + if (this.currentLockId !== lock_id) { + throw new LockActiveError(); + } + + // Simulate refreshing the lock (e.g., resetting a timeout). + if (this.lockTimeout) { + clearTimeout(this.lockTimeout); + } + this.lockTimeout = setTimeout(() => { + this.currentLockId = null; + }, 30000); // Example: 30 seconds lock lifetime. + } + + /** + * Releases the active lock. + * @param lock_id The lock ID to release. + * @throws LockActiveError if the lock ID does not match the active lock. 
+ */ + protected async release(lock_id: string): Promise { + if (this.currentLockId !== lock_id) { + throw new LockActiveError(); + } + + this.currentLockId = null; + if (this.lockTimeout) { + clearTimeout(this.lockTimeout); + this.lockTimeout = undefined; + } + } + + /** + * Generates a unique lock ID. + * @returns A unique string representing the lock ID. + */ + private generateLockId(): string { + return Math.random().toString(36).substr(2, 10); + } +} diff --git a/libs/lib-services/test/src/migrations.test.ts b/libs/lib-services/test/src/migrations.test.ts new file mode 100644 index 00000000..db9426f1 --- /dev/null +++ b/libs/lib-services/test/src/migrations.test.ts @@ -0,0 +1,93 @@ +import { describe, expect, test, vi } from 'vitest'; +import { AbstractMigrationAgent } from '../../src/migrations/AbstractMigrationAgent.js'; +import { Direction, Migration, MigrationState, MigrationStore } from '../../src/migrations/migration-definitions.js'; +import { MigrationManager } from '../../src/migrations/MigrationManager.js'; +import { MockLockManager } from './__mocks__/MockLockManager.js'; + +class MockMigrationAgent extends AbstractMigrationAgent { + locks = new MockLockManager({ name: 'mock_migrations' }); + + store: MigrationStore = { + clear: async () => {}, + load: async () => { + // No state stored + return undefined; + }, + save: async (state: MigrationState) => {} + }; + + async loadInternalMigrations(): Promise[]> { + return []; + } + + async [Symbol.asyncDispose](): Promise {} +} + +describe('Migrations', () => { + test('should allow additional registered migrations', async () => { + const manager = new MigrationManager(); + manager.registerMigrationAgent(new MockMigrationAgent()); + + const additionalMigrationName = 'additional'; + + const mockMigration = { + name: additionalMigrationName, + up: vi.fn(), + down: vi.fn() + }; + manager.registerMigrations([mockMigration]); + + await manager.migrate({ + direction: Direction.Up + }); + + expect(mockMigration.up.mock.calls.length).eq(1); + }); + + test('should run internal migrations', async () => { + const manager = new MigrationManager(); + const agent = new MockMigrationAgent(); + manager.registerMigrationAgent(agent); + + const internalMigrationName = 'internal'; + + const mockMigration = { + name: internalMigrationName, + up: vi.fn(), + down: vi.fn() + }; + + vi.spyOn(agent, 'loadInternalMigrations').mockImplementation(async () => { + return [mockMigration]; + }); + + await manager.migrate({ + direction: Direction.Up + }); + + expect(mockMigration.up.mock.calls.length).eq(1); + }); + + test('should log migration state to store', async () => { + const manager = new MigrationManager(); + const agent = new MockMigrationAgent(); + manager.registerMigrationAgent(agent); + + const internalMigrationName = 'internal'; + const mockMigration = { + name: internalMigrationName, + up: vi.fn(), + down: vi.fn() + }; + + manager.registerMigrations([mockMigration]); + + const spy = vi.spyOn(agent.store, 'save'); + + await manager.migrate({ + direction: Direction.Up + }); + + expect(spy.mock.calls[0][0].last_run).eq(internalMigrationName); + }); +}); diff --git a/modules/module-mongodb-storage/CHANGELOG.md b/modules/module-mongodb-storage/CHANGELOG.md new file mode 100644 index 00000000..4a1042e2 --- /dev/null +++ b/modules/module-mongodb-storage/CHANGELOG.md @@ -0,0 +1 @@ +# @powersync/service-module-mongodb-storage diff --git a/modules/module-mongodb-storage/LICENSE b/modules/module-mongodb-storage/LICENSE new file mode 100644 index 
00000000..c8efd46c --- /dev/null +++ b/modules/module-mongodb-storage/LICENSE @@ -0,0 +1,67 @@ +# Functional Source License, Version 1.1, Apache 2.0 Future License + +## Abbreviation + +FSL-1.1-Apache-2.0 + +## Notice + +Copyright 2023-2024 Journey Mobile, Inc. + +## Terms and Conditions + +### Licensor ("We") + +The party offering the Software under these Terms and Conditions. + +### The Software + +The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software. + +### License Grant + +Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below. + +### Permitted Purpose + +A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that: + +1. substitutes for the Software; +2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or +3. offers the same or substantially similar functionality as the Software. + +Permitted Purposes specifically include using the Software: + +1. for your internal use and access; +2. for non-commercial education; +3. for non-commercial research; and +4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. + +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. + +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. + +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/modules/module-mongodb-storage/README.md b/modules/module-mongodb-storage/README.md new file mode 100644 index 00000000..d0e54ef1 --- /dev/null +++ b/modules/module-mongodb-storage/README.md @@ -0,0 +1,3 @@ +# PowerSync Service Module MongoDB Storage + +MongoDB bucket storage module for PowerSync diff --git a/modules/module-mongodb-storage/package.json b/modules/module-mongodb-storage/package.json new file mode 100644 index 00000000..b397020d --- /dev/null +++ b/modules/module-mongodb-storage/package.json @@ -0,0 +1,48 @@ +{ + "name": "@powersync/service-module-mongodb-storage", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "version": "0.0.1", + "main": "dist/index.js", + "license": "FSL-1.1-Apache-2.0", + "type": "module", + "publishConfig": { + "access": "public" + }, + "scripts": { + "build": "tsc -b", + "build:tests": "tsc -b test/tsconfig.json", + "clean": "rm -rf ./dist && tsc -b --clean", + "test": "vitest" + }, + "exports": { + ".": { + "import": "./dist/index.js", + "require": "./dist/index.js", + "default": "./dist/index.js" + }, + "./types": { + "import": "./dist/types/types.js", + "require": "./dist/types/types.js", + "default": "./dist/types/types.js" + } + }, + "dependencies": { + "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-core": "workspace:*", + "@powersync/service-jsonbig": "workspace:*", + "@powersync/service-sync-rules": "workspace:*", + "@powersync/service-types": "workspace:*", + "@powersync/lib-service-mongodb": "workspace:*", + "mongodb": "^6.11.0", + "bson": "^6.8.0", + "ts-codec": "^1.3.0", + "ix": "^5.0.0", + "lru-cache": "^10.2.2", + "uuid": "^9.0.1" + }, + "devDependencies": { + "@types/uuid": "^9.0.4", + "@powersync/service-core-tests": "workspace:*" + } +} diff --git a/modules/module-mongodb-storage/src/index.ts b/modules/module-mongodb-storage/src/index.ts new file mode 100644 index 00000000..54ac0f55 --- /dev/null +++ b/modules/module-mongodb-storage/src/index.ts @@ -0,0 +1,7 @@ +export * from './module/MongoStorageModule.js'; + +export * from './storage/storage-index.js'; +export * as storage from './storage/storage-index.js'; + +export * from './types/types.js'; +export * as types from './types/types.js'; diff --git a/modules/module-mongodb-storage/src/migrations/MongoMigrationAgent.ts b/modules/module-mongodb-storage/src/migrations/MongoMigrationAgent.ts new file mode 100644 index 00000000..bb149163 --- /dev/null +++ b/modules/module-mongodb-storage/src/migrations/MongoMigrationAgent.ts @@ -0,0 +1,39 @@ +import * as framework from '@powersync/lib-services-framework'; + +import * as lib_mongo from '@powersync/lib-service-mongodb'; +import { migrations } from '@powersync/service-core'; +import * as path from 'path'; +import { fileURLToPath } from 'url'; +import { createPowerSyncMongo, PowerSyncMongo } from '../storage/storage-index.js'; +import { MongoStorageConfig } from '../types/types.js'; +import { createMongoMigrationStore } from './mongo-migration-store.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +const 
MONGO_LOCK_PROCESS = 'migrations'; +const MIGRATIONS_DIR = path.join(__dirname, '/db/migrations'); + +export class MongoMigrationAgent extends migrations.AbstractPowerSyncMigrationAgent { + store: framework.MigrationStore; + locks: framework.LockManager; + + protected client: PowerSyncMongo; + + constructor(mongoConfig: MongoStorageConfig) { + super(); + + this.client = createPowerSyncMongo(mongoConfig); + + this.store = createMongoMigrationStore(this.client.db); + this.locks = new lib_mongo.locks.MongoLockManager({ collection: this.client.locks, name: MONGO_LOCK_PROCESS }); + } + + getInternalScriptsDir(): string { + return MIGRATIONS_DIR; + } + + async [Symbol.asyncDispose](): Promise { + await this.client.client.close(); + } +} diff --git a/modules/module-mongodb-storage/src/migrations/db/migrations/1684951997326-init.ts b/modules/module-mongodb-storage/src/migrations/db/migrations/1684951997326-init.ts new file mode 100644 index 00000000..e13004a9 --- /dev/null +++ b/modules/module-mongodb-storage/src/migrations/db/migrations/1684951997326-init.ts @@ -0,0 +1,39 @@ +import * as lib_mongo from '@powersync/lib-service-mongodb'; +import { migrations } from '@powersync/service-core'; +import * as storage from '../../../storage/storage-index.js'; +import { MongoStorageConfig } from '../../../types/types.js'; + +export const up: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + const database = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + await lib_mongo.waitForAuth(database.db); + try { + await database.bucket_parameters.createIndex( + { + 'key.g': 1, + lookup: 1, + _id: 1 + }, + { name: 'lookup1' } + ); + } finally { + await database.client.close(); + } +}; + +export const down: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + + const database = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + try { + if (await database.bucket_parameters.indexExists('lookup')) { + await database.bucket_parameters.dropIndex('lookup1'); + } + } finally { + await database.client.close(); + } +}; diff --git a/packages/service-core/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts b/modules/module-mongodb-storage/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts similarity index 100% rename from packages/service-core/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts rename to modules/module-mongodb-storage/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts diff --git a/packages/service-core/src/migrations/db/migrations/1702295701188-sync-rule-state.ts b/modules/module-mongodb-storage/src/migrations/db/migrations/1702295701188-sync-rule-state.ts similarity index 66% rename from packages/service-core/src/migrations/db/migrations/1702295701188-sync-rule-state.ts rename to modules/module-mongodb-storage/src/migrations/db/migrations/1702295701188-sync-rule-state.ts index d0782ce5..1a24d302 100644 --- a/packages/service-core/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +++ b/modules/module-mongodb-storage/src/migrations/db/migrations/1702295701188-sync-rule-state.ts @@ -1,6 +1,7 @@ -import * as mongo from '../../../db/mongo.js'; +import * as lib_mongo from '@powersync/lib-service-mongodb'; +import { storage as core_storage, migrations } from '@powersync/service-core'; import * as storage from '../../../storage/storage-index.js'; -import 
* as utils from '../../../util/util-index.js'; +import { MongoStorageConfig } from '../../../types/types.js'; interface LegacySyncRulesDocument extends storage.SyncRuleDocument { /** @@ -23,12 +24,13 @@ interface LegacySyncRulesDocument extends storage.SyncRuleDocument { auto_activate?: boolean; } -export const up = async (context: utils.MigrationContext) => { - const { runner_config } = context; - const config = await utils.loadConfig(runner_config); - const db = storage.createPowerSyncMongo(config.storage); +export const up: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + const db = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); - await mongo.waitForAuth(db.db); + await lib_mongo.waitForAuth(db.db); try { // We keep the old flags for existing deployments still shutting down. @@ -39,7 +41,7 @@ export const up = async (context: utils.MigrationContext) => { replicating: true, auto_activate: true }, - { $set: { state: storage.SyncRuleState.PROCESSING } } + { $set: { state: core_storage.SyncRuleState.PROCESSING } } ); // 2. Snapshot done: `active = true, snapshot_done = true, replicating = true, auto_activate = false` @@ -47,7 +49,7 @@ export const up = async (context: utils.MigrationContext) => { { active: true }, - { $set: { state: storage.SyncRuleState.ACTIVE } } + { $set: { state: core_storage.SyncRuleState.ACTIVE } } ); // 3. Stopped: `active = false, snapshot_done = true, replicating = false, auto_activate = false`. @@ -57,7 +59,7 @@ export const up = async (context: utils.MigrationContext) => { replicating: { $ne: true }, auto_activate: { $ne: true } }, - { $set: { state: storage.SyncRuleState.STOP } } + { $set: { state: core_storage.SyncRuleState.STOP } } ); const remaining = await db.sync_rules.find({ state: null as any }).toArray(); @@ -70,29 +72,30 @@ export const up = async (context: utils.MigrationContext) => { } }; -export const down = async (context: utils.MigrationContext) => { - const { runner_config } = context; - const config = await utils.loadConfig(runner_config); +export const down: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; - const db = storage.createPowerSyncMongo(config.storage); + const db = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); try { await db.sync_rules.updateMany( { - state: storage.SyncRuleState.ACTIVE + state: core_storage.SyncRuleState.ACTIVE }, { $set: { active: true, replicating: true } } ); await db.sync_rules.updateMany( { - state: storage.SyncRuleState.PROCESSING + state: core_storage.SyncRuleState.PROCESSING }, { $set: { active: false, replicating: true, auto_activate: true } } ); await db.sync_rules.updateMany( { - $or: [{ state: storage.SyncRuleState.STOP }, { state: storage.SyncRuleState.TERMINATED }] + $or: [{ state: core_storage.SyncRuleState.STOP }, { state: core_storage.SyncRuleState.TERMINATED }] }, { $set: { active: false, replicating: false, auto_activate: false } } ); diff --git a/modules/module-mongodb-storage/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts b/modules/module-mongodb-storage/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts new file mode 100644 index 00000000..5bb358f8 --- /dev/null +++ b/modules/module-mongodb-storage/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts @@ -0,0 +1,38 @@ +import { migrations } from '@powersync/service-core'; + +import * as 
storage from '../../../storage/storage-index.js'; +import { MongoStorageConfig } from '../../../types/types.js'; + +export const up: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + const db = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + + try { + await db.write_checkpoints.createIndex( + { + user_id: 1 + }, + { name: 'user_id' } + ); + } finally { + await db.client.close(); + } +}; + +export const down: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + + const db = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + + try { + if (await db.write_checkpoints.indexExists('user_id')) { + await db.write_checkpoints.dropIndex('user_id'); + } + } finally { + await db.client.close(); + } +}; diff --git a/modules/module-mongodb-storage/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts b/modules/module-mongodb-storage/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts new file mode 100644 index 00000000..bac572d0 --- /dev/null +++ b/modules/module-mongodb-storage/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts @@ -0,0 +1,40 @@ +import { migrations } from '@powersync/service-core'; +import * as storage from '../../../storage/storage-index.js'; +import { MongoStorageConfig } from '../../../types/types.js'; + +const INDEX_NAME = 'user_sync_rule_unique'; + +export const up: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + const db = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + + try { + await db.custom_write_checkpoints.createIndex( + { + user_id: 1, + sync_rules_id: 1 + }, + { name: INDEX_NAME, unique: true } + ); + } finally { + await db.client.close(); + } +}; + +export const down: migrations.PowerSyncMigrationFunction = async (context) => { + const { + service_context: { configuration } + } = context; + + const db = storage.createPowerSyncMongo(configuration.storage as MongoStorageConfig); + + try { + if (await db.custom_write_checkpoints.indexExists(INDEX_NAME)) { + await db.custom_write_checkpoints.dropIndex(INDEX_NAME); + } + } finally { + await db.client.close(); + } +}; diff --git a/packages/service-core/src/migrations/store/migration-store.ts b/modules/module-mongodb-storage/src/migrations/mongo-migration-store.ts similarity index 76% rename from packages/service-core/src/migrations/store/migration-store.ts rename to modules/module-mongodb-storage/src/migrations/mongo-migration-store.ts index 9ade3b31..57ea8952 100644 --- a/packages/service-core/src/migrations/store/migration-store.ts +++ b/modules/module-mongodb-storage/src/migrations/mongo-migration-store.ts @@ -1,18 +1,13 @@ +import { migrations } from '@powersync/lib-services-framework'; import { Db } from 'mongodb'; import * as path from 'path'; -import * as defs from '../definitions.js'; - -export type MigrationStore = { - load: () => Promise; - save: (state: defs.MigrationState) => Promise; -}; /** * A custom store for node-migrate which is used to save and load migrations that have * been operated on to mongo. 
*/ -export const createMongoMigrationStore = (db: Db): MigrationStore => { - const collection = db.collection('migrations'); +export const createMongoMigrationStore = (db: Db): migrations.MigrationStore => { + const collection = db.collection('migrations'); return { load: async () => { @@ -47,7 +42,11 @@ export const createMongoMigrationStore = (db: Db): MigrationStore => { }; }, - save: async (state: defs.MigrationState) => { + clear: async () => { + await collection.deleteMany({}); + }, + + save: async (state: migrations.MigrationState) => { await collection.replaceOne( {}, { diff --git a/modules/module-mongodb-storage/src/module/MongoStorageModule.ts b/modules/module-mongodb-storage/src/module/MongoStorageModule.ts new file mode 100644 index 00000000..3fd63dd3 --- /dev/null +++ b/modules/module-mongodb-storage/src/module/MongoStorageModule.ts @@ -0,0 +1,37 @@ +import * as lib_mongo from '@powersync/lib-service-mongodb'; +import * as core from '@powersync/service-core'; +import { MongoMigrationAgent } from '../migrations/MongoMigrationAgent.js'; +import { MongoStorageProvider } from '../storage/storage-index.js'; +import * as types from '../types/types.js'; + +export class MongoStorageModule extends core.modules.AbstractModule { + constructor() { + super({ + name: 'MongoDB Storage' + }); + } + + async initialize(context: core.system.ServiceContextContainer): Promise { + context.storageEngine.registerProvider(new MongoStorageProvider()); + + if (types.isMongoStorageConfig(context.configuration.storage)) { + context.migrations.registerMigrationAgent( + new MongoMigrationAgent(this.resolveConfig(context.configuration.storage)) + ); + } + } + + /** + * Combines base config with normalized connection settings + */ + private resolveConfig(config: types.MongoStorageConfig) { + return { + ...config, + ...lib_mongo.normalizeMongoConfig(config) + }; + } + + async teardown(options: core.modules.TearDownOptions): Promise { + // teardown is implemented in the storage engine + } +} diff --git a/packages/service-core/src/storage/MongoBucketStorage.ts b/modules/module-mongodb-storage/src/storage/MongoBucketStorage.ts similarity index 84% rename from packages/service-core/src/storage/MongoBucketStorage.ts rename to modules/module-mongodb-storage/src/storage/MongoBucketStorage.ts index e4a492bd..00ad68f4 100644 --- a/packages/service-core/src/storage/MongoBucketStorage.ts +++ b/modules/module-mongodb-storage/src/storage/MongoBucketStorage.ts @@ -4,32 +4,21 @@ import { LRUCache } from 'lru-cache/min'; import * as mongo from 'mongodb'; import * as timers from 'timers/promises'; -import * as locks from '../locks/locks-index.js'; -import * as sync from '../sync/sync-index.js'; -import * as util from '../util/util-index.js'; +import { storage, sync, utils } from '@powersync/service-core'; import { DisposableObserver, logger } from '@powersync/lib-services-framework'; import { v4 as uuid } from 'uuid'; -import { - ActiveCheckpoint, - BucketStorageFactory, - BucketStorageFactoryListener, - ParseSyncRulesOptions, - PersistedSyncRules, - PersistedSyncRulesContent, - StorageMetrics, - UpdateSyncRulesOptions, - WriteCheckpoint -} from './BucketStorage.js'; -import { PowerSyncMongo } from './mongo/db.js'; -import { SyncRuleDocument, SyncRuleState } from './mongo/models.js'; -import { MongoPersistedSyncRulesContent } from './mongo/MongoPersistedSyncRulesContent.js'; -import { MongoSyncBucketStorage } from './mongo/MongoSyncBucketStorage.js'; -import { generateSlotName } from './mongo/util.js'; + +import * as lib_mongo 
from '@powersync/lib-service-mongodb'; +import { PowerSyncMongo } from './implementation/db.js'; +import { SyncRuleDocument } from './implementation/models.js'; +import { MongoPersistedSyncRulesContent } from './implementation/MongoPersistedSyncRulesContent.js'; +import { MongoSyncBucketStorage } from './implementation/MongoSyncBucketStorage.js'; +import { generateSlotName } from './implementation/util.js'; export class MongoBucketStorage - extends DisposableObserver - implements BucketStorageFactory + extends DisposableObserver + implements storage.BucketStorageFactory { private readonly client: mongo.MongoClient; private readonly session: mongo.ClientSession; @@ -72,7 +61,7 @@ export class MongoBucketStorage this.slot_name_prefix = options.slot_name_prefix; } - getInstance(options: PersistedSyncRulesContent): MongoSyncBucketStorage { + getInstance(options: storage.PersistedSyncRulesContent): MongoSyncBucketStorage { let { id, slot_name } = options; if ((typeof id as any) == 'bigint') { id = Number(id); @@ -126,11 +115,11 @@ export class MongoBucketStorage await this.db.sync_rules.updateOne( { _id: next.id, - state: SyncRuleState.PROCESSING + state: storage.SyncRuleState.PROCESSING }, { $set: { - state: SyncRuleState.STOP + state: storage.SyncRuleState.STOP } } ); @@ -144,18 +133,18 @@ export class MongoBucketStorage await this.db.sync_rules.updateOne( { _id: active.id, - state: SyncRuleState.ACTIVE + state: storage.SyncRuleState.ACTIVE }, { $set: { - state: SyncRuleState.STOP + state: storage.SyncRuleState.STOP } } ); } } - async updateSyncRules(options: UpdateSyncRulesOptions): Promise { + async updateSyncRules(options: storage.UpdateSyncRulesOptions): Promise { // Parse and validate before applying any changes const parsed = SqlSyncRules.fromYaml(options.content, { // No schema-based validation at this point @@ -170,9 +159,9 @@ export class MongoBucketStorage // Only have a single set of sync rules with PROCESSING. await this.db.sync_rules.updateMany( { - state: SyncRuleState.PROCESSING + state: storage.SyncRuleState.PROCESSING }, - { $set: { state: SyncRuleState.STOP } } + { $set: { state: storage.SyncRuleState.STOP } } ); const id_doc = await this.db.op_id_sequence.findOneAndUpdate( @@ -201,7 +190,7 @@ export class MongoBucketStorage no_checkpoint_before: null, keepalive_op: null, snapshot_done: false, - state: SyncRuleState.PROCESSING, + state: storage.SyncRuleState.PROCESSING, slot_name: slot_name, last_checkpoint_ts: null, last_fatal_error: null, @@ -220,7 +209,7 @@ export class MongoBucketStorage async getActiveSyncRulesContent(): Promise { const doc = await this.db.sync_rules.findOne( { - state: SyncRuleState.ACTIVE + state: storage.SyncRuleState.ACTIVE }, { sort: { _id: -1 }, limit: 1 } ); @@ -231,7 +220,7 @@ export class MongoBucketStorage return new MongoPersistedSyncRulesContent(this.db, doc); } - async getActiveSyncRules(options: ParseSyncRulesOptions): Promise { + async getActiveSyncRules(options: storage.ParseSyncRulesOptions): Promise { const content = await this.getActiveSyncRulesContent(); return content?.parsed(options) ?? 
null; } @@ -239,7 +228,7 @@ export class MongoBucketStorage async getNextSyncRulesContent(): Promise { const doc = await this.db.sync_rules.findOne( { - state: SyncRuleState.PROCESSING + state: storage.SyncRuleState.PROCESSING }, { sort: { _id: -1 }, limit: 1 } ); @@ -250,15 +239,15 @@ export class MongoBucketStorage return new MongoPersistedSyncRulesContent(this.db, doc); } - async getNextSyncRules(options: ParseSyncRulesOptions): Promise { + async getNextSyncRules(options: storage.ParseSyncRulesOptions): Promise { const content = await this.getNextSyncRulesContent(); return content?.parsed(options) ?? null; } - async getReplicatingSyncRules(): Promise { + async getReplicatingSyncRules(): Promise { const docs = await this.db.sync_rules .find({ - $or: [{ state: SyncRuleState.ACTIVE }, { state: SyncRuleState.PROCESSING }] + $or: [{ state: storage.SyncRuleState.ACTIVE }, { state: storage.SyncRuleState.PROCESSING }] }) .toArray(); @@ -267,10 +256,10 @@ export class MongoBucketStorage }); } - async getStoppedSyncRules(): Promise { + async getStoppedSyncRules(): Promise { const docs = await this.db.sync_rules .find({ - state: SyncRuleState.STOP + state: storage.SyncRuleState.STOP }) .toArray(); @@ -279,10 +268,10 @@ export class MongoBucketStorage }); } - async getActiveCheckpoint(): Promise { + async getActiveCheckpoint(): Promise { const doc = await this.db.sync_rules.findOne( { - state: SyncRuleState.ACTIVE + state: storage.SyncRuleState.ACTIVE }, { sort: { _id: -1 }, @@ -294,7 +283,7 @@ export class MongoBucketStorage return this.makeActiveCheckpoint(doc); } - async getStorageMetrics(): Promise { + async getStorageMetrics(): Promise { const ignoreNotExiting = (e: unknown) => { if (e instanceof mongo.MongoServerError && e.codeName == 'NamespaceNotFound') { // Collection doesn't exist - return 0 @@ -359,7 +348,8 @@ export class MongoBucketStorage }); if (!instance) { - const manager = locks.createMongoLockManager(this.db.locks, { + const manager = new lib_mongo.locks.MongoLockManager({ + collection: this.db.locks, name: `instance-id-insertion-lock` }); @@ -379,7 +369,7 @@ export class MongoBucketStorage private makeActiveCheckpoint(doc: SyncRuleDocument | null) { return { - checkpoint: util.timestampToOpId(doc?.last_checkpoint ?? 0n), + checkpoint: utils.timestampToOpId(doc?.last_checkpoint ?? 0n), lsn: doc?.last_checkpoint_lsn ?? null, hasSyncRules() { return doc != null; @@ -390,13 +380,13 @@ export class MongoBucketStorage } return (await this.storageCache.fetch(doc._id)) ?? null; } - } satisfies ActiveCheckpoint; + } satisfies storage.ActiveCheckpoint; } /** * Instance-wide watch on the latest available checkpoint (op_id + lsn). */ - private async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable { + private async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable { const pipeline: mongo.Document[] = [ { $match: { @@ -422,7 +412,7 @@ export class MongoBucketStorage await this.client.withSession(async (session) => { doc = await this.db.sync_rules.findOne( { - state: SyncRuleState.ACTIVE + state: storage.SyncRuleState.ACTIVE }, { session, @@ -467,7 +457,7 @@ export class MongoBucketStorage { once: true } ); - let lastOp: ActiveCheckpoint | null = null; + let lastOp: storage.ActiveCheckpoint | null = null; for await (const update of stream.stream()) { if (signal.aborted) { @@ -498,8 +488,8 @@ export class MongoBucketStorage /** * User-specific watch on the latest checkpoint and/or write checkpoint. 
*/ - async *watchWriteCheckpoint(user_id: string, signal: AbortSignal): AsyncIterable { - let lastCheckpoint: util.OpId | null = null; + async *watchWriteCheckpoint(user_id: string, signal: AbortSignal): AsyncIterable { + let lastCheckpoint: utils.OpId | null = null; let lastWriteCheckpoint: bigint | null = null; const iter = wrapWithAbort(this.sharedIter, signal); diff --git a/packages/service-core/src/storage/mongo/MongoBucketBatch.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts similarity index 92% rename from packages/service-core/src/storage/mongo/MongoBucketBatch.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts index 0d44bc21..2686ed22 100644 --- a/packages/service-core/src/storage/mongo/MongoBucketBatch.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts @@ -3,25 +3,15 @@ import * as bson from 'bson'; import * as mongo from 'mongodb'; import { container, DisposableObserver, errors, logger } from '@powersync/lib-services-framework'; -import * as util from '../../util/util-index.js'; -import { - BucketBatchStorageListener, - BucketStorageBatch, - FlushedResult, - mergeToast, - SaveOperationTag, - SaveOptions -} from '../BucketStorage.js'; -import { SourceTable } from '../SourceTable.js'; -import { BatchedCustomWriteCheckpointOptions, CustomWriteCheckpointOptions } from '../WriteCheckpointAPI.js'; +import { SaveOperationTag, storage, utils } from '@powersync/service-core'; +import * as timers from 'node:timers/promises'; import { PowerSyncMongo } from './db.js'; import { CurrentBucket, CurrentDataDocument, SourceKey, SyncRuleDocument } from './models.js'; import { MongoIdSequence } from './MongoIdSequence.js'; import { batchCreateCustomWriteCheckpoints } from './MongoWriteCheckpointAPI.js'; import { cacheKey, OperationBatch, RecordOperation } from './OperationBatch.js'; import { PersistedBatch } from './PersistedBatch.js'; -import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, replicaIdEquals, serializeLookup } from './util.js'; -import * as timers from 'node:timers/promises'; +import { idPrefixFilter } from './util.js'; /** * 15MB @@ -33,7 +23,7 @@ const MAX_ROW_SIZE = 15 * 1024 * 1024; // makes it more fair and has less overhead. // // In the future, we can investigate allowing multiple replication streams operating independently. -const replicationMutex = new util.Mutex(); +const replicationMutex = new utils.Mutex(); export interface MongoBucketBatchOptions { db: PowerSyncMongo; @@ -50,7 +40,10 @@ export interface MongoBucketBatchOptions { skipExistingRows: boolean; } -export class MongoBucketBatch extends DisposableObserver implements BucketStorageBatch { +export class MongoBucketBatch + extends DisposableObserver + implements storage.BucketStorageBatch +{ private readonly client: mongo.MongoClient; public readonly db: PowerSyncMongo; public readonly session: mongo.ClientSession; @@ -63,7 +56,7 @@ export class MongoBucketBatch extends DisposableObserver { - let result: FlushedResult | null = null; + async flush(): Promise { + let result: storage.FlushedResult | null = null; // One flush may be split over multiple transactions. // Each flushInner() is one transaction. 
while (this.batch != null) { @@ -128,7 +121,7 @@ export class MongoBucketBatch extends DisposableObserver { + private async flushInner(): Promise { const batch = this.batch; if (batch == null) { return null; @@ -321,8 +314,11 @@ export class MongoBucketBatch extends DisposableObserver { - return serializeLookup(p.lookup); + return storage.serializeLookup(p.lookup); }); } } @@ -507,7 +503,7 @@ export class MongoBucketBatch extends DisposableObserver { + async save(record: storage.SaveOptions): Promise { const { after, before, sourceTable, tag } = record; for (const event of this.getTableEvents(sourceTable)) { this.iterateListeners((cb) => @@ -722,8 +718,8 @@ export class MongoBucketBatch extends DisposableObserver { + async drop(sourceTables: storage.SourceTable[]): Promise { await this.truncate(sourceTables); const result = await this.flush(); @@ -766,7 +762,7 @@ export class MongoBucketBatch extends DisposableObserver { + async truncate(sourceTables: storage.SourceTable[]): Promise { await this.flush(); let last_op: bigint | null = null; @@ -783,7 +779,7 @@ export class MongoBucketBatch extends DisposableObserver { + async truncateSingle(sourceTable: storage.SourceTable): Promise { let last_op: bigint | null = null; // To avoid too large transactions, we limit the amount of data we delete per transaction. @@ -837,7 +833,7 @@ export class MongoBucketBatch extends DisposableObserver table.id); @@ -870,7 +866,7 @@ export class MongoBucketBatch extends DisposableObserver { - const copy = new SourceTable( + const copy = new storage.SourceTable( table.id, table.connectionTag, table.objectId, @@ -888,7 +884,7 @@ export class MongoBucketBatch extends DisposableObserver [...evt.getSourceTables()].some((sourceTable) => sourceTable.matches(table)) ); diff --git a/packages/service-core/src/storage/mongo/MongoCompactor.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoCompactor.ts similarity index 95% rename from packages/service-core/src/storage/mongo/MongoCompactor.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoCompactor.ts index 7024e5e2..7891804e 100644 --- a/packages/service-core/src/storage/mongo/MongoCompactor.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoCompactor.ts @@ -1,9 +1,8 @@ import { logger } from '@powersync/lib-services-framework'; +import { storage, utils } from '@powersync/service-core'; import { AnyBulkWriteOperation, MaxKey, MinKey } from 'mongodb'; -import { addChecksums } from '../../util/utils.js'; import { PowerSyncMongo } from './db.js'; import { BucketDataDocument, BucketDataKey } from './models.js'; -import { CompactOptions } from '../BucketStorage.js'; import { cacheKey } from './OperationBatch.js'; import { safeBulkWrite } from './util.js'; @@ -33,7 +32,7 @@ interface CurrentBucketState { /** * Additional options, primarily for testing. */ -export interface MongoCompactOptions extends CompactOptions { +export interface MongoCompactOptions extends storage.CompactOptions { /** Minimum of 2 */ clearBatchLimit?: number; /** Minimum of 1 */ @@ -222,7 +221,7 @@ export class MongoCompactor { // Keep the highest seen values in this case. 
} else { // flatstr reduces the memory usage by flattening the string - currentState.seen.set(flatstr(key), doc._id.o); + currentState.seen.set(utils.flatstr(key), doc._id.o); // length + 16 for the string // 24 for the bigint // 50 for map overhead @@ -324,7 +323,7 @@ export class MongoCompactor { let gotAnOp = false; for await (let op of query.stream()) { if (op.op == 'MOVE' || op.op == 'REMOVE' || op.op == 'CLEAR') { - checksum = addChecksums(checksum, op.checksum); + checksum = utils.addChecksums(checksum, op.checksum); lastOpId = op._id; if (op.op != 'CLEAR') { gotAnOp = true; @@ -380,14 +379,3 @@ export class MongoCompactor { } } } - -/** - * Flattens string to reduce memory usage (around 320 bytes -> 120 bytes), - * at the cost of some upfront CPU usage. - * - * From: https://github.com/davidmarkclements/flatstr/issues/8 - */ -function flatstr(s: string) { - s.match(/\n/g); - return s; -} diff --git a/packages/service-core/src/storage/mongo/MongoIdSequence.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoIdSequence.ts similarity index 100% rename from packages/service-core/src/storage/mongo/MongoIdSequence.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoIdSequence.ts diff --git a/packages/service-core/src/storage/mongo/MongoPersistedSyncRules.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRules.ts similarity index 72% rename from packages/service-core/src/storage/mongo/MongoPersistedSyncRules.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRules.ts index e7777658..ce38cb68 100644 --- a/packages/service-core/src/storage/mongo/MongoPersistedSyncRules.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRules.ts @@ -1,8 +1,8 @@ import { SqlSyncRules } from '@powersync/service-sync-rules'; -import { PersistedSyncRules } from '../BucketStorage.js'; +import { storage } from '@powersync/service-core'; -export class MongoPersistedSyncRules implements PersistedSyncRules { +export class MongoPersistedSyncRules implements storage.PersistedSyncRules { public readonly slot_name: string; constructor( diff --git a/packages/service-core/src/storage/mongo/MongoPersistedSyncRulesContent.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRulesContent.ts similarity index 87% rename from packages/service-core/src/storage/mongo/MongoPersistedSyncRulesContent.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRulesContent.ts index 0a68fe29..0a39e1e3 100644 --- a/packages/service-core/src/storage/mongo/MongoPersistedSyncRulesContent.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoPersistedSyncRulesContent.ts @@ -1,13 +1,12 @@ +import { storage } from '@powersync/service-core'; import { SqlSyncRules } from '@powersync/service-sync-rules'; import * as mongo from 'mongodb'; - -import { ParseSyncRulesOptions, PersistedSyncRulesContent } from '../BucketStorage.js'; import { MongoPersistedSyncRules } from './MongoPersistedSyncRules.js'; import { MongoSyncRulesLock } from './MongoSyncRulesLock.js'; import { PowerSyncMongo } from './db.js'; import { SyncRuleDocument } from './models.js'; -export class MongoPersistedSyncRulesContent implements PersistedSyncRulesContent { +export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRulesContent { public readonly slot_name: string; public readonly id: number; @@ -33,7 +32,7 @@ export class 
MongoPersistedSyncRulesContent implements PersistedSyncRulesContent this.last_keepalive_ts = doc.last_keepalive_ts; } - parsed(options: ParseSyncRulesOptions) { + parsed(options: storage.ParseSyncRulesOptions) { return new MongoPersistedSyncRules( this.id, SqlSyncRules.fromYaml(this.sync_rules_content, options), diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoStorageProvider.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoStorageProvider.ts new file mode 100644 index 00000000..a147bfe0 --- /dev/null +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoStorageProvider.ts @@ -0,0 +1,39 @@ +import * as lib_mongo from '@powersync/lib-service-mongodb'; +import { logger } from '@powersync/lib-services-framework'; +import { storage } from '@powersync/service-core'; +import { MongoStorageConfig } from '../../types/types.js'; +import { MongoBucketStorage } from '../MongoBucketStorage.js'; +import { PowerSyncMongo } from './db.js'; + +export class MongoStorageProvider implements storage.BucketStorageProvider { + get type() { + return lib_mongo.MONGO_CONNECTION_TYPE; + } + + async getStorage(options: storage.GetStorageOptions): Promise { + const { resolvedConfig } = options; + + const { storage } = resolvedConfig; + if (storage.type != this.type) { + // This should not be reached since the generation should be managed externally. + throw new Error(`Cannot create MongoDB bucket storage with provided config ${storage.type} !== ${this.type}`); + } + + const decodedConfig = MongoStorageConfig.decode(storage as any); + const client = lib_mongo.db.createMongoClient(decodedConfig); + + const database = new PowerSyncMongo(client, { database: resolvedConfig.storage.database }); + + return { + storage: new MongoBucketStorage(database, { + // TODO currently need the entire resolved config due to this + slot_name_prefix: resolvedConfig.slot_name_prefix + }), + shutDown: () => client.close(), + tearDown: () => { + logger.info(`Tearing down storage: ${database.db.namespace}...`); + return database.db.dropDatabase(); + } + } satisfies storage.ActiveStorage; + } +} diff --git a/packages/service-core/src/storage/mongo/MongoSyncBucketStorage.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoSyncBucketStorage.ts similarity index 78% rename from packages/service-core/src/storage/mongo/MongoSyncBucketStorage.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoSyncBucketStorage.ts index 0e8f92f8..738ed3fd 100644 --- a/packages/service-core/src/storage/mongo/MongoSyncBucketStorage.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoSyncBucketStorage.ts @@ -2,66 +2,38 @@ import { SqliteJsonRow, SqliteJsonValue, SqlSyncRules } from '@powersync/service import * as bson from 'bson'; import * as mongo from 'mongodb'; +import * as lib_mongo from '@powersync/lib-service-mongodb'; import { DisposableObserver, logger } from '@powersync/lib-services-framework'; +import { storage, utils } from '@powersync/service-core'; import * as timers from 'timers/promises'; -import * as db from '../../db/db-index.js'; -import * as util from '../../util/util-index.js'; -import { - BucketDataBatchOptions, - BucketStorageBatch, - CompactOptions, - DEFAULT_DOCUMENT_BATCH_LIMIT, - DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES, - FlushedResult, - ParseSyncRulesOptions, - PersistedSyncRulesContent, - ReplicationCheckpoint, - ResolveTableOptions, - ResolveTableResult, - StartBatchOptions, - SyncBucketDataBatch, - SyncRulesBucketStorage, - 
SyncRulesBucketStorageListener, - SyncRuleStatus, - TerminateOptions -} from '../BucketStorage.js'; -import { ChecksumCache, FetchPartialBucketChecksum, PartialChecksum, PartialChecksumMap } from '../ChecksumCache.js'; import { MongoBucketStorage } from '../MongoBucketStorage.js'; -import { SourceTable } from '../SourceTable.js'; -import { - BatchedCustomWriteCheckpointOptions, - ManagedWriteCheckpointOptions, - SyncStorageLastWriteCheckpointFilters, - WriteCheckpointAPI, - WriteCheckpointMode -} from '../WriteCheckpointAPI.js'; import { PowerSyncMongo } from './db.js'; -import { BucketDataDocument, BucketDataKey, SourceKey, SyncRuleState } from './models.js'; +import { BucketDataDocument, BucketDataKey, SourceKey } from './models.js'; import { MongoBucketBatch } from './MongoBucketBatch.js'; import { MongoCompactor } from './MongoCompactor.js'; import { MongoWriteCheckpointAPI } from './MongoWriteCheckpointAPI.js'; -import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, mapOpEntry, readSingleBatch, serializeLookup } from './util.js'; +import { idPrefixFilter, mapOpEntry, readSingleBatch } from './util.js'; export class MongoSyncBucketStorage - extends DisposableObserver - implements SyncRulesBucketStorage + extends DisposableObserver + implements storage.SyncRulesBucketStorage { private readonly db: PowerSyncMongo; - private checksumCache = new ChecksumCache({ + private checksumCache = new storage.ChecksumCache({ fetchChecksums: (batch) => { return this.getChecksumsInternal(batch); } }); - private parsedSyncRulesCache: { parsed: SqlSyncRules; options: ParseSyncRulesOptions } | undefined; - private writeCheckpointAPI: WriteCheckpointAPI; + private parsedSyncRulesCache: { parsed: SqlSyncRules; options: storage.ParseSyncRulesOptions } | undefined; + private writeCheckpointAPI: storage.WriteCheckpointAPI; constructor( public readonly factory: MongoBucketStorage, public readonly group_id: number, - private readonly sync_rules: PersistedSyncRulesContent, + private readonly sync_rules: storage.PersistedSyncRulesContent, public readonly slot_name: string, - writeCheckpointMode: WriteCheckpointMode = WriteCheckpointMode.MANAGED + writeCheckpointMode: storage.WriteCheckpointMode = storage.WriteCheckpointMode.MANAGED ) { super(); this.db = factory.db; @@ -75,35 +47,35 @@ export class MongoSyncBucketStorage return this.writeCheckpointAPI.writeCheckpointMode; } - setWriteCheckpointMode(mode: WriteCheckpointMode): void { + setWriteCheckpointMode(mode: storage.WriteCheckpointMode): void { this.writeCheckpointAPI.setWriteCheckpointMode(mode); } - batchCreateCustomWriteCheckpoints(checkpoints: BatchedCustomWriteCheckpointOptions[]): Promise { + batchCreateCustomWriteCheckpoints(checkpoints: storage.BatchedCustomWriteCheckpointOptions[]): Promise { return this.writeCheckpointAPI.batchCreateCustomWriteCheckpoints( checkpoints.map((checkpoint) => ({ ...checkpoint, sync_rules_id: this.group_id })) ); } - createCustomWriteCheckpoint(checkpoint: BatchedCustomWriteCheckpointOptions): Promise { + createCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): Promise { return this.writeCheckpointAPI.createCustomWriteCheckpoint({ ...checkpoint, sync_rules_id: this.group_id }); } - createManagedWriteCheckpoint(checkpoint: ManagedWriteCheckpointOptions): Promise { + createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise { return this.writeCheckpointAPI.createManagedWriteCheckpoint(checkpoint); } - lastWriteCheckpoint(filters: SyncStorageLastWriteCheckpointFilters): 
Promise { + lastWriteCheckpoint(filters: storage.SyncStorageLastWriteCheckpointFilters): Promise { return this.writeCheckpointAPI.lastWriteCheckpoint({ ...filters, sync_rules_id: this.group_id }); } - getParsedSyncRules(options: ParseSyncRulesOptions): SqlSyncRules { + getParsedSyncRules(options: storage.ParseSyncRulesOptions): SqlSyncRules { const { parsed, options: cachedOptions } = this.parsedSyncRulesCache ?? {}; /** * Check if the cached sync rules, if present, had the same options. @@ -116,7 +88,7 @@ export class MongoSyncBucketStorage return this.parsedSyncRulesCache!.parsed; } - async getCheckpoint(): Promise { + async getCheckpoint(): Promise { const doc = await this.db.sync_rules.findOne( { _id: this.group_id }, { @@ -124,15 +96,15 @@ export class MongoSyncBucketStorage } ); return { - checkpoint: util.timestampToOpId(doc?.last_checkpoint ?? 0n), + checkpoint: utils.timestampToOpId(doc?.last_checkpoint ?? 0n), lsn: doc?.last_checkpoint_lsn ?? null }; } async startBatch( - options: StartBatchOptions, - callback: (batch: BucketStorageBatch) => Promise - ): Promise { + options: storage.StartBatchOptions, + callback: (batch: storage.BucketStorageBatch) => Promise + ): Promise { const doc = await this.db.sync_rules.findOne( { _id: this.group_id @@ -163,7 +135,7 @@ export class MongoSyncBucketStorage } } - async resolveTable(options: ResolveTableOptions): Promise { + async resolveTable(options: storage.ResolveTableOptions): Promise { const { group_id, connection_id, connection_tag, entity_descriptor } = options; const { schema, name: table, objectId, replicationColumns } = entity_descriptor; @@ -173,7 +145,7 @@ export class MongoSyncBucketStorage type: column.type, type_oid: column.typeId })); - let result: ResolveTableResult | null = null; + let result: storage.ResolveTableResult | null = null; await this.db.client.withSession(async (session) => { const col = this.db.source_tables; let doc = await col.findOne( @@ -202,7 +174,7 @@ export class MongoSyncBucketStorage await col.insertOne(doc, { session }); } - const sourceTable = new SourceTable( + const sourceTable = new storage.SourceTable( doc._id, connection_tag, objectId, @@ -230,7 +202,7 @@ export class MongoSyncBucketStorage table: sourceTable, dropTables: truncate.map( (doc) => - new SourceTable( + new storage.SourceTable( doc._id, connection_tag, doc.relation_id ?? 0, @@ -245,9 +217,9 @@ export class MongoSyncBucketStorage return result!; } - async getParameterSets(checkpoint: util.OpId, lookups: SqliteJsonValue[][]): Promise { + async getParameterSets(checkpoint: utils.OpId, lookups: SqliteJsonValue[][]): Promise { const lookupFilter = lookups.map((lookup) => { - return serializeLookup(lookup); + return storage.serializeLookup(lookup); }); const rows = await this.db.bucket_parameters .aggregate([ @@ -280,10 +252,10 @@ export class MongoSyncBucketStorage } async *getBucketDataBatch( - checkpoint: util.OpId, + checkpoint: utils.OpId, dataBuckets: Map, - options?: BucketDataBatchOptions - ): AsyncIterable { + options?: storage.BucketDataBatchOptions + ): AsyncIterable { if (dataBuckets.size == 0) { return; } @@ -307,8 +279,8 @@ export class MongoSyncBucketStorage }); } - const limit = options?.limit ?? DEFAULT_DOCUMENT_BATCH_LIMIT; - const sizeLimit = options?.chunkLimitBytes ?? DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES; + const limit = options?.limit ?? storage.DEFAULT_DOCUMENT_BATCH_LIMIT; + const sizeLimit = options?.chunkLimitBytes ?? 
storage.DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES; const cursor = this.db.bucket_data.find( { @@ -348,12 +320,12 @@ export class MongoSyncBucketStorage } let batchSize = 0; - let currentBatch: util.SyncBucketData | null = null; + let currentBatch: utils.SyncBucketData | null = null; let targetOp: bigint | null = null; // Ordered by _id, meaning buckets are grouped together for (let rawData of data) { - const row = bson.deserialize(rawData, BSON_DESERIALIZE_OPTIONS) as BucketDataDocument; + const row = bson.deserialize(rawData, storage.BSON_DESERIALIZE_OPTIONS) as BucketDataDocument; const bucket = row._id.b; if (currentBatch == null || currentBatch.bucket != bucket || batchSize >= sizeLimit) { @@ -408,11 +380,11 @@ export class MongoSyncBucketStorage } } - async getChecksums(checkpoint: util.OpId, buckets: string[]): Promise { + async getChecksums(checkpoint: utils.OpId, buckets: string[]): Promise { return this.checksumCache.getChecksumMap(checkpoint, buckets); } - private async getChecksumsInternal(batch: FetchPartialBucketChecksum[]): Promise { + private async getChecksumsInternal(batch: storage.FetchPartialBucketChecksum[]): Promise { if (batch.length == 0) { return new Map(); } @@ -460,7 +432,7 @@ export class MongoSyncBucketStorage ) .toArray(); - return new Map( + return new Map( aggregate.map((doc) => { return [ doc._id, @@ -469,13 +441,13 @@ export class MongoSyncBucketStorage partialCount: doc.count, partialChecksum: Number(BigInt(doc.checksum_total) & 0xffffffffn) & 0xffffffff, isFullChecksum: doc.has_clear_op == 1 - } satisfies PartialChecksum + } satisfies storage.PartialChecksum ]; }) ); } - async terminate(options?: TerminateOptions) { + async terminate(options?: storage.TerminateOptions) { // Default is to clear the storage except when explicitly requested not to. 
if (!options || options?.clearStorage) { await this.clear(); @@ -486,7 +458,7 @@ export class MongoSyncBucketStorage }, { $set: { - state: SyncRuleState.TERMINATED, + state: storage.SyncRuleState.TERMINATED, persisted_lsn: null, snapshot_done: false } @@ -494,7 +466,7 @@ export class MongoSyncBucketStorage ); } - async getStatus(): Promise { + async getStatus(): Promise { const doc = await this.db.sync_rules.findOne( { _id: this.group_id @@ -528,9 +500,9 @@ export class MongoSyncBucketStorage } catch (e: unknown) { if (e instanceof mongo.MongoServerError && e.codeName == 'MaxTimeMSExpired') { logger.info( - `${this.slot_name} Cleared batch of data in ${db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...` + `${this.slot_name} Cleared batch of data in ${lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...` ); - await timers.setTimeout(db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS / 5); + await timers.setTimeout(lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS / 5); continue; } else { throw e; @@ -556,33 +528,33 @@ export class MongoSyncBucketStorage no_checkpoint_before: null } }, - { maxTimeMS: db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS } + { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS } ); await this.db.bucket_data.deleteMany( { _id: idPrefixFilter({ g: this.group_id }, ['b', 'o']) }, - { maxTimeMS: db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS } + { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS } ); await this.db.bucket_parameters.deleteMany( { key: idPrefixFilter({ g: this.group_id }, ['t', 'k']) }, - { maxTimeMS: db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS } + { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS } ); await this.db.current_data.deleteMany( { _id: idPrefixFilter({ g: this.group_id }, ['t', 'k']) }, - { maxTimeMS: db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS } + { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS } ); await this.db.source_tables.deleteMany( { group_id: this.group_id }, - { maxTimeMS: db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS } + { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS } ); } @@ -597,7 +569,7 @@ export class MongoSyncBucketStorage }, { $set: { - state: SyncRuleState.ACTIVE + state: storage.SyncRuleState.ACTIVE } }, { session } @@ -606,11 +578,11 @@ export class MongoSyncBucketStorage await this.db.sync_rules.updateMany( { _id: { $ne: this.group_id }, - state: SyncRuleState.ACTIVE + state: storage.SyncRuleState.ACTIVE }, { $set: { - state: SyncRuleState.STOP + state: storage.SyncRuleState.STOP } }, { session } @@ -634,7 +606,7 @@ export class MongoSyncBucketStorage ); } - async compact(options?: CompactOptions) { + async compact(options?: storage.CompactOptions) { return new MongoCompactor(this.db, this.group_id, options).compact(); } } diff --git a/packages/service-core/src/storage/mongo/MongoSyncRulesLock.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoSyncRulesLock.ts similarity index 89% rename from packages/service-core/src/storage/mongo/MongoSyncRulesLock.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoSyncRulesLock.ts index 76fee7e1..bf69a085 100644 --- a/packages/service-core/src/storage/mongo/MongoSyncRulesLock.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoSyncRulesLock.ts @@ -1,17 +1,20 @@ import crypto from 'crypto'; -import { PersistedSyncRulesContent, ReplicationLock } from '../BucketStorage.js'; -import { PowerSyncMongo } from './db.js'; import { logger } from '@powersync/lib-services-framework'; +import { storage 
} from '@powersync/service-core'; +import { PowerSyncMongo } from './db.js'; /** * Manages a lock on a sync rules document, so that only one process * replicates those sync rules at a time. */ -export class MongoSyncRulesLock implements ReplicationLock { +export class MongoSyncRulesLock implements storage.ReplicationLock { private readonly refreshInterval: NodeJS.Timeout; - static async createLock(db: PowerSyncMongo, sync_rules: PersistedSyncRulesContent): Promise { + static async createLock( + db: PowerSyncMongo, + sync_rules: storage.PersistedSyncRulesContent + ): Promise { const lockId = crypto.randomBytes(8).toString('hex'); const doc = await db.sync_rules.findOneAndUpdate( { _id: sync_rules.id, $or: [{ lock: null }, { 'lock.expires_at': { $lt: new Date() } }] }, diff --git a/modules/module-mongodb-storage/src/storage/implementation/MongoTestStorageFactoryGenerator.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoTestStorageFactoryGenerator.ts new file mode 100644 index 00000000..2fa11dfc --- /dev/null +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoTestStorageFactoryGenerator.ts @@ -0,0 +1,25 @@ +import { TestStorageOptions } from '@powersync/service-core'; +import { MongoBucketStorage } from '../MongoBucketStorage.js'; +import { connectMongoForTests } from './util.js'; + +export type MongoTestStorageOptions = { + url: string; + isCI: boolean; +}; + +export const MongoTestStorageFactoryGenerator = (factoryOptions: MongoTestStorageOptions) => { + return async (options?: TestStorageOptions) => { + const db = connectMongoForTests(factoryOptions.url, factoryOptions.isCI); + + // None of the tests insert data into this collection, so it was never created + if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) { + await db.db.createCollection('bucket_parameters'); + } + + if (!options?.doNotClear) { + await db.clear(); + } + + return new MongoBucketStorage(db, { slot_name_prefix: 'test_' }); + }; +}; diff --git a/packages/service-core/src/storage/mongo/MongoWriteCheckpointAPI.ts b/modules/module-mongodb-storage/src/storage/implementation/MongoWriteCheckpointAPI.ts similarity index 73% rename from packages/service-core/src/storage/mongo/MongoWriteCheckpointAPI.ts rename to modules/module-mongodb-storage/src/storage/implementation/MongoWriteCheckpointAPI.ts index 6bf4418a..b2d6fff6 100644 --- a/packages/service-core/src/storage/mongo/MongoWriteCheckpointAPI.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/MongoWriteCheckpointAPI.ts @@ -1,24 +1,16 @@ import * as framework from '@powersync/lib-services-framework'; -import { - CustomWriteCheckpointFilters, - CustomWriteCheckpointOptions, - LastWriteCheckpointFilters, - ManagedWriteCheckpointFilters, - ManagedWriteCheckpointOptions, - WriteCheckpointAPI, - WriteCheckpointMode -} from '../WriteCheckpointAPI.js'; +import { storage } from '@powersync/service-core'; import { PowerSyncMongo } from './db.js'; import { safeBulkWrite } from './util.js'; export type MongoCheckpointAPIOptions = { db: PowerSyncMongo; - mode: WriteCheckpointMode; + mode: storage.WriteCheckpointMode; }; -export class MongoWriteCheckpointAPI implements WriteCheckpointAPI { +export class MongoWriteCheckpointAPI implements storage.WriteCheckpointAPI { readonly db: PowerSyncMongo; - private _mode: WriteCheckpointMode; + private _mode: storage.WriteCheckpointMode; constructor(options: MongoCheckpointAPIOptions) { this.db = options.db; @@ -29,16 +21,16 @@ export class 
MongoWriteCheckpointAPI implements WriteCheckpointAPI { return this._mode; } - setWriteCheckpointMode(mode: WriteCheckpointMode): void { + setWriteCheckpointMode(mode: storage.WriteCheckpointMode): void { this._mode = mode; } - async batchCreateCustomWriteCheckpoints(checkpoints: CustomWriteCheckpointOptions[]): Promise { + async batchCreateCustomWriteCheckpoints(checkpoints: storage.CustomWriteCheckpointOptions[]): Promise { return batchCreateCustomWriteCheckpoints(this.db, checkpoints); } - async createCustomWriteCheckpoint(options: CustomWriteCheckpointOptions): Promise { - if (this.writeCheckpointMode !== WriteCheckpointMode.CUSTOM) { + async createCustomWriteCheckpoint(options: storage.CustomWriteCheckpointOptions): Promise { + if (this.writeCheckpointMode !== storage.WriteCheckpointMode.CUSTOM) { throw new framework.errors.ValidationError( `Creating a custom Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"` ); @@ -60,8 +52,8 @@ export class MongoWriteCheckpointAPI implements WriteCheckpointAPI { return doc!.checkpoint; } - async createManagedWriteCheckpoint(checkpoint: ManagedWriteCheckpointOptions): Promise { - if (this.writeCheckpointMode !== WriteCheckpointMode.MANAGED) { + async createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise { + if (this.writeCheckpointMode !== storage.WriteCheckpointMode.MANAGED) { throw new framework.errors.ValidationError( `Attempting to create a managed Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"` ); @@ -85,14 +77,14 @@ export class MongoWriteCheckpointAPI implements WriteCheckpointAPI { return doc!.client_id; } - async lastWriteCheckpoint(filters: LastWriteCheckpointFilters): Promise { + async lastWriteCheckpoint(filters: storage.LastWriteCheckpointFilters): Promise { switch (this.writeCheckpointMode) { - case WriteCheckpointMode.CUSTOM: + case storage.WriteCheckpointMode.CUSTOM: if (false == 'sync_rules_id' in filters) { throw new framework.errors.ValidationError(`Sync rules ID is required for custom Write Checkpoint filtering`); } return this.lastCustomWriteCheckpoint(filters); - case WriteCheckpointMode.MANAGED: + case storage.WriteCheckpointMode.MANAGED: if (false == 'heads' in filters) { throw new framework.errors.ValidationError( `Replication HEAD is required for managed Write Checkpoint filtering` @@ -102,7 +94,7 @@ export class MongoWriteCheckpointAPI implements WriteCheckpointAPI { } } - protected async lastCustomWriteCheckpoint(filters: CustomWriteCheckpointFilters) { + protected async lastCustomWriteCheckpoint(filters: storage.CustomWriteCheckpointFilters) { const { user_id, sync_rules_id } = filters; const lastWriteCheckpoint = await this.db.custom_write_checkpoints.findOne({ user_id, @@ -111,7 +103,7 @@ export class MongoWriteCheckpointAPI implements WriteCheckpointAPI { return lastWriteCheckpoint?.checkpoint ?? 
null; } - protected async lastManagedWriteCheckpoint(filters: ManagedWriteCheckpointFilters) { + protected async lastManagedWriteCheckpoint(filters: storage.ManagedWriteCheckpointFilters) { const { user_id, heads } = filters; // TODO: support multiple heads when we need to support multiple connections const lsn = heads['1']; @@ -129,7 +121,7 @@ export class MongoWriteCheckpointAPI implements WriteCheckpointAPI { export async function batchCreateCustomWriteCheckpoints( db: PowerSyncMongo, - checkpoints: CustomWriteCheckpointOptions[] + checkpoints: storage.CustomWriteCheckpointOptions[] ): Promise { if (!checkpoints.length) { return; diff --git a/packages/service-core/src/storage/mongo/OperationBatch.ts b/modules/module-mongodb-storage/src/storage/implementation/OperationBatch.ts similarity index 90% rename from packages/service-core/src/storage/mongo/OperationBatch.ts rename to modules/module-mongodb-storage/src/storage/implementation/OperationBatch.ts index 127562d5..886f98e7 100644 --- a/packages/service-core/src/storage/mongo/OperationBatch.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/OperationBatch.ts @@ -1,9 +1,7 @@ import { ToastableSqliteRow } from '@powersync/service-sync-rules'; import * as bson from 'bson'; -import { SaveOptions } from '../BucketStorage.js'; -import { isUUID } from './util.js'; -import { ReplicaId } from './models.js'; +import { storage } from '@powersync/service-core'; /** * Maximum number of operations in a batch. @@ -73,13 +71,13 @@ export class OperationBatch { } export class RecordOperation { - public readonly afterId: ReplicaId | null; - public readonly beforeId: ReplicaId; + public readonly afterId: storage.ReplicaId | null; + public readonly beforeId: storage.ReplicaId; public readonly internalBeforeKey: string; public readonly internalAfterKey: string | null; public readonly estimatedSize: number; - constructor(public readonly record: SaveOptions) { + constructor(public readonly record: storage.SaveOptions) { const afterId = record.afterReplicaId ?? null; const beforeId = record.beforeReplicaId ?? record.afterReplicaId; this.afterId = afterId; @@ -94,8 +92,8 @@ export class RecordOperation { /** * In-memory cache key - must not be persisted. 
*/ -export function cacheKey(table: bson.ObjectId, id: ReplicaId) { - if (isUUID(id)) { +export function cacheKey(table: bson.ObjectId, id: storage.ReplicaId) { + if (storage.isUUID(id)) { return `${table.toHexString()}.${id.toHexString()}`; } else if (typeof id == 'string') { return `${table.toHexString()}.${id}`; diff --git a/packages/service-core/src/storage/mongo/PersistedBatch.ts b/modules/module-mongodb-storage/src/storage/implementation/PersistedBatch.ts similarity index 93% rename from packages/service-core/src/storage/mongo/PersistedBatch.ts rename to modules/module-mongodb-storage/src/storage/implementation/PersistedBatch.ts index 77bf76fa..7b5c3082 100644 --- a/packages/service-core/src/storage/mongo/PersistedBatch.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/PersistedBatch.ts @@ -3,8 +3,8 @@ import { EvaluatedParameters, EvaluatedRow } from '@powersync/service-sync-rules import * as bson from 'bson'; import * as mongo from 'mongodb'; -import * as util from '../../util/util-index.js'; -import { SourceTable } from '../SourceTable.js'; +import { logger } from '@powersync/lib-services-framework'; +import { storage, utils } from '@powersync/service-core'; import { currentBucketKey } from './MongoBucketBatch.js'; import { MongoIdSequence } from './MongoIdSequence.js'; import { PowerSyncMongo } from './db.js'; @@ -13,11 +13,9 @@ import { BucketParameterDocument, CurrentBucket, CurrentDataDocument, - SourceKey, - ReplicaId + SourceKey } from './models.js'; -import { replicaIdToSubkey, safeBulkWrite, serializeLookup } from './util.js'; -import { logger } from '@powersync/lib-services-framework'; +import { replicaIdToSubkey, safeBulkWrite } from './util.js'; /** * Maximum size of operations we write in a single transaction. @@ -70,8 +68,8 @@ export class PersistedBatch { saveBucketData(options: { op_seq: MongoIdSequence; - sourceKey: ReplicaId; - table: SourceTable; + sourceKey: storage.ReplicaId; + table: storage.SourceTable; evaluated: EvaluatedRow[]; before_buckets: CurrentBucket[]; }) { @@ -81,15 +79,15 @@ export class PersistedBatch { remaining_buckets.set(key, b); } - const dchecksum = util.hashDelete(replicaIdToSubkey(options.table.id, options.sourceKey)); + const dchecksum = utils.hashDelete(replicaIdToSubkey(options.table.id, options.sourceKey)); - for (let k of options.evaluated) { + for (const k of options.evaluated) { const key = currentBucketKey(k); remaining_buckets.delete(key); // INSERT const recordData = JSONBig.stringify(k.data); - const checksum = util.hashData(k.table, k.id, recordData); + const checksum = utils.hashData(k.table, k.id, recordData); this.currentSize += recordData.length + 200; const op_id = options.op_seq.next(); @@ -145,8 +143,8 @@ export class PersistedBatch { saveParameterData(data: { op_seq: MongoIdSequence; - sourceKey: ReplicaId; - sourceTable: SourceTable; + sourceKey: storage.ReplicaId; + sourceTable: storage.SourceTable; evaluated: EvaluatedParameters[]; existing_lookups: bson.Binary[]; }) { @@ -166,7 +164,7 @@ export class PersistedBatch { // 1. 
Insert new entries for (let result of evaluated) { - const binLookup = serializeLookup(result.lookup); + const binLookup = storage.serializeLookup(result.lookup); const hex = binLookup.toString('base64'); remaining_lookups.delete(hex); diff --git a/packages/service-core/src/storage/mongo/db.ts b/modules/module-mongodb-storage/src/storage/implementation/db.ts similarity index 84% rename from packages/service-core/src/storage/mongo/db.ts rename to modules/module-mongodb-storage/src/storage/implementation/db.ts index 99bad094..5e20d1b8 100644 --- a/packages/service-core/src/storage/mongo/db.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/db.ts @@ -1,8 +1,8 @@ +import * as lib_mongo from '@powersync/lib-service-mongodb'; +import { storage } from '@powersync/service-core'; import * as mongo from 'mongodb'; -import { configFile } from '@powersync/service-types'; -import * as db from '../../db/db-index.js'; -import * as locks from '../../locks/locks-index.js'; +import { MongoStorageConfig } from '../../types/types.js'; import { BucketDataDocument, BucketParameterDocument, @@ -14,7 +14,6 @@ import { SyncRuleDocument, WriteCheckpointDocument } from './models.js'; -import { BSON_DESERIALIZE_OPTIONS } from './util.js'; export interface PowerSyncMongoOptions { /** @@ -23,10 +22,6 @@ export interface PowerSyncMongoOptions { database?: string; } -export function createPowerSyncMongo(config: configFile.PowerSyncConfig['storage']) { - return new PowerSyncMongo(db.mongo.createMongoClient(config), { database: config.database }); -} - export class PowerSyncMongo { readonly current_data: mongo.Collection; readonly bucket_data: mongo.Collection; @@ -37,7 +32,7 @@ export class PowerSyncMongo { readonly custom_write_checkpoints: mongo.Collection; readonly write_checkpoints: mongo.Collection; readonly instance: mongo.Collection; - readonly locks: mongo.Collection; + readonly locks: mongo.Collection; readonly client: mongo.MongoClient; readonly db: mongo.Db; @@ -46,7 +41,7 @@ export class PowerSyncMongo { this.client = client; const db = client.db(options?.database, { - ...BSON_DESERIALIZE_OPTIONS + ...storage.BSON_DESERIALIZE_OPTIONS }); this.db = db; @@ -86,3 +81,7 @@ export class PowerSyncMongo { await this.db.dropDatabase(); } } + +export function createPowerSyncMongo(config: MongoStorageConfig) { + return new PowerSyncMongo(lib_mongo.createMongoClient(config), { database: config.database }); +} diff --git a/packages/service-core/src/storage/mongo/models.ts b/modules/module-mongodb-storage/src/storage/implementation/models.ts similarity index 82% rename from packages/service-core/src/storage/mongo/models.ts rename to modules/module-mongodb-storage/src/storage/implementation/models.ts index a33f81fa..b24e9595 100644 --- a/packages/service-core/src/storage/mongo/models.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/models.ts @@ -1,3 +1,4 @@ +import { storage } from '@powersync/service-core'; import { SqliteJsonValue } from '@powersync/service-sync-rules'; import * as bson from 'bson'; @@ -79,37 +80,10 @@ export interface IdSequenceDocument { op_id: bigint; } -export enum SyncRuleState { - /** - * New sync rules - needs to be processed (initial replication). - * - * While multiple sets of sync rules _can_ be in PROCESSING, - * it's generally pointless, so we only keep one in that state. - */ - PROCESSING = 'PROCESSING', - - /** - * Sync rule processing is done, and can be used for sync. - * - * Only one set of sync rules should be in ACTIVE state. 
- */ - ACTIVE = 'ACTIVE', - /** - * This state is used when the sync rules has been replaced, - * and replication is or should be stopped. - */ - STOP = 'STOP', - /** - * After sync rules have been stopped, the data needs to be - * deleted. Once deleted, the state is TERMINATED. - */ - TERMINATED = 'TERMINATED' -} - export interface SyncRuleDocument { _id: number; - state: SyncRuleState; + state: storage.SyncRuleState; /** * True if initial snapshot has been replicated. diff --git a/packages/service-core/src/storage/mongo/util.ts b/modules/module-mongodb-storage/src/storage/implementation/util.ts similarity index 68% rename from packages/service-core/src/storage/mongo/util.ts rename to modules/module-mongodb-storage/src/storage/implementation/util.ts index cbfe5e4d..e475217b 100644 --- a/packages/service-core/src/storage/mongo/util.ts +++ b/modules/module-mongodb-storage/src/storage/implementation/util.ts @@ -1,27 +1,10 @@ -import { SqliteJsonValue } from '@powersync/service-sync-rules'; +import { storage, utils } from '@powersync/service-core'; import * as bson from 'bson'; import * as crypto from 'crypto'; import * as mongo from 'mongodb'; import * as uuid from 'uuid'; -import { OplogEntry } from '../../util/protocol-types.js'; -import { ID_NAMESPACE, timestampToOpId } from '../../util/utils.js'; -import { BucketDataDocument, ReplicaId } from './models.js'; - -/** - * Lookup serialization must be number-agnostic. I.e. normalize numbers, instead of preserving numbers. - * @param lookup - */ - -export function serializeLookup(lookup: SqliteJsonValue[]) { - const normalized = lookup.map((value) => { - if (typeof value == 'number' && Number.isInteger(value)) { - return BigInt(value); - } else { - return value; - } - }); - return new bson.Binary(bson.serialize({ l: normalized })); -} +import { PowerSyncMongo } from './db.js'; +import { BucketDataDocument } from './models.js'; export function idPrefixFilter(prefix: Partial, rest: (keyof T)[]): mongo.Condition { let filter = { @@ -86,15 +69,10 @@ export async function readSingleBatch(cursor: mongo.FindCursor): Promise<{ } } -export const BSON_DESERIALIZE_OPTIONS: bson.DeserializeOptions = { - // use bigint instead of Long - useBigInt64: true -}; - -export function mapOpEntry(row: BucketDataDocument): OplogEntry { +export function mapOpEntry(row: BucketDataDocument): utils.OplogEntry { if (row.op == 'PUT' || row.op == 'REMOVE') { return { - op_id: timestampToOpId(row._id.o), + op_id: utils.timestampToOpId(row._id.o), op: row.op, object_type: row.table, object_id: row.row_id, @@ -106,56 +84,44 @@ export function mapOpEntry(row: BucketDataDocument): OplogEntry { // MOVE, CLEAR return { - op_id: timestampToOpId(row._id.o), + op_id: utils.timestampToOpId(row._id.o), op: row.op, checksum: Number(row.checksum) }; } } -/** - * Returns true if two ReplicaId values are the same (serializes to the same BSON value). 
- */ -export function replicaIdEquals(a: ReplicaId, b: ReplicaId) { - if (a === b) { - return true; - } else if (typeof a == 'string' && typeof b == 'string') { - return a == b; - } else if (isUUID(a) && isUUID(b)) { - return a.equals(b); - } else if (a == null && b == null) { - return true; - } else if (a != null || b != null) { - return false; - } else { - // There are many possible primitive values, this covers them all - return (bson.serialize({ id: a }) as Buffer).equals(bson.serialize({ id: b })); - } -} - -export function replicaIdToSubkey(table: bson.ObjectId, id: ReplicaId): string { - if (isUUID(id)) { +export function replicaIdToSubkey(table: bson.ObjectId, id: storage.ReplicaId): string { + if (storage.isUUID(id)) { // Special case for UUID for backwards-compatiblity return `${table.toHexString()}/${id.toHexString()}`; } else { // Hashed UUID from the table and id const repr = bson.serialize({ table, id }); - return uuid.v5(repr, ID_NAMESPACE); + return uuid.v5(repr, utils.ID_NAMESPACE); } } /** - * True if this is a bson.UUID. - * - * Works even with multiple copies of the bson package. + * Helper function for creating a MongoDB client from consumers of this package */ -export function isUUID(value: any): value is bson.UUID { - if (value == null || typeof value != 'object') { - return false; - } - const uuid = value as bson.UUID; - return uuid._bsontype == 'Binary' && uuid.sub_type == bson.Binary.SUBTYPE_UUID; -} +export const createMongoClient = (url: string, options?: mongo.MongoClientOptions) => { + return new mongo.MongoClient(url, options); +}; + +/** + * Helper for unit tests + */ +export const connectMongoForTests = (url: string, isCI: boolean) => { + // Short timeout for tests, to fail fast when the server is not available. + // Slightly longer timeouts for CI, to avoid arbitrary test failures + const client = createMongoClient(url, { + connectTimeoutMS: isCI ? 15_000 : 5_000, + socketTimeoutMS: isCI ? 15_000 : 5_000, + serverSelectionTimeoutMS: isCI ? 
15_000 : 2_500 + }); + return new PowerSyncMongo(client); +}; /** * MongoDB bulkWrite internally splits the operations into batches diff --git a/modules/module-mongodb-storage/src/storage/storage-index.ts b/modules/module-mongodb-storage/src/storage/storage-index.ts new file mode 100644 index 00000000..d4d3373b --- /dev/null +++ b/modules/module-mongodb-storage/src/storage/storage-index.ts @@ -0,0 +1,14 @@ +export * from './implementation/db.js'; +export * from './implementation/models.js'; +export * from './implementation/MongoBucketBatch.js'; +export * from './implementation/MongoIdSequence.js'; +export * from './implementation/MongoPersistedSyncRules.js'; +export * from './implementation/MongoPersistedSyncRulesContent.js'; +export * from './implementation/MongoStorageProvider.js'; +export * from './implementation/MongoSyncBucketStorage.js'; +export * from './implementation/MongoSyncRulesLock.js'; +export * from './implementation/MongoTestStorageFactoryGenerator.js'; +export * from './implementation/OperationBatch.js'; +export * from './implementation/PersistedBatch.js'; +export * from './implementation/util.js'; +export * from './MongoBucketStorage.js'; diff --git a/modules/module-mongodb-storage/src/types/types.ts b/modules/module-mongodb-storage/src/types/types.ts new file mode 100644 index 00000000..50e6d358 --- /dev/null +++ b/modules/module-mongodb-storage/src/types/types.ts @@ -0,0 +1,18 @@ +import * as lib_mongo from '@powersync/lib-service-mongodb'; +import * as service_types from '@powersync/service-types'; +import * as t from 'ts-codec'; + +export const MongoStorageConfig = lib_mongo.BaseMongoConfig.and( + t.object({ + // Add any mongo specific storage settings here in future + }) +); + +export type MongoStorageConfig = t.Encoded; +export type MongoStorageConfigDecoded = t.Decoded; + +export function isMongoStorageConfig( + config: service_types.configFile.GenericStorageConfig +): config is MongoStorageConfig { + return config.type == lib_mongo.MONGO_CONNECTION_TYPE; +} diff --git a/packages/service-core/test/src/__snapshots__/sync.test.ts.snap b/modules/module-mongodb-storage/test/src/__snapshots__/storage_sync.test.ts.snap similarity index 100% rename from packages/service-core/test/src/__snapshots__/sync.test.ts.snap rename to modules/module-mongodb-storage/test/src/__snapshots__/storage_sync.test.ts.snap diff --git a/modules/module-mongodb-storage/test/src/env.ts b/modules/module-mongodb-storage/test/src/env.ts new file mode 100644 index 00000000..1c86eae3 --- /dev/null +++ b/modules/module-mongodb-storage/test/src/env.ts @@ -0,0 +1,6 @@ +import { utils } from '@powersync/lib-services-framework'; + +export const env = utils.collectEnvironmentVariables({ + MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), + CI: utils.type.boolean.default('false') +}); diff --git a/modules/module-mongodb-storage/test/src/setup.ts b/modules/module-mongodb-storage/test/src/setup.ts new file mode 100644 index 00000000..debe6601 --- /dev/null +++ b/modules/module-mongodb-storage/test/src/setup.ts @@ -0,0 +1,9 @@ +import { container } from '@powersync/lib-services-framework'; +import { test_utils } from '@powersync/service-core-tests'; +import { beforeAll } from 'vitest'; + +beforeAll(async () => { + // Executes for every test file + container.registerDefaults(); + await test_utils.initMetrics(); +}); diff --git a/modules/module-mongodb-storage/test/src/storage.test.ts b/modules/module-mongodb-storage/test/src/storage.test.ts new file mode 100644 index 
00000000..c84e4657 --- /dev/null +++ b/modules/module-mongodb-storage/test/src/storage.test.ts @@ -0,0 +1,7 @@ +import { register } from '@powersync/service-core-tests'; +import { describe } from 'vitest'; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; + +describe('Mongo Sync Bucket Storage', () => register.registerDataStorageTests(INITIALIZED_MONGO_STORAGE_FACTORY)); + +describe('Sync Bucket Validation', register.registerBucketValidationTests); diff --git a/modules/module-mongodb-storage/test/src/storage_compacting.test.ts b/modules/module-mongodb-storage/test/src/storage_compacting.test.ts new file mode 100644 index 00000000..64040a74 --- /dev/null +++ b/modules/module-mongodb-storage/test/src/storage_compacting.test.ts @@ -0,0 +1,6 @@ +import { MongoCompactOptions } from '@module/storage/implementation/MongoCompactor.js'; +import { register } from '@powersync/service-core-tests'; +import { describe } from 'vitest'; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; + +describe('Mongo Sync Bucket Storage Compact', () => register.registerCompactTests(INITIALIZED_MONGO_STORAGE_FACTORY, { clearBatchLimit: 2, moveBatchLimit: 1, moveBatchQueryLimit: 1 })); \ No newline at end of file diff --git a/modules/module-mongodb-storage/test/src/storage_sync.test.ts b/modules/module-mongodb-storage/test/src/storage_sync.test.ts new file mode 100644 index 00000000..2617671e --- /dev/null +++ b/modules/module-mongodb-storage/test/src/storage_sync.test.ts @@ -0,0 +1,113 @@ +import { storage } from '@powersync/service-core'; +import { register, TEST_TABLE, test_utils } from '@powersync/service-core-tests'; +import { describe, expect, test } from 'vitest'; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; + +describe('sync - mongodb', () => { + register.registerSyncTests(INITIALIZED_MONGO_STORAGE_FACTORY); + + // The split of returned results can vary depending on storage drivers + test('large batch (2)', async () => { + // Test syncing a batch of data that is small in count, + // but large enough in size to be split over multiple returned chunks. + // Similar to the above test, but splits over 1MB chunks. 
+ const sync_rules = test_utils.testRules( + ` + bucket_definitions: + global: + data: + - SELECT id, description FROM "%" + ` + ); + using factory = await INITIALIZED_MONGO_STORAGE_FACTORY(); + const bucketStorage = factory.getInstance(sync_rules); + + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { + const sourceTable = TEST_TABLE; + + const largeDescription = '0123456789'.repeat(2_000_00); + + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'test1', + description: 'test1' + }, + afterReplicaId: test_utils.rid('test1') + }); + + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'large1', + description: largeDescription + }, + afterReplicaId: test_utils.rid('large1') + }); + + // Large enough to split the returned batch + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'large2', + description: largeDescription + }, + afterReplicaId: test_utils.rid('large2') + }); + + await batch.save({ + sourceTable, + tag: storage.SaveOperationTag.INSERT, + after: { + id: 'test3', + description: 'test3' + }, + afterReplicaId: test_utils.rid('test3') + }); + }); + + const checkpoint = result!.flushed_op; + + const options: storage.BucketDataBatchOptions = {}; + + const batch1 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), options) + ); + expect(test_utils.getBatchData(batch1)).toEqual([ + { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }, + { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 } + ]); + expect(test_utils.getBatchMeta(batch1)).toEqual({ + after: '0', + has_more: true, + next_after: '2' + }); + + const batch2 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options) + ); + expect(test_utils.getBatchData(batch2)).toEqual([ + { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 } + ]); + expect(test_utils.getBatchMeta(batch2)).toEqual({ + after: '2', + has_more: true, + next_after: '3' + }); + + const batch3 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options) + ); + expect(test_utils.getBatchData(batch3)).toEqual([ + { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 } + ]); + expect(test_utils.getBatchMeta(batch3)).toEqual({ + after: '3', + has_more: false, + next_after: '4' + }); + }); +}); diff --git a/modules/module-mongodb-storage/test/src/util.ts b/modules/module-mongodb-storage/test/src/util.ts new file mode 100644 index 00000000..db3b2dd1 --- /dev/null +++ b/modules/module-mongodb-storage/test/src/util.ts @@ -0,0 +1,8 @@ +import { env } from './env.js'; + +import { MongoTestStorageFactoryGenerator } from '@module/storage/implementation/MongoTestStorageFactoryGenerator.js'; + +export const INITIALIZED_MONGO_STORAGE_FACTORY = MongoTestStorageFactoryGenerator({ + url: env.MONGO_TEST_URL, + isCI: env.CI +}); diff --git a/modules/module-mongodb-storage/test/tsconfig.json b/modules/module-mongodb-storage/test/tsconfig.json new file mode 100644 index 00000000..d802b8ee --- /dev/null +++ b/modules/module-mongodb-storage/test/tsconfig.json @@ -0,0 +1,31 @@ +{ + "extends": "../../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "baseUrl": "./", + "noEmit": true, + "esModuleInterop": true, + "skipLibCheck": true, + 
"sourceMap": true, + "paths": { + "@/*": ["../../../packages/service-core/src/*"], + "@module/*": ["../src/*"], + "@core-tests/*": ["../../../packages/service-core/test/src/*"] + } + }, + "include": ["src"], + "references": [ + { + "path": "../" + }, + { + "path": "../../../packages/service-core/test" + }, + { + "path": "../../../packages/service-core/" + }, + { + "path": "../../../packages/service-core-tests/" + } + ] +} diff --git a/modules/module-mongodb-storage/tsconfig.json b/modules/module-mongodb-storage/tsconfig.json new file mode 100644 index 00000000..bc419b8b --- /dev/null +++ b/modules/module-mongodb-storage/tsconfig.json @@ -0,0 +1,31 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true + }, + "include": ["src"], + "references": [ + { + "path": "../../packages/types" + }, + { + "path": "../../packages/jsonbig" + }, + { + "path": "../../packages/sync-rules" + }, + { + "path": "../../packages/service-core" + }, + { + "path": "../../libs/lib-services" + }, + { + "path": "../../libs/lib-mongodb" + } + ] +} diff --git a/modules/module-mongodb-storage/vitest.config.ts b/modules/module-mongodb-storage/vitest.config.ts new file mode 100644 index 00000000..60fad90a --- /dev/null +++ b/modules/module-mongodb-storage/vitest.config.ts @@ -0,0 +1,15 @@ +import tsconfigPaths from 'vite-tsconfig-paths'; +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + plugins: [tsconfigPaths()], + test: { + setupFiles: './test/src/setup.ts', + poolOptions: { + threads: { + singleThread: true + } + }, + pool: 'threads' + } +}); diff --git a/modules/module-mongodb/package.json b/modules/module-mongodb/package.json index 96dc7830..640dea3b 100644 --- a/modules/module-mongodb/package.json +++ b/modules/module-mongodb/package.json @@ -33,12 +33,15 @@ "@powersync/service-jsonbig": "workspace:*", "@powersync/service-sync-rules": "workspace:*", "@powersync/service-types": "workspace:*", + "@powersync/lib-service-mongodb": "workspace:*", "mongodb": "^6.11.0", - "ts-codec": "^1.2.2", - "uuid": "^9.0.1", - "uri-js": "^4.4.1" + "bson": "^6.8.0", + "ts-codec": "^1.3.0", + "uuid": "^9.0.1" }, "devDependencies": { - "@types/uuid": "^9.0.4" + "@types/uuid": "^9.0.4", + "@powersync/service-core-tests": "workspace:*", + "@powersync/service-module-mongodb-storage": "workspace:*" } } diff --git a/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts b/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts index a0bc519e..65ce0836 100644 --- a/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts +++ b/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts @@ -1,13 +1,13 @@ +import * as lib_mongo from '@powersync/lib-service-mongodb'; import { api, ParseSyncRulesOptions, SourceTable } from '@powersync/service-core'; -import * as mongo from 'mongodb'; - import * as sync_rules from '@powersync/service-sync-rules'; import * as service_types from '@powersync/service-types'; +import * as mongo from 'mongodb'; import { MongoManager } from '../replication/MongoManager.js'; import { constructAfterRecord, createCheckpoint } from '../replication/MongoRelation.js'; +import { CHECKPOINTS_COLLECTION } from '../replication/replication-utils.js'; import * as types from '../types/types.js'; import { escapeRegExp } from '../utils.js'; -import { CHECKPOINTS_COLLECTION } from '../replication/replication-utils.js'; export class MongoRouteAPIAdapter implements api.RouteAPI { protected 
client: mongo.MongoClient; @@ -45,7 +45,7 @@ export class MongoRouteAPIAdapter implements api.RouteAPI { async getConnectionStatus(): Promise { const base = { id: this.config.id, - uri: types.baseUri(this.config) + uri: lib_mongo.baseUri(this.config) }; try { diff --git a/modules/module-mongodb/src/index.ts b/modules/module-mongodb/src/index.ts index 6ecba2a8..f844c110 100644 --- a/modules/module-mongodb/src/index.ts +++ b/modules/module-mongodb/src/index.ts @@ -1 +1,4 @@ +export * from './api/MongoRouteAPIAdapter.js'; export * from './module/MongoModule.js'; +export * from './replication/replication-index.js'; +export * from './types/types.js'; diff --git a/modules/module-mongodb/src/module/MongoModule.ts b/modules/module-mongodb/src/module/MongoModule.ts index e50c7702..cf03cb4b 100644 --- a/modules/module-mongodb/src/module/MongoModule.ts +++ b/modules/module-mongodb/src/module/MongoModule.ts @@ -1,25 +1,22 @@ +import * as lib_mongo from '@powersync/lib-service-mongodb'; import { api, ConfigurationFileSyncRulesProvider, replication, system, TearDownOptions } from '@powersync/service-core'; import { MongoRouteAPIAdapter } from '../api/MongoRouteAPIAdapter.js'; +import { ChangeStreamReplicator } from '../replication/ChangeStreamReplicator.js'; import { ConnectionManagerFactory } from '../replication/ConnectionManagerFactory.js'; import { MongoErrorRateLimiter } from '../replication/MongoErrorRateLimiter.js'; -import { ChangeStreamReplicator } from '../replication/ChangeStreamReplicator.js'; -import * as types from '../types/types.js'; import { MongoManager } from '../replication/MongoManager.js'; import { checkSourceConfiguration } from '../replication/replication-utils.js'; +import * as types from '../types/types.js'; export class MongoModule extends replication.ReplicationModule { constructor() { super({ name: 'MongoDB', - type: types.MONGO_CONNECTION_TYPE, + type: lib_mongo.MONGO_CONNECTION_TYPE, configSchema: types.MongoConnectionConfig }); } - async initialize(context: system.ServiceContextContainer): Promise { - await super.initialize(context); - } - protected createRouteAPIAdapter(): api.RouteAPI { return new MongoRouteAPIAdapter(this.resolveConfig(this.decodedConfig!)); } diff --git a/modules/module-mongodb/src/types/types.ts b/modules/module-mongodb/src/types/types.ts index 1498193f..4ee1d18a 100644 --- a/modules/module-mongodb/src/types/types.ts +++ b/modules/module-mongodb/src/types/types.ts @@ -1,9 +1,7 @@ -import { normalizeMongoConfig } from '@powersync/service-core'; +import * as lib_mongo from '@powersync/lib-service-mongodb/types'; import * as service_types from '@powersync/service-types'; import * as t from 'ts-codec'; -export const MONGO_CONNECTION_TYPE = 'mongodb' as const; - export enum PostImagesOption { /** * Use fullDocument: updateLookup on the changeStream. @@ -53,18 +51,9 @@ export interface NormalizedMongoConnectionConfig { postImages: PostImagesOption; } -export const MongoConnectionConfig = service_types.configFile.DataSourceConfig.and( +export const MongoConnectionConfig = lib_mongo.BaseMongoConfig.and(service_types.configFile.DataSourceConfig).and( t.object({ - type: t.literal(MONGO_CONNECTION_TYPE), - /** Unique identifier for the connection - optional when a single connection is present. */ - id: t.string.optional(), - /** Tag used as reference in sync rules. Defaults to "default". Does not have to be unique. 
*/ - tag: t.string.optional(), - uri: t.string, - username: t.string.optional(), - password: t.string.optional(), - database: t.string.optional(), - + // Replication specific settings post_images: t.literal('off').or(t.literal('auto_configure')).or(t.literal('read_only')).optional() }) ); @@ -72,20 +61,21 @@ export const MongoConnectionConfig = service_types.configFile.DataSourceConfig.a /** * Config input specified when starting services */ -export type MongoConnectionConfig = t.Decoded; +export type MongoConnectionConfig = t.Encoded; +export type MongoConnectionConfigDecoded = t.Decoded; /** * Resolved version of {@link MongoConnectionConfig} */ -export type ResolvedConnectionConfig = MongoConnectionConfig & NormalizedMongoConnectionConfig; +export type ResolvedConnectionConfig = MongoConnectionConfigDecoded & NormalizedMongoConnectionConfig; /** * Validate and normalize connection options. * * Returns destructured options. */ -export function normalizeConnectionConfig(options: MongoConnectionConfig): NormalizedMongoConnectionConfig { - const base = normalizeMongoConfig(options); +export function normalizeConnectionConfig(options: MongoConnectionConfigDecoded): NormalizedMongoConnectionConfig { + const base = lib_mongo.normalizeMongoConfig(options); return { ...base, @@ -94,12 +84,3 @@ export function normalizeConnectionConfig(options: MongoConnectionConfig): Norma postImages: (options.post_images as PostImagesOption | undefined) ?? PostImagesOption.OFF }; } - -/** - * Construct a mongodb URI, without username, password or ssl options. - * - * Only contains hostname, port, database. - */ -export function baseUri(options: NormalizedMongoConnectionConfig) { - return options.uri; -} diff --git a/modules/module-mongodb/test/src/change_stream.test.ts b/modules/module-mongodb/test/src/change_stream.test.ts index 0380fce6..a35fcd67 100644 --- a/modules/module-mongodb/test/src/change_stream.test.ts +++ b/modules/module-mongodb/test/src/change_stream.test.ts @@ -1,14 +1,13 @@ -import { putOp, removeOp } from '@core-tests/stream_utils.js'; -import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; -import { BucketStorageFactory } from '@powersync/service-core'; +import { test_utils } from '@powersync/service-core-tests'; + +import { PostImagesOption } from '@module/types/types.js'; +import { storage } from '@powersync/service-core'; import * as crypto from 'crypto'; import * as mongo from 'mongodb'; import { setTimeout } from 'node:timers/promises'; import { describe, expect, test, vi } from 'vitest'; import { ChangeStreamTestContext } from './change_stream_utils.js'; -import { PostImagesOption } from '@module/types/types.js'; - -type StorageFactory = () => Promise; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; const BASIC_SYNC_RULES = ` bucket_definitions: @@ -18,10 +17,10 @@ bucket_definitions: `; describe('change stream - mongodb', { timeout: 20_000 }, function () { - defineChangeStreamTests(MONGO_STORAGE_FACTORY); + defineChangeStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); }); -function defineChangeStreamTests(factory: StorageFactory) { +function defineChangeStreamTests(factory: storage.TestStorageFactory) { test('replicating basic values', async () => { await using context = await ChangeStreamTestContext.open(factory); const { db } = context; @@ -52,10 +51,10 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data', { id: test_id.toHexString(), description: 'test1', num: 1152921504606846976n }), - 
putOp('test_data', { id: test_id.toHexString(), description: 'test2', num: 1152921504606846976n }), - putOp('test_data', { id: test_id.toHexString(), description: 'test3' }), - removeOp('test_data', test_id.toHexString()) + test_utils.putOp('test_data', { id: test_id.toHexString(), description: 'test1', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id.toHexString(), description: 'test2', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id.toHexString(), description: 'test3' }), + test_utils.removeOp('test_data', test_id.toHexString()) ]); }); @@ -86,8 +85,8 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data', { id: test_id.toHexString(), description: 'test1', num: 1152921504606846976n }), - putOp('test_data', { id: test_id.toHexString(), description: 'test2', num: 1152921504606846976n }) + test_utils.putOp('test_data', { id: test_id.toHexString(), description: 'test1', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id.toHexString(), description: 'test2', num: 1152921504606846976n }) ]); }); @@ -125,11 +124,11 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), // fullDocument is not available at the point this is replicated, resulting in it treated as a remove - removeOp('test_data', test_id!.toHexString()), - putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), - removeOp('test_data', test_id!.toHexString()) + test_utils.removeOp('test_data', test_id!.toHexString()), + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), + test_utils.removeOp('test_data', test_id!.toHexString()) ]); }); @@ -171,11 +170,11 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), // The postImage helps us get this data - putOp('test_data', { id: test_id!.toHexString(), description: 'test2', num: 1152921504606846976n }), - putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), - removeOp('test_data', test_id!.toHexString()) + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), + test_utils.removeOp('test_data', test_id!.toHexString()) ]); }); @@ -216,11 +215,11 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), // The postImage helps us get this data - putOp('test_data', { id: test_id!.toHexString(), description: 'test2', num: 1152921504606846976n }), - putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), - removeOp('test_data', test_id!.toHexString()) + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 
'test2', num: 1152921504606846976n }), + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), + test_utils.removeOp('test_data', test_id!.toHexString()) ]); }); @@ -244,7 +243,7 @@ bucket_definitions: const data = await context.getBucketData('global[]'); - expect(data).toMatchObject([putOp('test_DATA', { id: test_id, description: 'test1' })]); + expect(data).toMatchObject([test_utils.putOp('test_DATA', { id: test_id, description: 'test1' })]); }); test('replicating large values', async () => { @@ -270,10 +269,10 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data.slice(0, 1)).toMatchObject([ - putOp('test_data', { id: test_id.toHexString(), name: 'test1', description: largeDescription }) + test_utils.putOp('test_data', { id: test_id.toHexString(), name: 'test1', description: largeDescription }) ]); expect(data.slice(1)).toMatchObject([ - putOp('test_data', { id: test_id.toHexString(), name: 'test2', description: largeDescription }) + test_utils.putOp('test_data', { id: test_id.toHexString(), name: 'test2', description: largeDescription }) ]); }); @@ -302,8 +301,8 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data', { id: test_id, description: 'test1' }), - removeOp('test_data', test_id) + test_utils.putOp('test_data', { id: test_id, description: 'test1' }), + test_utils.removeOp('test_data', test_id) ]); }); @@ -330,9 +329,9 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data1', { id: test_id, description: 'test1' }), - removeOp('test_data1', test_id), - putOp('test_data2', { id: test_id, description: 'test1' }) + test_utils.putOp('test_data1', { id: test_id, description: 'test1' }), + test_utils.removeOp('test_data1', test_id), + test_utils.putOp('test_data2', { id: test_id, description: 'test1' }) ]); }); @@ -349,7 +348,7 @@ bucket_definitions: context.startStreaming(); const data = await context.getBucketData('global[]'); - expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1' })]); + expect(data).toMatchObject([test_utils.putOp('test_data', { id: test_id, description: 'test1' })]); }); test('large record', async () => { @@ -446,8 +445,8 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([ - putOp('test_data', { id: test_id!.toHexString(), description: 'test1' }), - putOp('test_data', { id: test_id!.toHexString(), description: 'test2' }) + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test1' }), + test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2' }) ]); }); diff --git a/modules/module-mongodb/test/src/change_stream_utils.ts b/modules/module-mongodb/test/src/change_stream_utils.ts index 77a5d964..2209f411 100644 --- a/modules/module-mongodb/test/src/change_stream_utils.ts +++ b/modules/module-mongodb/test/src/change_stream_utils.ts @@ -1,12 +1,12 @@ import { ActiveCheckpoint, BucketStorageFactory, OpId, SyncRulesBucketStorage } from '@powersync/service-core'; -import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js'; -import { fromAsync } from '@core-tests/stream_utils.js'; -import { MongoManager } from '@module/replication/MongoManager.js'; import { ChangeStream, ChangeStreamOptions } from '@module/replication/ChangeStream.js'; -import * as mongo from 'mongodb'; +import { MongoManager } from 
'@module/replication/MongoManager.js'; import { createCheckpoint } from '@module/replication/MongoRelation.js'; import { NormalizedMongoConnectionConfig } from '@module/types/types.js'; +import { test_utils } from '@powersync/service-core-tests'; +import * as mongo from 'mongodb'; +import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js'; export class ChangeStreamTestContext { private _walStream?: ChangeStream; @@ -102,7 +102,7 @@ export class ChangeStreamTestContext { async getBucketsDataBatch(buckets: Record, options?: { timeout?: number }) { let checkpoint = await this.getCheckpoint(options); const map = new Map(Object.entries(buckets)); - return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); + return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); } async getBucketData( @@ -117,7 +117,7 @@ export class ChangeStreamTestContext { limit: options?.limit, chunkLimitBytes: options?.chunkLimitBytes }); - const batches = await fromAsync(batch); + const batches = await test_utils.fromAsync(batch); return batches[0]?.batch.data ?? []; } diff --git a/modules/module-mongodb/test/src/env.ts b/modules/module-mongodb/test/src/env.ts index e460c80b..7bfe0385 100644 --- a/modules/module-mongodb/test/src/env.ts +++ b/modules/module-mongodb/test/src/env.ts @@ -1,6 +1,7 @@ import { utils } from '@powersync/lib-services-framework'; export const env = utils.collectEnvironmentVariables({ + MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), MONGO_TEST_DATA_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test_data'), CI: utils.type.boolean.default('false'), SLOW_TESTS: utils.type.boolean.default('false') diff --git a/modules/module-mongodb/test/src/setup.ts b/modules/module-mongodb/test/src/setup.ts index b924cf73..fe127d8a 100644 --- a/modules/module-mongodb/test/src/setup.ts +++ b/modules/module-mongodb/test/src/setup.ts @@ -1,7 +1,10 @@ import { container } from '@powersync/lib-services-framework'; +import { test_utils } from '@powersync/service-core-tests'; import { beforeAll } from 'vitest'; -beforeAll(() => { +beforeAll(async () => { // Executes for every test file container.registerDefaults(); + + await test_utils.initMetrics(); }); diff --git a/modules/module-mongodb/test/src/slow_tests.test.ts b/modules/module-mongodb/test/src/slow_tests.test.ts index 535e967c..4225cfbb 100644 --- a/modules/module-mongodb/test/src/slow_tests.test.ts +++ b/modules/module-mongodb/test/src/slow_tests.test.ts @@ -1,30 +1,21 @@ -import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; -import { BucketStorageFactory } from '@powersync/service-core'; +import { storage } from '@powersync/service-core'; import * as mongo from 'mongodb'; import { setTimeout } from 'node:timers/promises'; import { describe, expect, test } from 'vitest'; import { ChangeStreamTestContext, setSnapshotHistorySeconds } from './change_stream_utils.js'; import { env } from './env.js'; - -type StorageFactory = () => Promise; - -const BASIC_SYNC_RULES = ` -bucket_definitions: - global: - data: - - SELECT _id as id, description FROM "test_data" -`; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; describe('change stream slow tests - mongodb', { timeout: 60_000 }, function () { if (env.CI || env.SLOW_TESTS) { - defineSlowTests(MONGO_STORAGE_FACTORY); + defineSlowTests(INITIALIZED_MONGO_STORAGE_FACTORY); } else { // Need something in this file. 
test('no-op', () => {}); } }); -function defineSlowTests(factory: StorageFactory) { +function defineSlowTests(factory: storage.TestStorageFactory) { test('replicating snapshot with lots of data', async () => { await using context = await ChangeStreamTestContext.open(factory); // Test with low minSnapshotHistoryWindowInSeconds, to trigger: @@ -96,8 +87,10 @@ bucket_definitions: const data = await context.getBucketData('global[]', undefined, { limit: 50_000, chunkLimitBytes: 60_000_000 }); - const preDocuments = data.filter((d) => JSON.parse(d.data! as string).description.startsWith('pre')).length; - const updatedDocuments = data.filter((d) => JSON.parse(d.data! as string).description.startsWith('updated')).length; + const preDocuments = data.filter((d: any) => JSON.parse(d.data! as string).description.startsWith('pre')).length; + const updatedDocuments = data.filter((d: any) => + JSON.parse(d.data! as string).description.startsWith('updated') + ).length; // If the test works properly, preDocuments should be around 2000-3000. // The total should be around 9000-9900. diff --git a/modules/module-mongodb/test/src/util.ts b/modules/module-mongodb/test/src/util.ts index a101f77a..01312adb 100644 --- a/modules/module-mongodb/test/src/util.ts +++ b/modules/module-mongodb/test/src/util.ts @@ -1,18 +1,8 @@ import * as types from '@module/types/types.js'; -import { BucketStorageFactory, Metrics, MongoBucketStorage, OpId } from '@powersync/service-core'; -import { env } from './env.js'; -import { logger } from '@powersync/lib-services-framework'; -import { connectMongo } from '@core-tests/util.js'; +import * as mongo_storage from '@powersync/service-module-mongodb-storage'; import * as mongo from 'mongodb'; - -// The metrics need to be initialized before they can be used -await Metrics.initialise({ - disable_telemetry_sharing: true, - powersync_instance_id: 'test', - internal_metrics_endpoint: 'unused.for.tests.com' -}); -Metrics.getInstance().resetCounters(); +import { env } from './env.js'; export const TEST_URI = env.MONGO_TEST_DATA_URL; @@ -21,20 +11,10 @@ export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ uri: TEST_URI }); -export type StorageFactory = () => Promise; - -export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => { - const db = await connectMongo(); - - // None of the PG tests insert data into this collection, so it was never created - if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) { - await db.db.createCollection('bucket_parameters'); - } - - await db.clear(); - - return new MongoBucketStorage(db, { slot_name_prefix: 'test_' }); -}; +export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.MongoTestStorageFactoryGenerator({ + url: env.MONGO_TEST_URL, + isCI: env.CI +}); export async function clearTestDb(db: mongo.Db) { await db.dropDatabase(); diff --git a/modules/module-mongodb/test/tsconfig.json b/modules/module-mongodb/test/tsconfig.json index 18898c4e..d802b8ee 100644 --- a/modules/module-mongodb/test/tsconfig.json +++ b/modules/module-mongodb/test/tsconfig.json @@ -23,6 +23,9 @@ }, { "path": "../../../packages/service-core/" + }, + { + "path": "../../../packages/service-core-tests/" } ] } diff --git a/modules/module-mongodb/tsconfig.json b/modules/module-mongodb/tsconfig.json index 6afdde02..d5610094 100644 --- a/modules/module-mongodb/tsconfig.json +++ b/modules/module-mongodb/tsconfig.json @@ -23,6 +23,12 @@ }, { "path": "../../libs/lib-services" + }, + { + "path": 
"../../libs/lib-mongodb" + }, + { + "path": "../module-mongodb-storage" } ] } diff --git a/modules/module-mongodb/vitest.config.ts b/modules/module-mongodb/vitest.config.ts index 7a39c1f7..60fad90a 100644 --- a/modules/module-mongodb/vitest.config.ts +++ b/modules/module-mongodb/vitest.config.ts @@ -1,5 +1,5 @@ -import { defineConfig } from 'vitest/config'; import tsconfigPaths from 'vite-tsconfig-paths'; +import { defineConfig } from 'vitest/config'; export default defineConfig({ plugins: [tsconfigPaths()], diff --git a/modules/module-mysql/package.json b/modules/module-mysql/package.json index d3b8d2ad..33b4b1bd 100644 --- a/modules/module-mysql/package.json +++ b/modules/module-mysql/package.json @@ -37,13 +37,15 @@ "semver": "^7.5.4", "async": "^3.2.4", "mysql2": "^3.11.0", - "ts-codec": "^1.2.2", + "ts-codec": "^1.3.0", "uri-js": "^4.4.1", "uuid": "^9.0.1" }, "devDependencies": { "@types/semver": "^7.5.4", "@types/async": "^3.2.24", - "@types/uuid": "^9.0.4" + "@types/uuid": "^9.0.4", + "@powersync/service-core-tests": "workspace:*", + "@powersync/service-module-mongodb-storage": "workspace:*" } } diff --git a/modules/module-mysql/test/src/BinLogStream.test.ts b/modules/module-mysql/test/src/BinLogStream.test.ts index 5ac980cb..7295935a 100644 --- a/modules/module-mysql/test/src/BinLogStream.test.ts +++ b/modules/module-mysql/test/src/BinLogStream.test.ts @@ -1,9 +1,9 @@ -import { putOp, removeOp } from '@core-tests/stream_utils.js'; -import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js'; -import { Metrics } from '@powersync/service-core'; +import { Metrics, storage } from '@powersync/service-core'; +import { putOp, removeOp } from '@powersync/service-core-tests'; import { v4 as uuid } from 'uuid'; import { describe, expect, test } from 'vitest'; import { BinlogStreamTestContext } from './BinlogStreamUtils.js'; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; const BASIC_SYNC_RULES = ` bucket_definitions: @@ -12,11 +12,15 @@ bucket_definitions: - SELECT id, description FROM "test_data" `; -describe('Binlog stream - mongodb', { timeout: 20_000 }, function () { - defineBinlogStreamTests(MONGO_STORAGE_FACTORY); -}); +describe( + ' Binlog stream - mongodb', + function () { + defineBinlogStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); + }, + { timeout: 20_000 } +); -function defineBinlogStreamTests(factory: StorageFactory) { +function defineBinlogStreamTests(factory: storage.TestStorageFactory) { test('Replicate basic values', async () => { await using context = await BinlogStreamTestContext.open(factory); const { connectionManager } = context; diff --git a/modules/module-mysql/test/src/BinlogStreamUtils.ts b/modules/module-mysql/test/src/BinlogStreamUtils.ts index 5cf3f0dd..be6c3064 100644 --- a/modules/module-mysql/test/src/BinlogStreamUtils.ts +++ b/modules/module-mysql/test/src/BinlogStreamUtils.ts @@ -1,18 +1,18 @@ +import { readExecutedGtid } from '@module/common/read-executed-gtid.js'; +import { BinLogStream, BinLogStreamOptions } from '@module/replication/BinLogStream.js'; +import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js'; +import { logger } from '@powersync/lib-services-framework'; import { ActiveCheckpoint, BucketStorageFactory, OpId, OplogEntry, + storage, SyncRulesBucketStorage } from '@powersync/service-core'; -import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js'; -import { fromAsync } from '@core-tests/stream_utils.js'; -import { BinLogStream, BinLogStreamOptions } from 
'@module/replication/BinLogStream.js'; -import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js'; +import { test_utils } from '@powersync/service-core-tests'; import mysqlPromise from 'mysql2/promise'; -import { readExecutedGtid } from '@module/common/read-executed-gtid.js'; -import { logger } from '@powersync/lib-services-framework'; -import { StorageFactory } from '@core-tests/util.js'; +import { clearTestDb, TEST_CONNECTION_OPTIONS } from './util.js'; /** * Tests operating on the binlog stream need to configure the stream and manage asynchronous @@ -28,7 +28,7 @@ export class BinlogStreamTestContext { public storage?: SyncRulesBucketStorage; private replicationDone = false; - static async open(factory: StorageFactory, options?: { doNotClear?: boolean }) { + static async open(factory: storage.TestStorageFactory, options?: { doNotClear?: boolean }) { const f = await factory({ doNotClear: options?.doNotClear }); const connectionManager = new MySQLConnectionManager(TEST_CONNECTION_OPTIONS, {}); @@ -132,14 +132,14 @@ export class BinlogStreamTestContext { async getBucketsDataBatch(buckets: Record, options?: { timeout?: number }) { const checkpoint = await this.getCheckpoint(options); const map = new Map(Object.entries(buckets)); - return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); + return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); } async getBucketData(bucket: string, start = '0', options?: { timeout?: number }): Promise { const checkpoint = await this.getCheckpoint(options); const map = new Map([[bucket, start]]); const batch = this.storage!.getBucketDataBatch(checkpoint, map); - const batches = await fromAsync(batch); + const batches = await test_utils.fromAsync(batch); return batches[0]?.batch.data ?? 
[]; } } diff --git a/modules/module-mysql/test/src/env.ts b/modules/module-mysql/test/src/env.ts index 05fc76c4..53ecef64 100644 --- a/modules/module-mysql/test/src/env.ts +++ b/modules/module-mysql/test/src/env.ts @@ -2,6 +2,7 @@ import { utils } from '@powersync/lib-services-framework'; export const env = utils.collectEnvironmentVariables({ MYSQL_TEST_URI: utils.type.string.default('mysql://root:mypassword@localhost:3306/mydatabase'), + MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), CI: utils.type.boolean.default('false'), SLOW_TESTS: utils.type.boolean.default('false') }); diff --git a/modules/module-mysql/test/src/setup.ts b/modules/module-mysql/test/src/setup.ts index b924cf73..debe6601 100644 --- a/modules/module-mysql/test/src/setup.ts +++ b/modules/module-mysql/test/src/setup.ts @@ -1,7 +1,9 @@ import { container } from '@powersync/lib-services-framework'; +import { test_utils } from '@powersync/service-core-tests'; import { beforeAll } from 'vitest'; -beforeAll(() => { +beforeAll(async () => { // Executes for every test file container.registerDefaults(); + await test_utils.initMetrics(); }); diff --git a/modules/module-mysql/test/src/util.ts b/modules/module-mysql/test/src/util.ts index f87f13e8..597f03b1 100644 --- a/modules/module-mysql/test/src/util.ts +++ b/modules/module-mysql/test/src/util.ts @@ -1,9 +1,8 @@ import * as types from '@module/types/types.js'; -import { BucketStorageFactory, Metrics, MongoBucketStorage } from '@powersync/service-core'; -import { env } from './env.js'; -import mysqlPromise from 'mysql2/promise'; -import { connectMongo } from '@core-tests/util.js'; import { getMySQLVersion, isVersionAtLeast } from '@module/utils/mysql-utils.js'; +import * as mongo_storage from '@powersync/service-module-mongodb-storage'; +import mysqlPromise from 'mysql2/promise'; +import { env } from './env.js'; export const TEST_URI = env.MYSQL_TEST_URI; @@ -12,28 +11,10 @@ export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ uri: TEST_URI }); -// The metrics need to be initialized before they can be used -await Metrics.initialise({ - disable_telemetry_sharing: true, - powersync_instance_id: 'test', - internal_metrics_endpoint: 'unused.for.tests.com' +export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.MongoTestStorageFactoryGenerator({ + url: env.MONGO_TEST_URL, + isCI: env.CI }); -Metrics.getInstance().resetCounters(); - -export type StorageFactory = () => Promise; - -export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => { - const db = await connectMongo(); - - // None of the tests insert data into this collection, so it was never created - if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) { - await db.db.createCollection('bucket_parameters'); - } - - await db.clear(); - - return new MongoBucketStorage(db, { slot_name_prefix: 'test_' }); -}; export async function clearTestDb(connection: mysqlPromise.Connection) { const version = await getMySQLVersion(connection); diff --git a/modules/module-postgres/package.json b/modules/module-postgres/package.json index 236b66c5..282cece2 100644 --- a/modules/module-postgres/package.json +++ b/modules/module-postgres/package.json @@ -36,11 +36,13 @@ "@powersync/service-types": "workspace:*", "pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87", "jose": "^4.15.1", - "ts-codec": "^1.2.2", + "ts-codec": "^1.3.0", "uuid": "^9.0.1", "uri-js": "^4.4.1" }, "devDependencies": { - "@types/uuid": 
"^9.0.4" + "@types/uuid": "^9.0.4", + "@powersync/service-core-tests": "workspace:*", + "@powersync/service-module-mongodb-storage": "workspace:*" } } diff --git a/modules/module-postgres/src/index.ts b/modules/module-postgres/src/index.ts index 3b0d8719..ec110750 100644 --- a/modules/module-postgres/src/index.ts +++ b/modules/module-postgres/src/index.ts @@ -1 +1,3 @@ export * from './module/PostgresModule.js'; + +export * as pg_utils from './utils/pgwire_utils.js'; diff --git a/modules/module-postgres/src/module/PostgresModule.ts b/modules/module-postgres/src/module/PostgresModule.ts index 53e79e54..734b23c0 100644 --- a/modules/module-postgres/src/module/PostgresModule.ts +++ b/modules/module-postgres/src/module/PostgresModule.ts @@ -6,10 +6,10 @@ import { ConnectionManagerFactory } from '../replication/ConnectionManagerFactor import { PgManager } from '../replication/PgManager.js'; import { PostgresErrorRateLimiter } from '../replication/PostgresErrorRateLimiter.js'; import { checkSourceConfiguration, cleanUpReplicationSlot } from '../replication/replication-utils.js'; +import { PUBLICATION_NAME } from '../replication/WalStream.js'; import { WalStreamReplicator } from '../replication/WalStreamReplicator.js'; import * as types from '../types/types.js'; import { PostgresConnectionConfig } from '../types/types.js'; -import { PUBLICATION_NAME } from '../replication/WalStream.js'; export class PostgresModule extends replication.ReplicationModule { constructor() { diff --git a/modules/module-postgres/src/types/types.ts b/modules/module-postgres/src/types/types.ts index 6ab1c419..3629a8cb 100644 --- a/modules/module-postgres/src/types/types.ts +++ b/modules/module-postgres/src/types/types.ts @@ -133,6 +133,12 @@ export function normalizeConnectionConfig(options: PostgresConnectionConfig): No }; } +export function isPostgresConfig( + config: service_types.configFile.DataSourceConfig +): config is PostgresConnectionConfig { + return config.type == POSTGRES_CONNECTION_TYPE; +} + /** * Check whether the port is in a "safe" range. 
* diff --git a/modules/module-postgres/test/src/env.ts b/modules/module-postgres/test/src/env.ts index fa8f76ca..214b75ca 100644 --- a/modules/module-postgres/test/src/env.ts +++ b/modules/module-postgres/test/src/env.ts @@ -2,6 +2,7 @@ import { utils } from '@powersync/lib-services-framework'; export const env = utils.collectEnvironmentVariables({ PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'), + MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), CI: utils.type.boolean.default('false'), SLOW_TESTS: utils.type.boolean.default('false') }); diff --git a/modules/module-postgres/test/src/large_batch.test.ts b/modules/module-postgres/test/src/large_batch.test.ts index bc08083a..4d49a259 100644 --- a/modules/module-postgres/test/src/large_batch.test.ts +++ b/modules/module-postgres/test/src/large_batch.test.ts @@ -1,17 +1,16 @@ -import { MONGO_STORAGE_FACTORY, StorageFactory, StorageOptions } from '@core-tests/util.js'; +import { Metrics, storage } from '@powersync/service-core'; +import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; import { populateData } from '../../dist/utils/populate_test_data.js'; import { env } from './env.js'; -import { TEST_CONNECTION_OPTIONS } from './util.js'; +import { INITIALIZED_MONGO_STORAGE_FACTORY, TEST_CONNECTION_OPTIONS } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; -import * as timers from 'timers/promises'; -import { Metrics } from '@powersync/service-core'; describe('batch replication tests - mongodb', { timeout: 120_000 }, function () { // These are slow but consistent tests. // Not run on every test run, but we do run on CI, or when manually debugging issues. if (env.CI || env.SLOW_TESTS) { - defineBatchTests(MONGO_STORAGE_FACTORY); + defineBatchTests(INITIALIZED_MONGO_STORAGE_FACTORY); } else { // Need something in this file. 
test('no-op', () => {}); @@ -23,7 +22,7 @@ const BASIC_SYNC_RULES = `bucket_definitions: data: - SELECT id, description, other FROM "test_data"`; -function defineBatchTests(factory: StorageFactory) { +function defineBatchTests(factory: storage.TestStorageFactory) { test('update large record', async () => { await using context = await WalStreamTestContext.open(factory); // This test generates a large transaction in MongoDB, despite the replicated data diff --git a/modules/module-postgres/test/src/schema_changes.test.ts b/modules/module-postgres/test/src/schema_changes.test.ts index daeffd6c..c6bb3de2 100644 --- a/modules/module-postgres/test/src/schema_changes.test.ts +++ b/modules/module-postgres/test/src/schema_changes.test.ts @@ -1,8 +1,9 @@ -import { compareIds, putOp, removeOp } from '@core-tests/stream_utils.js'; -import { reduceBucket } from '@powersync/service-core'; -import { setTimeout } from 'node:timers/promises'; +import { compareIds, putOp, reduceBucket, removeOp, test_utils } from '@powersync/service-core-tests'; +import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; -import { INITIALIZED_MONGO_STORAGE_FACTORY, StorageFactory } from './util.js'; + +import { storage } from '@powersync/service-core'; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; describe('schema changes', { timeout: 20_000 }, function () { @@ -16,14 +17,14 @@ bucket_definitions: - SELECT id, * FROM "test_data" `; -const PUT_T1 = putOp('test_data', { id: 't1', description: 'test1' }); -const PUT_T2 = putOp('test_data', { id: 't2', description: 'test2' }); -const PUT_T3 = putOp('test_data', { id: 't3', description: 'test3' }); +const PUT_T1 = test_utils.putOp('test_data', { id: 't1', description: 'test1' }); +const PUT_T2 = test_utils.putOp('test_data', { id: 't2', description: 'test2' }); +const PUT_T3 = test_utils.putOp('test_data', { id: 't3', description: 'test3' }); -const REMOVE_T1 = removeOp('test_data', 't1'); -const REMOVE_T2 = removeOp('test_data', 't2'); +const REMOVE_T1 = test_utils.removeOp('test_data', 't1'); +const REMOVE_T2 = test_utils.removeOp('test_data', 't2'); -function defineTests(factory: StorageFactory) { +function defineTests(factory: storage.TestStorageFactory) { test('re-create table', async () => { await using context = await WalStreamTestContext.open(factory); @@ -544,7 +545,7 @@ function defineTests(factory: StorageFactory) { ); // Need some delay for the snapshot to be triggered - await setTimeout(5); + await timers.setTimeout(5); let stop = false; diff --git a/modules/module-postgres/test/src/setup.ts b/modules/module-postgres/test/src/setup.ts index b924cf73..debe6601 100644 --- a/modules/module-postgres/test/src/setup.ts +++ b/modules/module-postgres/test/src/setup.ts @@ -1,7 +1,9 @@ import { container } from '@powersync/lib-services-framework'; +import { test_utils } from '@powersync/service-core-tests'; import { beforeAll } from 'vitest'; -beforeAll(() => { +beforeAll(async () => { // Executes for every test file container.registerDefaults(); + await test_utils.initMetrics(); }); diff --git a/modules/module-postgres/test/src/slow_tests.test.ts b/modules/module-postgres/test/src/slow_tests.test.ts index df522f81..fd02ed90 100644 --- a/modules/module-postgres/test/src/slow_tests.test.ts +++ b/modules/module-postgres/test/src/slow_tests.test.ts @@ -2,30 +2,35 @@ import * as bson from 'bson'; import { afterEach, describe, expect, test } from 'vitest'; import { 
WalStream, WalStreamOptions } from '../../src/replication/WalStream.js'; import { env } from './env.js'; -import { clearTestDb, connectPgPool, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; +import { + clearTestDb, + connectPgPool, + getClientCheckpoint, + INITIALIZED_MONGO_STORAGE_FACTORY, + TEST_CONNECTION_OPTIONS +} from './util.js'; import * as pgwire from '@powersync/service-jpgwire'; import { SqliteRow } from '@powersync/service-sync-rules'; -import { mapOpEntry, MongoBucketStorage } from '@/storage/storage-index.js'; -import { validateCompactedBucket } from '@core-tests/bucket_validation.js'; -import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js'; import { PgManager } from '@module/replication/PgManager.js'; +import { storage } from '@powersync/service-core'; +import { test_utils } from '@powersync/service-core-tests'; +import * as mongo_storage from '@powersync/service-module-mongodb-storage'; import * as timers from 'node:timers/promises'; -import { reduceBucket } from '@powersync/service-core'; describe('slow tests - mongodb', function () { // These are slow, inconsistent tests. // Not run on every test run, but we do run on CI, or when manually debugging issues. if (env.CI || env.SLOW_TESTS) { - defineSlowTests(MONGO_STORAGE_FACTORY); + defineSlowTests(INITIALIZED_MONGO_STORAGE_FACTORY); } else { // Need something in this file. test('no-op', () => {}); } }); -function defineSlowTests(factory: StorageFactory) { +function defineSlowTests(factory: storage.TestStorageFactory) { let walStream: WalStream | undefined; let connections: PgManager | undefined; let abortController: AbortController | undefined; @@ -74,7 +79,7 @@ function defineSlowTests(factory: StorageFactory) { const replicationConnection = await connections.replicationConnection(); const pool = connections.pool; await clearTestDb(pool); - const f = (await factory()) as MongoBucketStorage; + const f = (await factory()) as mongo_storage.storage.MongoBucketStorage; const syncRuleContent = ` bucket_definitions: @@ -171,13 +176,13 @@ bucket_definitions: const checkpoint = BigInt((await storage.getCheckpoint()).checkpoint); const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray()) .filter((row) => row._id.o <= checkpoint) - .map(mapOpEntry); + .map(mongo_storage.storage.mapOpEntry); await storage.compact({ maxOpId: checkpoint }); const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray()) .filter((row) => row._id.o <= checkpoint) - .map(mapOpEntry); + .map(mongo_storage.storage.mapOpEntry); - validateCompactedBucket(opsBefore, opsAfter); + test_utils.validateCompactedBucket(opsBefore, opsAfter); } }; @@ -202,8 +207,8 @@ bucket_definitions: const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray(); // All a single bucket in this test - const bucket = ops.map((op) => mapOpEntry(op)); - const reduced = reduceBucket(bucket); + const bucket = ops.map((op) => mongo_storage.storage.mapOpEntry(op)); + const reduced = test_utils.reduceBucket(bucket); expect(reduced).toMatchObject([ { op_id: '0', diff --git a/modules/module-postgres/test/src/util.ts b/modules/module-postgres/test/src/util.ts index 1fb0a658..7499dfd1 100644 --- a/modules/module-postgres/test/src/util.ts +++ b/modules/module-postgres/test/src/util.ts @@ -1,46 +1,25 @@ -import { connectMongo } from '@core-tests/util.js'; +import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js'; import * as types from '@module/types/types.js'; import * as pg_utils from 
'@module/utils/pgwire_utils.js'; import { logger } from '@powersync/lib-services-framework'; -import { BucketStorageFactory, Metrics, MongoBucketStorage, OpId } from '@powersync/service-core'; +import { BucketStorageFactory, OpId } from '@powersync/service-core'; import * as pgwire from '@powersync/service-jpgwire'; -import { pgwireRows } from '@powersync/service-jpgwire'; +import * as mongo_storage from '@powersync/service-module-mongodb-storage'; import { env } from './env.js'; -import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js'; - -// The metrics need to be initialized before they can be used -await Metrics.initialise({ - disable_telemetry_sharing: true, - powersync_instance_id: 'test', - internal_metrics_endpoint: 'unused.for.tests.com' -}); -Metrics.getInstance().resetCounters(); export const TEST_URI = env.PG_TEST_URL; +export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.MongoTestStorageFactoryGenerator({ + url: env.MONGO_TEST_URL, + isCI: env.CI +}); + export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ type: 'postgresql', uri: TEST_URI, sslmode: 'disable' }); -export type StorageFactory = () => Promise; - -export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => { - const db = await connectMongo(); - - // None of the PG tests insert data into this collection, so it was never created - if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) { - await db.db.createCollection('bucket_parameters'); - } - - await db.clear(); - - return new MongoBucketStorage(db, { - slot_name_prefix: 'test_' - }); -}; - export async function clearTestDb(db: pgwire.PgClient) { await db.query( "select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false and slot_name like 'test_%'" diff --git a/modules/module-postgres/test/src/validation.test.ts b/modules/module-postgres/test/src/validation.test.ts index f4eb9069..c97b3c95 100644 --- a/modules/module-postgres/test/src/validation.test.ts +++ b/modules/module-postgres/test/src/validation.test.ts @@ -1,10 +1,12 @@ -import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; import { getDebugTablesInfo } from '@module/replication/replication-utils.js'; import { expect, test } from 'vitest'; + +// Not quite a walStreamTest, but it helps to manage the connection +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; test('validate tables', async () => { - await using context = await WalStreamTestContext.open(MONGO_STORAGE_FACTORY); + await using context = await WalStreamTestContext.open(INITIALIZED_MONGO_STORAGE_FACTORY); const { pool } = context; await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`); diff --git a/modules/module-postgres/test/src/wal_stream.test.ts b/modules/module-postgres/test/src/wal_stream.test.ts index 0398f1da..f444696b 100644 --- a/modules/module-postgres/test/src/wal_stream.test.ts +++ b/modules/module-postgres/test/src/wal_stream.test.ts @@ -1,13 +1,11 @@ -import { putOp, removeOp } from '@core-tests/stream_utils.js'; -import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; -import { BucketStorageFactory, Metrics } from '@powersync/service-core'; +import { MissingReplicationSlotError } from '@module/replication/WalStream.js'; +import { Metrics, storage } from '@powersync/service-core'; +import { putOp, removeOp } from '@powersync/service-core-tests'; import { 
pgwireRows } from '@powersync/service-jpgwire'; import * as crypto from 'crypto'; import { describe, expect, test } from 'vitest'; +import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js'; import { WalStreamTestContext } from './wal_stream_utils.js'; -import { MissingReplicationSlotError } from '@module/replication/WalStream.js'; - -type StorageFactory = () => Promise; const BASIC_SYNC_RULES = ` bucket_definitions: @@ -17,10 +15,10 @@ bucket_definitions: `; describe('wal stream - mongodb', { timeout: 20_000 }, function () { - defineWalStreamTests(MONGO_STORAGE_FACTORY); + defineWalStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY); }); -function defineWalStreamTests(factory: StorageFactory) { +function defineWalStreamTests(factory: storage.TestStorageFactory) { test('replicating basic values', async () => { await using context = await WalStreamTestContext.open(factory); const { pool } = context; diff --git a/modules/module-postgres/test/src/wal_stream_utils.ts b/modules/module-postgres/test/src/wal_stream_utils.ts index 3cde14cf..ae549b70 100644 --- a/modules/module-postgres/test/src/wal_stream_utils.ts +++ b/modules/module-postgres/test/src/wal_stream_utils.ts @@ -1,10 +1,9 @@ -import { fromAsync } from '@core-tests/stream_utils.js'; import { PgManager } from '@module/replication/PgManager.js'; import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js'; -import { BucketStorageFactory, OplogEntry, SyncRulesBucketStorage } from '@powersync/service-core'; +import { BucketStorageFactory, OplogEntry, storage, SyncRulesBucketStorage } from '@powersync/service-core'; +import { test_utils } from '@powersync/service-core-tests'; import * as pgwire from '@powersync/service-jpgwire'; import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; -import { StorageOptions } from '@core-tests/util.js'; export class WalStreamTestContext implements AsyncDisposable { private _walStream?: WalStream; @@ -20,7 +19,7 @@ export class WalStreamTestContext implements AsyncDisposable { * This configures all the context, and tears it down afterwards. */ static async open( - factory: (options: StorageOptions) => Promise, + factory: (options: storage.TestStorageOptions) => Promise, options?: { doNotClear?: boolean } ) { const f = await factory({ doNotClear: options?.doNotClear }); @@ -132,7 +131,7 @@ export class WalStreamTestContext implements AsyncDisposable { async getBucketsDataBatch(buckets: Record, options?: { timeout?: number }) { let checkpoint = await this.getCheckpoint(options); const map = new Map(Object.entries(buckets)); - return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); + return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); } /** @@ -146,7 +145,7 @@ export class WalStreamTestContext implements AsyncDisposable { while (true) { const batch = this.storage!.getBucketDataBatch(checkpoint, map); - const batches = await fromAsync(batch); + const batches = await test_utils.fromAsync(batch); data = data.concat(batches[0]?.batch.data ?? []); if (batches.length == 0 || !batches[0]!.batch.has_more) { break; @@ -164,7 +163,7 @@ export class WalStreamTestContext implements AsyncDisposable { const { checkpoint } = await this.storage!.getCheckpoint(); const map = new Map([[bucket, start]]); const batch = this.storage!.getBucketDataBatch(checkpoint, map); - const batches = await fromAsync(batch); + const batches = await test_utils.fromAsync(batch); return batches[0]?.batch.data ?? 
[]; } } diff --git a/package.json b/package.json index 1846299d..a0ff8792 100644 --- a/package.json +++ b/package.json @@ -24,7 +24,7 @@ "@changesets/cli": "^2.27.8", "@types/node": "^22.5.5", "async": "^3.2.4", - "bson": "^6.6.0", + "bson": "^6.8.0", "concurrently": "^8.2.2", "inquirer": "^9.2.7", "npm-check-updates": "^17.1.2", diff --git a/packages/rsocket-router/package.json b/packages/rsocket-router/package.json index 91a57646..fc58788e 100644 --- a/packages/rsocket-router/package.json +++ b/packages/rsocket-router/package.json @@ -20,14 +20,14 @@ "dependencies": { "@powersync/lib-services-framework": "workspace:*", "rsocket-core": "1.0.0-alpha.3", - "ts-codec": "^1.2.2", + "ts-codec": "^1.3.0", "uuid": "^9.0.1", "ws": "^8.17.0" }, "devDependencies": { "@types/uuid": "^9.0.4", "@types/ws": "~8.2.0", - "bson": "^6.6.0", + "bson": "^6.8.0", "rsocket-websocket-client": "1.0.0-alpha.3" } } diff --git a/packages/service-core-tests/LICENSE b/packages/service-core-tests/LICENSE new file mode 100644 index 00000000..c8efd46c --- /dev/null +++ b/packages/service-core-tests/LICENSE @@ -0,0 +1,67 @@ +# Functional Source License, Version 1.1, Apache 2.0 Future License + +## Abbreviation + +FSL-1.1-Apache-2.0 + +## Notice + +Copyright 2023-2024 Journey Mobile, Inc. + +## Terms and Conditions + +### Licensor ("We") + +The party offering the Software under these Terms and Conditions. + +### The Software + +The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software. + +### License Grant + +Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below. + +### Permitted Purpose + +A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that: + +1. substitutes for the Software; +2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or +3. offers the same or substantially similar functionality as the Software. + +Permitted Purposes specifically include using the Software: + +1. for your internal use and access; +2. for non-commercial education; +3. for non-commercial research; and +4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. + +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. 
+ +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. + +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/packages/service-core-tests/README.md b/packages/service-core-tests/README.md new file mode 100644 index 00000000..bc376c69 --- /dev/null +++ b/packages/service-core-tests/README.md @@ -0,0 +1,5 @@ +# PowerSync Service Core Tests + +A small helper package which exposes common unit tests and test utility functions. + +This package is used in various modules for their unit tests. 
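+
+For example, a storage provider can register the shared test suites against its own storage factory, as the MongoDB storage module's tests in this repository do:
+
+```ts
+import { register } from '@powersync/service-core-tests';
+import { describe } from 'vitest';
+
+import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
+
+describe('Mongo Sync Bucket Storage', () => register.registerDataStorageTests(INITIALIZED_MONGO_STORAGE_FACTORY));
+
+describe('Sync Bucket Validation', register.registerBucketValidationTests);
+```
+
+Here `INITIALIZED_MONGO_STORAGE_FACTORY` is the consuming module's own test storage factory (see `MongoTestStorageFactoryGenerator` in the MongoDB storage module).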
\ No newline at end of file diff --git a/packages/service-core-tests/package.json b/packages/service-core-tests/package.json new file mode 100644 index 00000000..9bd23f12 --- /dev/null +++ b/packages/service-core-tests/package.json @@ -0,0 +1,28 @@ +{ + "name": "@powersync/service-core-tests", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "publishConfig": { + "access": "public" + }, + "version": "0.1.0", + "main": "dist/index.js", + "license": "FSL-1.1-Apache-2.0", + "type": "module", + "scripts": { + "build": "tsc -b", + "clean": "rm -rf ./dist && tsc -b --clean" + }, + "dependencies": { + "@powersync/service-core": "workspace:^", + "@powersync/service-jsonbig": "workspace:^", + "@powersync/service-sync-rules": "workspace:^" + }, + "peerDependencies": { + "vite-tsconfig-paths": "^4.3.2", + "vitest": "^2.1.1" + }, + "devDependencies": { + "typescript": "^5.6.2" + } +} diff --git a/packages/service-core-tests/src/index.ts b/packages/service-core-tests/src/index.ts new file mode 100644 index 00000000..099d18e2 --- /dev/null +++ b/packages/service-core-tests/src/index.ts @@ -0,0 +1,5 @@ +export * from './test-utils/test-utils-index.js'; +export * as test_utils from './test-utils/test-utils-index.js'; + +export * from './tests/tests-index.js'; +export * as register from './tests/tests-index.js'; diff --git a/packages/service-core-tests/src/test-utils/bucket-validation.ts b/packages/service-core-tests/src/test-utils/bucket-validation.ts new file mode 100644 index 00000000..be4b1329 --- /dev/null +++ b/packages/service-core-tests/src/test-utils/bucket-validation.ts @@ -0,0 +1,120 @@ +import { utils } from '@powersync/service-core'; +import { expect } from 'vitest'; + +/** + * Reduce a bucket to the final state as stored on the client. + * + * This keeps the final state for each row as a PUT operation. + * + * All other operations are replaced with a single CLEAR operation, + * summing their checksums, and using a 0 as an op_id. + * + * This is the function $r(B)$, as described in /docs/bucket-properties.md. 
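+ *
+ * A small worked example (op_ids, checksums and row keys below are illustrative, not taken from real data):
+ *
+ * @example
+ * ```TypeScript
+ * const reduced = reduceBucket([
+ *   { op_id: '1', op: 'PUT', object_type: 'test', object_id: 't1', subkey: 's1', checksum: 10, data: '{}' },
+ *   { op_id: '2', op: 'REMOVE', object_type: 'test', object_id: 't1', subkey: 's1', checksum: 20 },
+ *   { op_id: '3', op: 'PUT', object_type: 'test', object_id: 't2', subkey: 's2', checksum: 5, data: '{}' }
+ * ]);
+ * // reduced:
+ * // [
+ * //   { op_id: '0', op: 'CLEAR', checksum: 30 }, // checksums 10 + 20 folded into the CLEAR
+ * //   { op_id: '3', op: 'PUT', object_type: 'test', object_id: 't2', subkey: 's2', checksum: 5, data: '{}' }
+ * // ]
+ * ```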
+ */
+export function reduceBucket(operations: utils.OplogEntry[]) {
+  let rowState = new Map();
+  let otherChecksum = 0;
+
+  for (let op of operations) {
+    const key = rowKey(op);
+    if (op.op == 'PUT') {
+      const existing = rowState.get(key);
+      if (existing) {
+        otherChecksum = utils.addChecksums(otherChecksum, existing.checksum as number);
+      }
+      rowState.set(key, op);
+    } else if (op.op == 'REMOVE') {
+      const existing = rowState.get(key);
+      if (existing) {
+        otherChecksum = utils.addChecksums(otherChecksum, existing.checksum as number);
+      }
+      rowState.delete(key);
+      otherChecksum = utils.addChecksums(otherChecksum, op.checksum as number);
+    } else if (op.op == 'CLEAR') {
+      rowState.clear();
+      otherChecksum = op.checksum as number;
+    } else if (op.op == 'MOVE') {
+      otherChecksum = utils.addChecksums(otherChecksum, op.checksum as number);
+    } else {
+      throw new Error(`Unknown operation ${op.op}`);
+    }
+  }
+
+  const puts = [...rowState.values()].sort((a, b) => {
+    return Number(BigInt(a.op_id) - BigInt(b.op_id));
+  });
+
+  let finalState: utils.OplogEntry[] = [
+    // Special operation to indicate the checksum remainder
+    { op_id: '0', op: 'CLEAR', checksum: otherChecksum },
+    ...puts
+  ];
+
+  return finalState;
+}
+
+function rowKey(entry: utils.OplogEntry) {
+  return `${entry.object_type}/${entry.object_id}/${entry.subkey}`;
+}
+
+/**
+ * Validate this property, as described in /docs/bucket-properties.md:
+ *
+ * $r(B_{[..id_n]}) = r(r(B_{[..id_i]}) \cup B_{[id_{i+1}..id_n]}) \;\forall\; i \in [1..n]$
+ *
+ * We test that a client syncing the entire bucket in one go (left side of the equation)
+ * ends up with the same result as another client syncing up to operation id_i, then syncing
+ * the rest.
+ */
+export function validateBucket(bucket: utils.OplogEntry[]) {
+  const r1 = reduceBucket(bucket);
+  for (let i = 0; i <= bucket.length; i++) {
+    const r2 = reduceBucket(bucket.slice(0, i + 1));
+    const b3 = bucket.slice(i + 1);
+    const r3 = r2.concat(b3);
+    const r4 = reduceBucket(r3);
+    expect(r4).toEqual(r1);
+  }
+
+  // This is the same check, just implemented differently
+  validateCompactedBucket(bucket, bucket);
+}
+
+/**
+ * Validate these properties for a bucket $B$ and its compacted version $B'$,
+ * as described in /docs/bucket-properties.md:
+ *
+ * 1. $r(B) = r(B')$
+ * 2. $r(B_{[..c]}) = r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}) \;\forall\; c_i \in B$
+ *
+ * The first one is that the result of syncing the original bucket is the same as
+ * syncing the compacted bucket.
+ *
+ * The second property is that the result of syncing the entire original bucket is the same
+ * as syncing any partial version of it (up to op $c_i$), and then continuing to sync
+ * using the compacted bucket.
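+ *
+ * A usage sketch, mirroring how the compacting tests in this package call it
+ * (`dataBefore`/`dataAfter` stand for the same bucket's data before and after `compact()`):
+ *
+ * @example
+ * ```TypeScript
+ * const dataBefore = batchBefore.batch.data;
+ * const dataAfter = batchAfter.batch.data;
+ * validateCompactedBucket(dataBefore, dataAfter);
+ * ```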
+ */ +export function validateCompactedBucket(bucket: utils.OplogEntry[], compacted: utils.OplogEntry[]) { + // r(B_{[..c]}) + const r1 = reduceBucket(bucket); + // r(B) = r(B') + expect(reduceBucket(compacted)).toEqual(r1); + + for (let i = 0; i < bucket.length; i++) { + // r(B_{[..c_i]}) + const r2 = reduceBucket(bucket.slice(0, i + 1)); + const c_i = BigInt(bucket[i].op_id); + // B'_{[c_i+1..c]} + const b3 = compacted.filter((op) => BigInt(op.op_id) > c_i); + // r(B_{[..c_i]}) \cup B'_{[c_i+1..c]} + const r3 = r2.concat(b3); + // r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}) + const r4 = reduceBucket(r3); + expect(r4).toEqual(r1); + } +} diff --git a/packages/service-core-tests/src/test-utils/general-utils.ts b/packages/service-core-tests/src/test-utils/general-utils.ts new file mode 100644 index 00000000..7cbcf577 --- /dev/null +++ b/packages/service-core-tests/src/test-utils/general-utils.ts @@ -0,0 +1,102 @@ +import { storage, utils } from '@powersync/service-core'; +import { SqlSyncRules } from '@powersync/service-sync-rules'; +import * as bson from 'bson'; + +export const ZERO_LSN = '0/0'; + +export const PARSE_OPTIONS: storage.ParseSyncRulesOptions = { + defaultSchema: 'public' +}; + +export const BATCH_OPTIONS: storage.StartBatchOptions = { + ...PARSE_OPTIONS, + zeroLSN: ZERO_LSN, + storeCurrentData: true +}; + +export function testRules(content: string): storage.PersistedSyncRulesContent { + return { + id: 1, + sync_rules_content: content, + slot_name: 'test', + parsed(options) { + return { + id: 1, + sync_rules: SqlSyncRules.fromYaml(content, options), + slot_name: 'test' + }; + }, + lock() { + throw new Error('Not implemented'); + } + }; +} + +export function makeTestTable(name: string, columns?: string[] | undefined) { + const relId = utils.hashData('table', name, (columns ?? ['id']).join(',')); + const id = new bson.ObjectId('6544e3899293153fa7b38331'); + return new storage.SourceTable( + id, + storage.SourceTable.DEFAULT_TAG, + relId, + 'public', + name, + (columns ?? ['id']).map((column) => ({ name: column, type: 'VARCHAR', typeId: 25 })), + true + ); +} + +export function getBatchData( + batch: utils.SyncBucketData[] | storage.SyncBucketDataBatch[] | storage.SyncBucketDataBatch +) { + const first = getFirst(batch); + if (first == null) { + return []; + } + return first.data.map((d) => { + return { + op_id: d.op_id, + op: d.op, + object_id: d.object_id, + checksum: d.checksum + }; + }); +} + +export function getBatchMeta( + batch: utils.SyncBucketData[] | storage.SyncBucketDataBatch[] | storage.SyncBucketDataBatch +) { + const first = getFirst(batch); + if (first == null) { + return null; + } + return { + has_more: first.has_more, + after: first.after, + next_after: first.next_after + }; +} + +function getFirst( + batch: utils.SyncBucketData[] | storage.SyncBucketDataBatch[] | storage.SyncBucketDataBatch +): utils.SyncBucketData | null { + if (!Array.isArray(batch)) { + return batch.batch; + } + if (batch.length == 0) { + return null; + } + let first = batch[0]; + if ((first as storage.SyncBucketDataBatch).batch != null) { + return (first as storage.SyncBucketDataBatch).batch; + } else { + return first as utils.SyncBucketData; + } +} + +/** + * Replica id in the old Postgres format, for backwards-compatible tests. 
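+ *
+ * For example (assuming a single VARCHAR `id` column, as used by the tests in this package):
+ *
+ * @example
+ * ```TypeScript
+ * const replicaId = rid('t1'); // bson.UUID used as afterReplicaId / beforeReplicaId in test data
+ * ```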
+ */ +export function rid(id: string): bson.UUID { + return utils.getUuidReplicaIdentityBson({ id: id }, [{ name: 'id', type: 'VARCHAR', typeId: 25 }]); +} diff --git a/packages/service-core-tests/src/test-utils/metrics-utils.ts b/packages/service-core-tests/src/test-utils/metrics-utils.ts new file mode 100644 index 00000000..9fe704d7 --- /dev/null +++ b/packages/service-core-tests/src/test-utils/metrics-utils.ts @@ -0,0 +1,10 @@ +import { Metrics } from '@powersync/service-core'; + +export const initMetrics = async () => { + await Metrics.initialise({ + disable_telemetry_sharing: true, + powersync_instance_id: 'test', + internal_metrics_endpoint: 'unused.for.tests.com' + }); + Metrics.getInstance().resetCounters(); +}; diff --git a/packages/service-core/test/src/stream_utils.ts b/packages/service-core-tests/src/test-utils/stream_utils.ts similarity index 78% rename from packages/service-core/test/src/stream_utils.ts rename to packages/service-core-tests/src/test-utils/stream_utils.ts index bcbc9168..7ccf5889 100644 --- a/packages/service-core/test/src/stream_utils.ts +++ b/packages/service-core-tests/src/test-utils/stream_utils.ts @@ -1,7 +1,7 @@ -import { OplogEntry } from '@/util/protocol-types.js'; +import { utils } from '@powersync/service-core'; import { JSONBig } from '@powersync/service-jsonbig'; -export function putOp(table: string, data: Record): Partial { +export function putOp(table: string, data: Record): Partial { return { op: 'PUT', object_type: table, @@ -10,7 +10,7 @@ export function putOp(table: string, data: Record): Partial { +export function removeOp(table: string, id: string): Partial { return { op: 'REMOVE', object_type: table, @@ -18,7 +18,7 @@ export function removeOp(table: string, id: string): Partial { }; } -export function compareIds(a: OplogEntry, b: OplogEntry) { +export function compareIds(a: utils.OplogEntry, b: utils.OplogEntry) { return a.object_id!.localeCompare(b.object_id!); } diff --git a/packages/service-core-tests/src/test-utils/test-utils-index.ts b/packages/service-core-tests/src/test-utils/test-utils-index.ts new file mode 100644 index 00000000..1e4cbea0 --- /dev/null +++ b/packages/service-core-tests/src/test-utils/test-utils-index.ts @@ -0,0 +1,4 @@ +export * from './bucket-validation.js'; +export * from './general-utils.js'; +export * from './metrics-utils.js'; +export * from './stream_utils.js'; diff --git a/packages/service-core/test/src/bucket_validation.test.ts b/packages/service-core-tests/src/tests/register-bucket-validation-tests.ts similarity index 77% rename from packages/service-core/test/src/bucket_validation.test.ts rename to packages/service-core-tests/src/tests/register-bucket-validation-tests.ts index 31b16747..52971835 100644 --- a/packages/service-core/test/src/bucket_validation.test.ts +++ b/packages/service-core-tests/src/tests/register-bucket-validation-tests.ts @@ -1,12 +1,17 @@ -import { OplogEntry } from '@/util/protocol-types.js'; -import { describe, expect, test } from 'vitest'; -import { validateBucket } from './bucket_validation.js'; -import { reduceBucket } from '@/index.js'; +import { OplogEntry } from '@powersync/service-core'; +import { expect, test } from 'vitest'; +import * as test_utils from '../test-utils/test-utils-index.js'; -// This tests the reduceBucket function. -// While this function is not used directly in the service implementation, -// it is an important part of validating consistency in other tests. -describe('bucket validation', () => { +/** + * This tests the reduceBucket function. 
+ * While this function is not used directly in the service implementation, + * it is an important part of validating consistency in other tests. + * @example + * ```TypeScript + * describe('bucket validation', registerBucketValidationTests); + * ``` + */ +export function registerBucketValidationTests() { const ops1: OplogEntry[] = [ { op_id: '1', @@ -47,7 +52,7 @@ describe('bucket validation', () => { ]; test('reduce 1', () => { - expect(reduceBucket(ops1)).toEqual([ + expect(test_utils.reduceBucket(ops1)).toEqual([ { checksum: -1778190028, op: 'CLEAR', @@ -64,7 +69,7 @@ describe('bucket validation', () => { } ]); - expect(reduceBucket(reduceBucket(ops1))).toEqual([ + expect(test_utils.reduceBucket(test_utils.reduceBucket(ops1))).toEqual([ { checksum: -1778190028, op: 'CLEAR', @@ -81,7 +86,7 @@ describe('bucket validation', () => { } ]); - validateBucket(ops1); + test_utils.validateBucket(ops1); }); test('reduce 2', () => { @@ -104,7 +109,7 @@ describe('bucket validation', () => { } ]; - expect(reduceBucket(bucket)).toEqual([ + expect(test_utils.reduceBucket(bucket)).toEqual([ { checksum: 93784613, op: 'CLEAR', @@ -121,7 +126,7 @@ describe('bucket validation', () => { } ]); - expect(reduceBucket(reduceBucket(bucket))).toEqual([ + expect(test_utils.reduceBucket(test_utils.reduceBucket(bucket))).toEqual([ { checksum: 93784613, op: 'CLEAR', @@ -138,6 +143,6 @@ describe('bucket validation', () => { } ]); - validateBucket(bucket); + test_utils.validateBucket(bucket); }); -}); +} diff --git a/packages/service-core/test/src/compacting.test.ts b/packages/service-core-tests/src/tests/register-compacting-tests.ts similarity index 50% rename from packages/service-core/test/src/compacting.test.ts rename to packages/service-core-tests/src/tests/register-compacting-tests.ts index b5caf72a..e7ee55ab 100644 --- a/packages/service-core/test/src/compacting.test.ts +++ b/packages/service-core-tests/src/tests/register-compacting-tests.ts @@ -1,65 +1,70 @@ -import { SaveOperationTag } from '@/storage/BucketStorage.js'; -import { MongoCompactOptions } from '@/storage/mongo/MongoCompactor.js'; -import { describe, expect, test } from 'vitest'; -import { validateCompactedBucket } from './bucket_validation.js'; -import { oneFromAsync } from './stream_utils.js'; -import { BATCH_OPTIONS, makeTestTable, MONGO_STORAGE_FACTORY, rid, testRules } from './util.js'; - -const TEST_TABLE = makeTestTable('test', ['id']); - -// Test with the default options - large batch sizes -describe('compacting buckets - default options', () => compactTests({})); - -// Also test with the miniumum batch sizes, forcing usage of multiple batches internally -describe('compacting buckets - batched', () => - compactTests({ clearBatchLimit: 2, moveBatchLimit: 1, moveBatchQueryLimit: 1 })); - -function compactTests(compactOptions: MongoCompactOptions) { - const factory = MONGO_STORAGE_FACTORY; - +import { storage } from '@powersync/service-core'; +import { expect, test } from 'vitest'; +import * as test_utils from '../test-utils/test-utils-index.js'; + +const TEST_TABLE = test_utils.makeTestTable('test', ['id']); + +/** + * @example + * ```TypeScript + * // Test with the default options - large batch sizes + * describe('compacting buckets - default options', () => registerCompactTests(() => new MongoStorageFactory(), {})); + * + * // Also test with the miniumum batch sizes, forcing usage of multiple batches internally + * describe('compacting buckets - batched', () => + * compactTests(() => new MongoStorageFactory(), { clearBatchLimit: 2, 
moveBatchLimit: 1, moveBatchQueryLimit: 1 })); + * ``` + */ +export function registerCompactTests( + generateStorageFactory: storage.TestStorageFactory, + compactOptions: CompactOptions +) { test('compacting (1)', async () => { - const sync_rules = testRules(` + const sync_rules = test_utils.testRules(` bucket_definitions: global: data: [select * from test] `); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1' }, - afterReplicaId: rid('t1') + afterReplicaId: test_utils.rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't2' }, - afterReplicaId: rid('t2') + afterReplicaId: test_utils.rid('t2') }); await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 't2' }, - afterReplicaId: rid('t2') + afterReplicaId: test_utils.rid('t2') }); }); const checkpoint = result!.flushed_op; - const batchBefore = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batchBefore = await test_utils.oneFromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const dataBefore = batchBefore.batch.data; - const checksumBefore = await storage.getChecksums(checkpoint, ['global[]']); + const checksumBefore = await bucketStorage.getChecksums(checkpoint, ['global[]']); expect(dataBefore).toMatchObject([ { @@ -82,11 +87,13 @@ bucket_definitions: } ]); - await storage.compact(compactOptions); + await bucketStorage.compact(compactOptions); - const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batchAfter = await test_utils.oneFromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const dataAfter = batchAfter.batch.data; - const checksumAfter = await storage.getChecksums(checkpoint, ['global[]']); + const checksumAfter = await bucketStorage.getChecksums(checkpoint, ['global[]']); expect(batchAfter.targetOp).toEqual(3n); expect(dataAfter).toMatchObject([ @@ -111,61 +118,64 @@ bucket_definitions: expect(checksumBefore.get('global[]')).toEqual(checksumAfter.get('global[]')); - validateCompactedBucket(dataBefore, dataAfter); + test_utils.validateCompactedBucket(dataBefore, dataAfter); }); test('compacting (2)', async () => { - const sync_rules = testRules(` + const sync_rules = test_utils.testRules(` bucket_definitions: global: data: [select * from test] `); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1' }, - afterReplicaId: rid('t1') + afterReplicaId: test_utils.rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: 
storage.SaveOperationTag.INSERT, after: { id: 't2' }, - afterReplicaId: rid('t2') + afterReplicaId: test_utils.rid('t2') }); await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.DELETE, + tag: storage.SaveOperationTag.DELETE, before: { id: 't1' }, - beforeReplicaId: rid('t1') + beforeReplicaId: test_utils.rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 't2' }, - afterReplicaId: rid('t2') + afterReplicaId: test_utils.rid('t2') }); }); const checkpoint = result!.flushed_op; - const batchBefore = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batchBefore = await test_utils.oneFromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const dataBefore = batchBefore.batch.data; - const checksumBefore = await storage.getChecksums(checkpoint, ['global[]']); + const checksumBefore = await bucketStorage.getChecksums(checkpoint, ['global[]']); expect(dataBefore).toMatchObject([ { @@ -194,11 +204,13 @@ bucket_definitions: } ]); - await storage.compact(compactOptions); + await bucketStorage.compact(compactOptions); - const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batchAfter = await test_utils.oneFromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const dataAfter = batchAfter.batch.data; - const checksumAfter = await storage.getChecksums(checkpoint, ['global[]']); + const checksumAfter = await bucketStorage.getChecksums(checkpoint, ['global[]']); expect(batchAfter.targetOp).toEqual(4n); expect(dataAfter).toMatchObject([ @@ -216,22 +228,23 @@ bucket_definitions: ]); expect(checksumBefore.get('global[]')).toEqual(checksumAfter.get('global[]')); - validateCompactedBucket(dataBefore, dataAfter); + test_utils.validateCompactedBucket(dataBefore, dataAfter); }); test('compacting (3)', async () => { - const sync_rules = testRules(` + const sync_rules = test_utils.testRules(` bucket_definitions: global: data: [select * from test] `); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1' }, @@ -240,7 +253,7 @@ bucket_definitions: await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't2' }, @@ -249,7 +262,7 @@ bucket_definitions: await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.DELETE, + tag: storage.SaveOperationTag.DELETE, before: { id: 't1' }, @@ -258,12 +271,12 @@ bucket_definitions: }); const checkpoint1 = result!.flushed_op; - const checksumBefore = await storage.getChecksums(checkpoint1, ['global[]']); + const checksumBefore = await bucketStorage.getChecksums(checkpoint1, ['global[]']); - const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.DELETE, + tag: storage.SaveOperationTag.DELETE, before: { id: 't2' }, @@ -272,11 
+285,13 @@ bucket_definitions: }); const checkpoint2 = result2!.flushed_op; - await storage.compact(compactOptions); + await bucketStorage.compact(compactOptions); - const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint2, new Map([['global[]', '0']]))); + const batchAfter = await test_utils.oneFromAsync( + bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', '0']])) + ); const dataAfter = batchAfter.batch.data; - const checksumAfter = await storage.getChecksums(checkpoint2, ['global[]']); + const checksumAfter = await bucketStorage.getChecksums(checkpoint2, ['global[]']); expect(batchAfter.targetOp).toEqual(4n); expect(dataAfter).toMatchObject([ diff --git a/packages/service-core/test/src/data_storage.test.ts b/packages/service-core-tests/src/tests/register-data-storage-tests.ts similarity index 60% rename from packages/service-core/test/src/data_storage.test.ts rename to packages/service-core-tests/src/tests/register-data-storage-tests.ts index 35b3f62c..f0d4d781 100644 --- a/packages/service-core/test/src/data_storage.test.ts +++ b/packages/service-core-tests/src/tests/register-data-storage-tests.ts @@ -1,29 +1,34 @@ -import { BucketDataBatchOptions, SaveOperationTag } from '@/storage/BucketStorage.js'; -import { getUuidReplicaIdentityBson } from '@/util/util-index.js'; +import { getUuidReplicaIdentityBson, OplogEntry, storage } from '@powersync/service-core'; import { RequestParameters } from '@powersync/service-sync-rules'; -import { describe, expect, test } from 'vitest'; -import { fromAsync, oneFromAsync } from './stream_utils.js'; -import { - BATCH_OPTIONS, - getBatchData, - getBatchMeta, - makeTestTable, - MONGO_STORAGE_FACTORY, - PARSE_OPTIONS, - rid, - StorageFactory, - testRules -} from './util.js'; - -const TEST_TABLE = makeTestTable('test', ['id']); - -describe('store - mongodb', function () { - defineDataStorageTests(MONGO_STORAGE_FACTORY); -}); - -function defineDataStorageTests(factory: StorageFactory) { +import { expect, test } from 'vitest'; +import * as test_utils from '../test-utils/test-utils-index.js'; + +export const TEST_TABLE = test_utils.makeTestTable('test', ['id']); + +/** + * Normalize data from OplogEntries for comparison in tests. 
+ * Tests typically expect the stringified result + */ +const normalizeOplogData = (data: OplogEntry['data']) => { + if (data != null && typeof data == 'object') { + return JSON.stringify(data); + } + return data; +}; + +/** + * @example + * ```TypeScript + * + * describe('store - mongodb', function () { + * registerDataStorageTests(MONGO_STORAGE_FACTORY); + * }); + * + * ``` + */ +export function registerDataStorageTests(generateStorageFactory: storage.TestStorageFactory) { test('save and load parameters', async () => { - const sync_rules = testRules(` + const sync_rules = test_utils.testRules(` bucket_definitions: mybucket: parameters: @@ -31,35 +36,36 @@ bucket_definitions: data: [] `); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't2', id1: 'user3', id2: 'user4', group_id: 'group2a' }, - afterReplicaId: rid('t2') + afterReplicaId: test_utils.rid('t2') }); await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1', id1: 'user1', id2: 'user2', group_id: 'group1a' }, - afterReplicaId: rid('t1') + afterReplicaId: test_utils.rid('t1') }); }); - const parameters = await storage.getParameterSets(result!.flushed_op, [['mybucket', '1', 'user1']]); + const parameters = await bucketStorage.getParameterSets(result!.flushed_op, [['mybucket', '1', 'user1']]); expect(parameters).toEqual([ { group_id: 'group1a' @@ -68,7 +74,7 @@ bucket_definitions: }); test('it should use the latest version', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: mybucket: @@ -78,32 +84,33 @@ bucket_definitions: ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'user1', group_id: 'group1' }, - afterReplicaId: rid('user1') + afterReplicaId: test_utils.rid('user1') }); }); - const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'user1', group_id: 'group2' }, - afterReplicaId: rid('user1') + afterReplicaId: test_utils.rid('user1') }); }); - const parameters = await storage.getParameterSets(result2!.flushed_op, [['mybucket', '1', 'user1']]); + const parameters = await bucketStorage.getParameterSets(result2!.flushed_op, [['mybucket', '1', 'user1']]); expect(parameters).toEqual([ { group_id: 'group2' @@ -111,7 +118,7 @@ bucket_definitions: ]); // Use the checkpoint to get older data if relevant - const parameters2 = await storage.getParameterSets(result1!.flushed_op, [['mybucket', '1', 'user1']]); + const parameters2 = await 
bucketStorage.getParameterSets(result1!.flushed_op, [['mybucket', '1', 'user1']]); expect(parameters2).toEqual([ { group_id: 'group1' @@ -120,7 +127,7 @@ bucket_definitions: }); test('it should use the latest version after updates', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: mybucket: @@ -132,49 +139,50 @@ bucket_definitions: ` ); - const storage = (await factory()).getInstance(sync_rules); + await using factory = await generateStorageFactory(); + await using bucketStorage = factory.getInstance(sync_rules); - const table = makeTestTable('todos', ['id', 'list_id']); + const table = test_utils.makeTestTable('todos', ['id', 'list_id']); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // Create two todos which initially belong to different lists await batch.save({ sourceTable: table, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'todo1', list_id: 'list1' }, - afterReplicaId: rid('todo1') + afterReplicaId: test_utils.rid('todo1') }); await batch.save({ sourceTable: table, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'todo2', list_id: 'list2' }, - afterReplicaId: rid('todo2') + afterReplicaId: test_utils.rid('todo2') }); }); - const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // Update the second todo item to now belong to list 1 await batch.save({ sourceTable: table, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 'todo2', list_id: 'list1' }, - afterReplicaId: rid('todo2') + afterReplicaId: test_utils.rid('todo2') }); }); // We specifically request the todo_ids for both lists. 
// There removal operation for the association of `list2`::`todo2` should not interfere with the new // association of `list1`::`todo2` - const parameters = await storage.getParameterSets(BigInt(result2!.flushed_op).toString(), [ + const parameters = await bucketStorage.getParameterSets(BigInt(result2!.flushed_op).toString(), [ ['mybucket', '1', 'list1'], ['mybucket', '1', 'list2'] ]); @@ -190,7 +198,7 @@ bucket_definitions: }); test('save and load parameters with different number types', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: mybucket: @@ -200,12 +208,13 @@ bucket_definitions: ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1', group_id: 'group1', @@ -213,7 +222,7 @@ bucket_definitions: f2: 314, f3: 3.14 }, - afterReplicaId: rid('t1') + afterReplicaId: test_utils.rid('t1') }); }); @@ -221,11 +230,11 @@ bucket_definitions: const checkpoint = result!.flushed_op; - const parameters1 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3.14]]); + const parameters1 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3.14]]); expect(parameters1).toEqual([TEST_PARAMS]); - const parameters2 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 314, 314n, 3.14]]); + const parameters2 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314, 314n, 3.14]]); expect(parameters2).toEqual([TEST_PARAMS]); - const parameters3 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3]]); + const parameters3 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3]]); expect(parameters3).toEqual([]); }); @@ -234,7 +243,7 @@ bucket_definitions: // This specific case tested here cannot happen with postgres in practice, but we still // test this to ensure correct deserialization. - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: mybucket: @@ -244,23 +253,24 @@ bucket_definitions: ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1', group_id: 'group1', n1: 1152921504606846976n // 2^60 }, - afterReplicaId: rid('t1') + afterReplicaId: test_utils.rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 't1', group_id: 'group1', @@ -268,7 +278,7 @@ bucket_definitions: // in practice. 
n1: undefined }, - afterReplicaId: rid('t1') + afterReplicaId: test_utils.rid('t1') }); }); @@ -276,12 +286,12 @@ bucket_definitions: const checkpoint = result!.flushed_op; - const parameters1 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 1152921504606846976n]]); + const parameters1 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 1152921504606846976n]]); expect(parameters1).toEqual([TEST_PARAMS]); }); test('removing row', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -289,30 +299,33 @@ bucket_definitions: - SELECT id, description FROM "%" ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.DELETE, - beforeReplicaId: rid('test1') + tag: storage.SaveOperationTag.DELETE, + beforeReplicaId: test_utils.rid('test1') }); }); const checkpoint = result!.flushed_op; - const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const data = batch[0].batch.data.map((d) => { return { op: d.op, @@ -329,7 +342,7 @@ bucket_definitions: { op: 'REMOVE', object_id: 'test1', checksum: c2 } ]); - const checksums = [...(await storage.getChecksums(checkpoint, ['global[]'])).values()]; + const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()]; expect(checksums).toEqual([ { bucket: 'global[]', @@ -340,9 +353,9 @@ bucket_definitions: }); test('save and load parameters with workspaceId', async () => { - const WORKSPACE_TABLE = makeTestTable('workspace', ['id']); + const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', ['id']); - const sync_rules_content = testRules( + const sync_rules_content = test_utils.testRules( ` bucket_definitions: by_workspace: @@ -352,19 +365,20 @@ bucket_definitions: data: [] ` ); - const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules; + const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules; - const storage = (await factory()).getInstance(sync_rules_content); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules_content); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace1', userId: 'u1' }, - afterReplicaId: rid('workspace1') + afterReplicaId: test_utils.rid('workspace1') }); }); @@ -377,12 +391,12 @@ bucket_definitions: const lookups = q1.getLookups(parameters); expect(lookups).toEqual([['by_workspace', '1', 'u1']]); - const parameter_sets = await storage.getParameterSets(checkpoint, lookups); + const parameter_sets = 
await bucketStorage.getParameterSets(checkpoint, lookups); expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]); const buckets = await sync_rules.queryBucketIds({ getParameterSets(lookups) { - return storage.getParameterSets(checkpoint, lookups); + return bucketStorage.getParameterSets(checkpoint, lookups); }, parameters }); @@ -390,9 +404,9 @@ bucket_definitions: }); test('save and load parameters with dynamic global buckets', async () => { - const WORKSPACE_TABLE = makeTestTable('workspace'); + const WORKSPACE_TABLE = test_utils.makeTestTable('workspace'); - const sync_rules_content = testRules( + const sync_rules_content = test_utils.testRules( ` bucket_definitions: by_public_workspace: @@ -402,39 +416,40 @@ bucket_definitions: data: [] ` ); - const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules; + const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules; - const storage = (await factory()).getInstance(sync_rules_content); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules_content); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace1', visibility: 'public' }, - afterReplicaId: rid('workspace1') + afterReplicaId: test_utils.rid('workspace1') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace2', visibility: 'private' }, - afterReplicaId: rid('workspace2') + afterReplicaId: test_utils.rid('workspace2') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace3', visibility: 'public' }, - afterReplicaId: rid('workspace3') + afterReplicaId: test_utils.rid('workspace3') }); }); @@ -447,13 +462,13 @@ bucket_definitions: const lookups = q1.getLookups(parameters); expect(lookups).toEqual([['by_public_workspace', '1']]); - const parameter_sets = await storage.getParameterSets(checkpoint, lookups); + const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups); parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b))); expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]); const buckets = await sync_rules.queryBucketIds({ getParameterSets(lookups) { - return storage.getParameterSets(checkpoint, lookups); + return bucketStorage.getParameterSets(checkpoint, lookups); }, parameters }); @@ -462,9 +477,9 @@ bucket_definitions: }); test('multiple parameter queries', async () => { - const WORKSPACE_TABLE = makeTestTable('workspace'); + const WORKSPACE_TABLE = test_utils.makeTestTable('workspace'); - const sync_rules_content = testRules( + const sync_rules_content = test_utils.testRules( ` bucket_definitions: by_workspace: @@ -476,51 +491,52 @@ bucket_definitions: data: [] ` ); - const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules; + const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules; - const storage = (await factory()).getInstance(sync_rules_content); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules_content); - const result = await storage.startBatch(BATCH_OPTIONS, 
async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace1', visibility: 'public' }, - afterReplicaId: rid('workspace1') + afterReplicaId: test_utils.rid('workspace1') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace2', visibility: 'private' }, - afterReplicaId: rid('workspace2') + afterReplicaId: test_utils.rid('workspace2') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace3', user_id: 'u1', visibility: 'private' }, - afterReplicaId: rid('workspace3') + afterReplicaId: test_utils.rid('workspace3') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'workspace4', user_id: 'u2', visibility: 'private' }, - afterReplicaId: rid('workspace4') + afterReplicaId: test_utils.rid('workspace4') }); }); @@ -533,7 +549,7 @@ bucket_definitions: const lookups1 = q1.getLookups(parameters); expect(lookups1).toEqual([['by_workspace', '1']]); - const parameter_sets1 = await storage.getParameterSets(checkpoint, lookups1); + const parameter_sets1 = await bucketStorage.getParameterSets(checkpoint, lookups1); parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b))); expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]); @@ -541,14 +557,14 @@ bucket_definitions: const lookups2 = q2.getLookups(parameters); expect(lookups2).toEqual([['by_workspace', '2', 'u1']]); - const parameter_sets2 = await storage.getParameterSets(checkpoint, lookups2); + const parameter_sets2 = await bucketStorage.getParameterSets(checkpoint, lookups2); parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b))); expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]); // Test final values - the important part const buckets = await sync_rules.queryBucketIds({ getParameterSets(lookups) { - return storage.getParameterSets(checkpoint, lookups); + return bucketStorage.getParameterSets(checkpoint, lookups); }, parameters }); @@ -557,7 +573,7 @@ bucket_definitions: }); test('changing client ids', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -565,44 +581,48 @@ bucket_definitions: - SELECT client_id as id, description FROM "%" ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + + const bucketStorage = factory.getInstance(sync_rules); const sourceTable = TEST_TABLE; - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', client_id: 'client1a', description: 'test1a' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 'test1', client_id: 'client1b', description: 'test1b' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: 
SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test2', client_id: 'client2', description: 'test2' }, - afterReplicaId: rid('test2') + afterReplicaId: test_utils.rid('test2') }); }); const checkpoint = result!.flushed_op; - const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const data = batch[0].batch.data.map((d) => { return { op: d.op, @@ -619,7 +639,7 @@ bucket_definitions: }); test('re-apply delete', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -627,45 +647,48 @@ bucket_definitions: - SELECT id, description FROM "%" ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); }); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.DELETE, - beforeReplicaId: rid('test1') + tag: storage.SaveOperationTag.DELETE, + beforeReplicaId: test_utils.rid('test1') }); }); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.DELETE, - beforeReplicaId: rid('test1') + tag: storage.SaveOperationTag.DELETE, + beforeReplicaId: test_utils.rid('test1') }); }); const checkpoint = result!.flushed_op; - const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const data = batch[0].batch.data.map((d) => { return { op: d.op, @@ -682,7 +705,7 @@ bucket_definitions: { op: 'REMOVE', object_id: 'test1', checksum: c2 } ]); - const checksums = [...(await storage.getChecksums(checkpoint, ['global[]'])).values()]; + const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()]; expect(checksums).toEqual([ { bucket: 'global[]', @@ -693,7 +716,7 @@ bucket_definitions: }); test('re-apply update + delete', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -701,85 +724,88 @@ bucket_definitions: - SELECT id, description FROM "%" ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 
'test1' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); }); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.DELETE, - beforeReplicaId: rid('test1') + tag: storage.SaveOperationTag.DELETE, + beforeReplicaId: test_utils.rid('test1') }); }); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.DELETE, - beforeReplicaId: rid('test1') + tag: storage.SaveOperationTag.DELETE, + beforeReplicaId: test_utils.rid('test1') }); }); const checkpoint = result!.flushed_op; - const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])) + ); const data = batch[0].batch.data.map((d) => { return { @@ -799,7 +825,7 @@ bucket_definitions: { op: 'REMOVE', object_id: 'test1', checksum: c2 } ]); - const checksums = [...(await storage.getChecksums(checkpoint, ['global[]'])).values()]; + const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()]; expect(checksums).toEqual([ { bucket: 'global[]', @@ -810,7 +836,7 @@ bucket_definitions: }); test('truncate parameters', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: mybucket: @@ -820,27 +846,28 @@ bucket_definitions: ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't2', id1: 'user3', id2: 'user4', group_id: 'group2a' }, - afterReplicaId: rid('t2') + afterReplicaId: test_utils.rid('t2') }); await batch.truncate([TEST_TABLE]); }); - const { checkpoint } = await storage.getCheckpoint(); + const { checkpoint } = await bucketStorage.getCheckpoint(); - const parameters = await storage.getParameterSets(checkpoint, [['mybucket', '1', 'user1']]); + const parameters = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 
'user1']]); expect(parameters).toEqual([]); }); @@ -853,7 +880,7 @@ bucket_definitions: // 1. Not getting the correct "current_data" state for each operation. // 2. Output order not being correct. - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -861,123 +888,127 @@ bucket_definitions: - SELECT id, description FROM "test" ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); // Pre-setup - const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1a' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test2', description: 'test2a' }, - afterReplicaId: rid('test2') + afterReplicaId: test_utils.rid('test2') }); }); const checkpoint1 = result1?.flushed_op ?? '0'; // Test batch - const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; // b await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1b' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, before: { id: 'test1' }, - beforeReplicaId: rid('test1'), + beforeReplicaId: test_utils.rid('test1'), after: { id: 'test2', description: 'test2b' }, - afterReplicaId: rid('test2') + afterReplicaId: test_utils.rid('test2') }); await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, before: { id: 'test2' }, - beforeReplicaId: rid('test2'), + beforeReplicaId: test_utils.rid('test2'), after: { id: 'test3', description: 'test3b' }, - afterReplicaId: rid('test3') + afterReplicaId: test_utils.rid('test3') }); // c await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 'test2', description: 'test2c' }, - afterReplicaId: rid('test2') + afterReplicaId: test_utils.rid('test2') }); // d await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test4', description: 'test4d' }, - afterReplicaId: rid('test4') + afterReplicaId: test_utils.rid('test4') }); await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, before: { id: 'test4' }, - beforeReplicaId: rid('test4'), + beforeReplicaId: test_utils.rid('test4'), after: { id: 'test5', description: 'test5d' }, - afterReplicaId: rid('test5') + afterReplicaId: test_utils.rid('test5') }); }); const checkpoint2 = result2!.flushed_op; - const batch = await fromAsync(storage.getBucketDataBatch(checkpoint2, new Map([['global[]', checkpoint1]]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', checkpoint1]])) + ); + const data = batch[0].batch.data.map((d) => { return { op: d.op, 
object_id: d.object_id, - data: d.data + data: normalizeOplogData(d.data) }; }); @@ -1001,7 +1032,7 @@ bucket_definitions: }); test('changed data with replica identity full', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -1015,15 +1046,16 @@ bucket_definitions: { name: 'description', type: 'VARCHAR', typeId: 25 } ]); } - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const sourceTable = makeTestTable('test', ['id', 'description']); + const sourceTable = test_utils.makeTestTable('test', ['id', 'description']); // Pre-setup - const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1a' @@ -1034,11 +1066,11 @@ bucket_definitions: const checkpoint1 = result1?.flushed_op ?? '0'; - const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // Unchanged, but has a before id await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, before: { id: 'test1', description: 'test1a' @@ -1052,11 +1084,11 @@ bucket_definitions: }); }); - const result3 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result3 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // Delete await batch.save({ sourceTable, - tag: SaveOperationTag.DELETE, + tag: storage.SaveOperationTag.DELETE, before: { id: 'test1', description: 'test1b' @@ -1068,12 +1100,14 @@ bucket_definitions: const checkpoint3 = result3!.flushed_op; - const batch = await fromAsync(storage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])) + ); const data = batch[0].batch.data.map((d) => { return { op: d.op, object_id: d.object_id, - data: d.data, + data: normalizeOplogData(d.data), subkey: d.subkey }; }); @@ -1105,7 +1139,7 @@ bucket_definitions: }); test('unchanged data with replica identity full', async () => { - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -1120,15 +1154,16 @@ bucket_definitions: ]); } - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const sourceTable = makeTestTable('test', ['id', 'description']); + const sourceTable = test_utils.makeTestTable('test', ['id', 'description']); // Pre-setup - const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1a' @@ -1139,11 +1174,11 @@ bucket_definitions: const checkpoint1 = result1?.flushed_op ?? 
'0'; - const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // Unchanged, but has a before id await batch.save({ sourceTable, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, before: { id: 'test1', description: 'test1a' @@ -1157,11 +1192,11 @@ bucket_definitions: }); }); - const result3 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result3 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // Delete await batch.save({ sourceTable, - tag: SaveOperationTag.DELETE, + tag: storage.SaveOperationTag.DELETE, before: { id: 'test1', description: 'test1a' @@ -1173,12 +1208,14 @@ bucket_definitions: const checkpoint3 = result3!.flushed_op; - const batch = await fromAsync(storage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]]))); + const batch = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])) + ); const data = batch[0].batch.data.map((d) => { return { op: d.op, object_id: d.object_id, - data: d.data, + data: normalizeOplogData(d.data), subkey: d.subkey }; }); @@ -1207,7 +1244,7 @@ bucket_definitions: // but large enough in size to be split over multiple returned batches. // The specific batch splits is an implementation detail of the storage driver, // and the test will have to updated when other implementations are added. - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -1215,192 +1252,98 @@ bucket_definitions: - SELECT id, description FROM "%" ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; const largeDescription = '0123456789'.repeat(12_000_00); await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' }, - afterReplicaId: rid('test1') + afterReplicaId: test_utils.rid('test1') }); await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'large1', description: largeDescription }, - afterReplicaId: rid('large1') + afterReplicaId: test_utils.rid('large1') }); // Large enough to split the returned batch await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'large2', description: largeDescription }, - afterReplicaId: rid('large2') + afterReplicaId: test_utils.rid('large2') }); await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 'test3', description: 'test3' }, - afterReplicaId: rid('test3') + afterReplicaId: test_utils.rid('test3') }); }); const checkpoint = result!.flushed_op; - const options: BucketDataBatchOptions = { + const options: storage.BucketDataBatchOptions = { chunkLimitBytes: 16 * 1024 * 1024 }; - const batch1 = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), options)); - expect(getBatchData(batch1)).toEqual([ + const batch1 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), 
options) + ); + expect(test_utils.getBatchData(batch1)).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }, { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 454746904 } ]); - expect(getBatchMeta(batch1)).toEqual({ + expect(test_utils.getBatchMeta(batch1)).toEqual({ after: '0', has_more: true, next_after: '2' }); - const batch2 = await fromAsync( - storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options) + const batch2 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options) ); - expect(getBatchData(batch2)).toEqual([ + expect(test_utils.getBatchData(batch2)).toEqual([ { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1795508474 }, { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 } ]); - expect(getBatchMeta(batch2)).toEqual({ + expect(test_utils.getBatchMeta(batch2)).toEqual({ after: '2', has_more: false, next_after: '4' }); - const batch3 = await fromAsync( - storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options) - ); - expect(getBatchData(batch3)).toEqual([]); - expect(getBatchMeta(batch3)).toEqual(null); - }); - - test('large batch (2)', async () => { - // Test syncing a batch of data that is small in count, - // but large enough in size to be split over multiple returned chunks. - // Similar to the above test, but splits over 1MB chunks. - const sync_rules = testRules( - ` -bucket_definitions: - global: - data: - - SELECT id, description FROM "%" -` - ); - const storage = (await factory()).getInstance(sync_rules); - - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { - const sourceTable = TEST_TABLE; - - const largeDescription = '0123456789'.repeat(2_000_00); - - await batch.save({ - sourceTable, - tag: SaveOperationTag.INSERT, - after: { - id: 'test1', - description: 'test1' - }, - afterReplicaId: rid('test1') - }); - - await batch.save({ - sourceTable, - tag: SaveOperationTag.INSERT, - after: { - id: 'large1', - description: largeDescription - }, - afterReplicaId: rid('large1') - }); - - // Large enough to split the returned batch - await batch.save({ - sourceTable, - tag: SaveOperationTag.INSERT, - after: { - id: 'large2', - description: largeDescription - }, - afterReplicaId: rid('large2') - }); - - await batch.save({ - sourceTable, - tag: SaveOperationTag.INSERT, - after: { - id: 'test3', - description: 'test3' - }, - afterReplicaId: rid('test3') - }); - }); - - const checkpoint = result!.flushed_op; - - const options: BucketDataBatchOptions = {}; - - const batch1 = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), options)); - expect(getBatchData(batch1)).toEqual([ - { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }, - { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 } - ]); - expect(getBatchMeta(batch1)).toEqual({ - after: '0', - has_more: true, - next_after: '2' - }); - - const batch2 = await fromAsync( - storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options) - ); - expect(getBatchData(batch2)).toEqual([{ op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 }]); - expect(getBatchMeta(batch2)).toEqual({ - after: '2', - has_more: true, - next_after: '3' - }); - - const batch3 = await fromAsync( - storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options) + const 
batch3 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options) ); - expect(getBatchData(batch3)).toEqual([{ op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }]); - expect(getBatchMeta(batch3)).toEqual({ - after: '3', - has_more: false, - next_after: '4' - }); + expect(test_utils.getBatchData(batch3)).toEqual([]); + expect(test_utils.getBatchMeta(batch3)).toEqual(null); }); test('long batch', async () => { // Test syncing a batch of data that is limited by count. - const sync_rules = testRules( + const sync_rules = test_utils.testRules( ` bucket_definitions: global: @@ -1408,15 +1351,16 @@ bucket_definitions: - SELECT id, description FROM "%" ` ); - const storage = (await factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; for (let i = 1; i <= 6; i++) { await batch.save({ sourceTable, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: `test${i}`, description: `test${i}` @@ -1428,60 +1372,61 @@ bucket_definitions: const checkpoint = result!.flushed_op; - const batch1 = await oneFromAsync( - storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), { limit: 4 }) + const batch1 = await test_utils.oneFromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), { limit: 4 }) ); - expect(getBatchData(batch1)).toEqual([ + expect(test_utils.getBatchData(batch1)).toEqual([ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }, { op_id: '2', op: 'PUT', object_id: 'test2', checksum: 730027011 }, { op_id: '3', op: 'PUT', object_id: 'test3', checksum: 1359888332 }, { op_id: '4', op: 'PUT', object_id: 'test4', checksum: 2049153252 } ]); - expect(getBatchMeta(batch1)).toEqual({ + expect(test_utils.getBatchMeta(batch1)).toEqual({ after: '0', has_more: true, next_after: '4' }); - const batch2 = await oneFromAsync( - storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1.batch.next_after]]), { + const batch2 = await test_utils.oneFromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1.batch.next_after]]), { limit: 4 }) ); - expect(getBatchData(batch2)).toEqual([ + expect(test_utils.getBatchData(batch2)).toEqual([ { op_id: '5', op: 'PUT', object_id: 'test5', checksum: 3686902721 }, { op_id: '6', op: 'PUT', object_id: 'test6', checksum: 1974820016 } ]); - expect(getBatchMeta(batch2)).toEqual({ + expect(test_utils.getBatchMeta(batch2)).toEqual({ after: '4', has_more: false, next_after: '6' }); - const batch3 = await fromAsync( - storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2.batch.next_after]]), { + const batch3 = await test_utils.fromAsync( + bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2.batch.next_after]]), { limit: 4 }) ); - expect(getBatchData(batch3)).toEqual([]); + expect(test_utils.getBatchData(batch3)).toEqual([]); - expect(getBatchMeta(batch3)).toEqual(null); + expect(test_utils.getBatchMeta(batch3)).toEqual(null); }); test('batch should be disposed automatically', async () => { - const sync_rules = testRules(` + const sync_rules = test_utils.testRules(` bucket_definitions: global: data: [] `); - const storage = (await 
factory()).getInstance(sync_rules); + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); let isDisposed = false; - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { batch.registerListener({ disposed: () => { isDisposed = true; @@ -1493,7 +1438,7 @@ bucket_definitions: isDisposed = false; let errorCaught = false; try { - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { batch.registerListener({ disposed: () => { isDisposed = true; @@ -1509,9 +1454,47 @@ bucket_definitions: expect(isDisposed).true; }); - test('empty storage metrics', async () => { - const f = await factory({ dropAll: true }); + test('batch should be disposed automatically', async () => { + const sync_rules = test_utils.testRules(` + bucket_definitions: + global: + data: [] + `); + + using factory = await generateStorageFactory(); + const bucketStorage = factory.getInstance(sync_rules); + let isDisposed = false; + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { + batch.registerListener({ + disposed: () => { + isDisposed = true; + } + }); + }); + expect(isDisposed).true; + + isDisposed = false; + let errorCaught = false; + try { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { + batch.registerListener({ + disposed: () => { + isDisposed = true; + } + }); + throw new Error(`Testing exceptions`); + }); + } catch (ex) { + errorCaught = true; + expect(ex.message.includes('Testing')).true; + } + expect(errorCaught).true; + expect(isDisposed).true; + }); + + test('empty storage metrics', async () => { + using f = await generateStorageFactory({ dropAll: true }); const metrics = await f.getStorageMetrics(); expect(metrics).toEqual({ operations_size_bytes: 0, @@ -1532,7 +1515,7 @@ bucket_definitions: }); test('invalidate cached parsed sync rules', async () => { - const sync_rules_content = testRules( + const sync_rules_content = test_utils.testRules( ` bucket_definitions: by_workspace: @@ -1543,7 +1526,7 @@ bucket_definitions: ` ); - const bucketStorageFactory = await factory(); + using bucketStorageFactory = await generateStorageFactory(); const syncBucketStorage = bucketStorageFactory.getInstance(sync_rules_content); const parsedSchema1 = syncBucketStorage.getParsedSyncRules({ diff --git a/packages/service-core/test/src/sync.test.ts b/packages/service-core-tests/src/tests/register-sync-tests.ts similarity index 75% rename from packages/service-core/test/src/sync.test.ts rename to packages/service-core-tests/src/tests/register-sync-tests.ts index 75e5f550..9736f8cd 100644 --- a/packages/service-core/test/src/sync.test.ts +++ b/packages/service-core-tests/src/tests/register-sync-tests.ts @@ -1,18 +1,16 @@ -import { SaveOperationTag } from '@/storage/storage-index.js'; -import { RequestTracker } from '@/sync/RequestTracker.js'; -import { streamResponse, SyncStreamParameters } from '@/sync/sync.js'; -import { StreamingSyncLine } from '@/util/protocol-types.js'; +import { storage, sync, utils } from '@powersync/service-core'; import { JSONBig } from '@powersync/service-jsonbig'; import { RequestParameters } from '@powersync/service-sync-rules'; +import path from 'path'; import * as timers from 'timers/promises'; -import { describe, expect, test } from 'vitest'; -import { BATCH_OPTIONS, makeTestTable, MONGO_STORAGE_FACTORY, PARSE_OPTIONS, StorageFactory } from 
'./util.js'; +import { fileURLToPath } from 'url'; +import { expect, test } from 'vitest'; +import * as test_utils from '../test-utils/test-utils-index.js'; -describe('sync - mongodb', function () { - defineTests(MONGO_STORAGE_FACTORY); -}); +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); -const TEST_TABLE = makeTestTable('test', ['id']); +const TEST_TABLE = test_utils.makeTestTable('test', ['id']); const BASIC_SYNC_RULES = ` bucket_definitions: @@ -21,23 +19,33 @@ bucket_definitions: - SELECT * FROM test `; -function defineTests(factory: StorageFactory) { - const tracker = new RequestTracker(); +export const SYNC_SNAPSHOT_PATH = path.resolve(__dirname, '../__snapshots/sync.test.js.snap'); + +/** + * @example + * ```TypeScript + * describe('sync - mongodb', function () { + * registerSyncTests(MONGO_STORAGE_FACTORY); + * }); + * ``` + */ +export function registerSyncTests(factory: storage.TestStorageFactory) { + const tracker = new sync.RequestTracker(); test('sync global data', async () => { - const f = await factory(); + using f = await factory(); const syncRules = await f.updateSyncRules({ content: BASIC_SYNC_RULES }); - const storage = f.getInstance(syncRules); - await storage.autoActivate(); + const bucketStorage = f.getInstance(syncRules); + await bucketStorage.autoActivate(); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1', description: 'Test 1' @@ -47,7 +55,7 @@ function defineTests(factory: StorageFactory) { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't2', description: 'Test 2' @@ -58,14 +66,14 @@ function defineTests(factory: StorageFactory) { await batch.commit('0/1'); }); - const stream = streamResponse({ + const stream = sync.streamResponse({ storage: f, params: { buckets: [], include_checksum: true, raw_data: true }, - parseOptions: PARSE_OPTIONS, + parseOptions: test_utils.PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -82,13 +90,13 @@ function defineTests(factory: StorageFactory) { content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules); - await storage.autoActivate(); + const bucketStorage = await f.getInstance(syncRules); + await bucketStorage.autoActivate(); - const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { + const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1', description: 'Test\n"string"', @@ -100,14 +108,14 @@ function defineTests(factory: StorageFactory) { await batch.commit('0/1'); }); - const stream = streamResponse({ + const stream = sync.streamResponse({ storage: f, params: { buckets: [], include_checksum: true, raw_data: false }, - parseOptions: PARSE_OPTIONS, + parseOptions: test_utils.PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -129,14 +137,14 @@ function defineTests(factory: StorageFactory) { const storage = await f.getInstance(syncRules); await storage.autoActivate(); - const stream = streamResponse({ + 
const stream = sync.streamResponse({ storage: f, params: { buckets: [], include_checksum: true, raw_data: true }, - parseOptions: PARSE_OPTIONS, + parseOptions: test_utils.PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: 0 } as any @@ -147,23 +155,23 @@ function defineTests(factory: StorageFactory) { }); test('sync updates to global data', async () => { - const f = await factory(); + using f = await factory(); const syncRules = await f.updateSyncRules({ content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules); - await storage.autoActivate(); + const bucketStorage = await f.getInstance(syncRules); + await bucketStorage.autoActivate(); - const stream = streamResponse({ + const stream = sync.streamResponse({ storage: f, params: { buckets: [], include_checksum: true, raw_data: true }, - parseOptions: PARSE_OPTIONS, + parseOptions: test_utils.PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -172,10 +180,10 @@ function defineTests(factory: StorageFactory) { expect(await getCheckpointLines(iter)).toMatchSnapshot(); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1', description: 'Test 1' @@ -188,10 +196,10 @@ function defineTests(factory: StorageFactory) { expect(await getCheckpointLines(iter)).toMatchSnapshot(); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't2', description: 'Test 2' @@ -208,7 +216,7 @@ function defineTests(factory: StorageFactory) { }); test('expiring token', async () => { - const f = await factory(); + using f = await factory(); const syncRules = await f.updateSyncRules({ content: BASIC_SYNC_RULES @@ -219,14 +227,14 @@ function defineTests(factory: StorageFactory) { const exp = Date.now() / 1000 + 0.1; - const stream = streamResponse({ + const stream = sync.streamResponse({ storage: f, params: { buckets: [], include_checksum: true, raw_data: true }, - parseOptions: PARSE_OPTIONS, + parseOptions: test_utils.PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: exp } as any @@ -246,19 +254,19 @@ function defineTests(factory: StorageFactory) { // This is expected to be rare in practice, but it is important to handle // this case correctly to maintain consistency on the client. 
- const f = await factory(); + using f = await factory(); const syncRules = await f.updateSyncRules({ content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules); - await storage.autoActivate(); + const bucketStorage = await f.getInstance(syncRules); + await bucketStorage.autoActivate(); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't1', description: 'Test 1' @@ -268,7 +276,7 @@ function defineTests(factory: StorageFactory) { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.INSERT, + tag: storage.SaveOperationTag.INSERT, after: { id: 't2', description: 'Test 2' @@ -279,14 +287,14 @@ function defineTests(factory: StorageFactory) { await batch.commit('0/1'); }); - const stream = streamResponse({ + const stream = sync.streamResponse({ storage: f, params: { buckets: [], include_checksum: true, raw_data: true }, - parseOptions: PARSE_OPTIONS, + parseOptions: test_utils.PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -306,10 +314,10 @@ function defineTests(factory: StorageFactory) { // Now we save additional data AND compact before continuing. // This invalidates the checkpoint we've received above. - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 't1', description: 'Test 1b' @@ -319,7 +327,7 @@ function defineTests(factory: StorageFactory) { await batch.save({ sourceTable: TEST_TABLE, - tag: SaveOperationTag.UPDATE, + tag: storage.SaveOperationTag.UPDATE, after: { id: 't2', description: 'Test 2b' @@ -330,7 +338,7 @@ function defineTests(factory: StorageFactory) { await batch.commit('0/2'); }); - await storage.compact(); + await bucketStorage.compact(); const lines2 = await getCheckpointLines(iter, { consume: true }); @@ -383,38 +391,38 @@ function defineTests(factory: StorageFactory) { }); test('write checkpoint', async () => { - const f = await factory(); + using f = await factory(); const syncRules = await f.updateSyncRules({ content: BASIC_SYNC_RULES }); - const storage = f.getInstance(syncRules); - await storage.autoActivate(); + const bucketStorage = f.getInstance(syncRules); + await bucketStorage.autoActivate(); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // <= the managed write checkpoint LSN below await batch.commit('0/1'); }); - const checkpoint = await storage.createManagedWriteCheckpoint({ + const checkpoint = await bucketStorage.createManagedWriteCheckpoint({ user_id: 'test', heads: { '1': '1/0' } }); - const params: SyncStreamParameters = { + const params: sync.SyncStreamParameters = { storage: f, params: { buckets: [], include_checksum: true, raw_data: true }, - parseOptions: PARSE_OPTIONS, + parseOptions: test_utils.PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: 'test' }, {}), token: { sub: 'test', exp: Date.now() / 1000 + 10 } as any }; - const stream1 = streamResponse(params); + const stream1 = sync.streamResponse(params); const lines1 = await consumeCheckpointLines(stream1); // If write checkpoints are not correctly 
filtered, this may already @@ -426,14 +434,14 @@ function defineTests(factory: StorageFactory) { }) }); - await storage.startBatch(BATCH_OPTIONS, async (batch) => { + await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => { // must be >= the managed write checkpoint LSN await batch.commit('1/0'); }); // At this point the LSN has advanced, so the write checkpoint should be // included in the next checkpoint message. - const stream2 = streamResponse(params); + const stream2 = sync.streamResponse(params); const lines2 = await consumeCheckpointLines(stream2); expect(lines2[0]).toMatchObject({ checkpoint: expect.objectContaining({ @@ -492,7 +500,7 @@ async function consumeIterator( * Does not stop the iterator unless options.consume is true. */ async function getCheckpointLines( - iter: AsyncIterator, + iter: AsyncIterator, options?: { consume?: boolean } ) { return consumeIterator(iter, { @@ -506,6 +514,8 @@ async function getCheckpointLines( * * Stops the iterator afterwards. */ -async function consumeCheckpointLines(iterable: AsyncIterable): Promise { +async function consumeCheckpointLines( + iterable: AsyncIterable +): Promise { return getCheckpointLines(iterable[Symbol.asyncIterator](), { consume: true }); } diff --git a/packages/service-core-tests/src/tests/tests-index.ts b/packages/service-core-tests/src/tests/tests-index.ts new file mode 100644 index 00000000..4f0e017f --- /dev/null +++ b/packages/service-core-tests/src/tests/tests-index.ts @@ -0,0 +1,4 @@ +export * from './register-bucket-validation-tests.js'; +export * from './register-compacting-tests.js'; +export * from './register-data-storage-tests.js'; +export * from './register-sync-tests.js'; diff --git a/packages/service-core-tests/tsconfig.json b/packages/service-core-tests/tsconfig.json new file mode 100644 index 00000000..d2e14207 --- /dev/null +++ b/packages/service-core-tests/tsconfig.json @@ -0,0 +1,34 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true + }, + "include": ["src"], + "references": [ + { + "path": "../types" + }, + { + "path": "../rsocket-router" + }, + { + "path": "../jsonbig" + }, + { + "path": "../jpgwire" + }, + { + "path": "../sync-rules" + }, + { + "path": "../service-core" + }, + { + "path": "../../libs/lib-services" + } + ] +} diff --git a/packages/service-core/package.json b/packages/service-core/package.json index 590f2b90..1dd3b001 100644 --- a/packages/service-core/package.json +++ b/packages/service-core/package.json @@ -29,7 +29,7 @@ "@powersync/service-types": "workspace:*", "async": "^3.2.4", "async-mutex": "^0.5.0", - "bson": "^6.6.0", + "bson": "^6.8.0", "commander": "^12.0.0", "cors": "^2.8.5", "ipaddr.js": "^2.1.0", @@ -37,9 +37,8 @@ "jose": "^4.15.1", "lodash": "^4.17.21", "lru-cache": "^10.2.2", - "mongodb": "^6.11.0", "node-fetch": "^3.3.2", - "ts-codec": "^1.2.2", + "ts-codec": "^1.3.0", "uri-js": "^4.4.1", "uuid": "^9.0.1", "winston": "^3.13.0", diff --git a/packages/service-core/src/db/db-index.ts b/packages/service-core/src/db/db-index.ts deleted file mode 100644 index 0cbae156..00000000 --- a/packages/service-core/src/db/db-index.ts +++ /dev/null @@ -1 +0,0 @@ -export * as mongo from './mongo.js'; diff --git a/packages/service-core/src/entry/commands/compact-action.ts b/packages/service-core/src/entry/commands/compact-action.ts index bfa0da4c..31f49e7f 100644 --- a/packages/service-core/src/entry/commands/compact-action.ts +++ 
b/packages/service-core/src/entry/commands/compact-action.ts @@ -1,9 +1,11 @@ import { Command } from 'commander'; -import { logger } from '@powersync/lib-services-framework'; +import { container, logger } from '@powersync/lib-services-framework'; import * as v8 from 'v8'; -import * as storage from '../../storage/storage-index.js'; +import * as system from '../../system/system-index.js'; import * as utils from '../../util/util-index.js'; + +import { modules } from '../../index.js'; import { extractRunnerOptions, wrapConfigCommand } from './config-command.js'; const COMMAND_NAME = 'compact'; @@ -32,20 +34,22 @@ export function registerCompactAction(program: Command) { if (buckets == null) { logger.info('Compacting storage for all buckets...'); } else { - logger.info(`Compacting storage for ${buckets.join(', ')}...`); + logger.info(`Compacting storage for ${buckets?.join(', ')}...`); } - const runnerConfig = extractRunnerOptions(options); - const configuration = await utils.loadConfig(runnerConfig); - logger.info('Successfully loaded configuration...'); - const { storage: storageConfig } = configuration; + const config = await utils.loadConfig(extractRunnerOptions(options)); + const serviceContext = new system.ServiceContextContainer(config); + + // Register modules in order to allow custom module compacting + const moduleManager = container.getImplementation(modules.ModuleManager); + await moduleManager.initialize(serviceContext); + logger.info('Connecting to storage...'); - const psdb = storage.createPowerSyncMongo(storageConfig); - const client = psdb.client; - await client.connect(); + try { - const bucketStorage = new storage.MongoBucketStorage(psdb, { - slot_name_prefix: configuration.slot_name_prefix - }); + // Start the storage engine in order to create the appropriate BucketStorage + await serviceContext.lifeCycleEngine.start(); + const bucketStorage = serviceContext.storageEngine.activeBucketStorage; + const active = await bucketStorage.getActiveSyncRulesContent(); if (active == null) { logger.info('No active instance to compact'); @@ -57,9 +61,10 @@ export function registerCompactAction(program: Command) { logger.info('Successfully compacted storage.'); } catch (e) { logger.error(`Failed to compact: ${e.toString()}`); + // Indirectly triggers lifeCycleEngine.stop process.exit(1); } finally { - await client.close(); + // Indirectly triggers lifeCycleEngine.stop process.exit(0); } }); diff --git a/packages/service-core/src/entry/commands/migrate-action.ts b/packages/service-core/src/entry/commands/migrate-action.ts index 29ed5bbe..3350118f 100644 --- a/packages/service-core/src/entry/commands/migrate-action.ts +++ b/packages/service-core/src/entry/commands/migrate-action.ts @@ -1,7 +1,9 @@ -import { logger } from '@powersync/lib-services-framework'; +import { container, logger, migrations } from '@powersync/lib-services-framework'; import { Command } from 'commander'; -import * as migrations from '../../migrations/migrations-index.js'; +import * as modules from '../../modules/modules-index.js'; +import * as system from '../../system/system-index.js'; +import * as utils from '../../util/util-index.js'; import { extractRunnerOptions, wrapConfigCommand } from './config-command.js'; const COMMAND_NAME = 'migrate'; @@ -15,12 +17,23 @@ export function registerMigrationAction(program: Command) { .description('Run migrations') .argument('', 'Migration direction. 
`up` or `down`') .action(async (direction: migrations.Direction, options) => { + const config = await utils.loadConfig(extractRunnerOptions(options)); + const serviceContext = new system.ServiceContextContainer(config); + + // Register modules in order to allow custom module migrations + const moduleManager = container.getImplementation(modules.ModuleManager); + await moduleManager.initialize(serviceContext); + try { - await migrations.migrate({ + await serviceContext.migrations.migrate({ direction, - runner_config: extractRunnerOptions(options) + // Give the migrations access to the service context + migrationContext: { + service_context: serviceContext + } }); + await serviceContext.migrations[Symbol.asyncDispose](); process.exit(0); } catch (e) { logger.error(`Migration failure`, e); diff --git a/packages/service-core/src/index.ts b/packages/service-core/src/index.ts index a365d77d..9a90ceeb 100644 --- a/packages/service-core/src/index.ts +++ b/packages/service-core/src/index.ts @@ -6,9 +6,6 @@ export * as api from './api/api-index.js'; export * from './auth/auth-index.js'; export * as auth from './auth/auth-index.js'; -export * from './db/db-index.js'; -export * as db from './db/db-index.js'; - export * from './entry/entry-index.js'; export * as entry from './entry/entry-index.js'; @@ -18,8 +15,8 @@ export * as framework from '@powersync/lib-services-framework'; export * from './metrics/Metrics.js'; export * as metrics from './metrics/Metrics.js'; +export * from './migrations/migrations-index.js'; export * as migrations from './migrations/migrations-index.js'; -export * from './migrations/migrations.js'; export * from './modules/modules-index.js'; export * as modules from './modules/modules-index.js'; diff --git a/packages/service-core/src/locks/LockManager.ts b/packages/service-core/src/locks/LockManager.ts deleted file mode 100644 index 6bb53488..00000000 --- a/packages/service-core/src/locks/LockManager.ts +++ /dev/null @@ -1,16 +0,0 @@ -import * as bson from 'bson'; - -export class LockActiveError extends Error { - constructor() { - super('Lock is already active'); - this.name = this.constructor.name; - } -} - -export type LockManager = { - acquire: () => Promise; - refresh: (lock_id: bson.ObjectId) => Promise; - release: (lock_id: bson.ObjectId) => Promise; - - lock: (handler: (refresh: () => Promise) => Promise) => Promise; -}; diff --git a/packages/service-core/src/locks/MongoLocks.ts b/packages/service-core/src/locks/MongoLocks.ts deleted file mode 100644 index 33ad7bc8..00000000 --- a/packages/service-core/src/locks/MongoLocks.ts +++ /dev/null @@ -1,142 +0,0 @@ -import * as mongo from 'mongodb'; -import * as bson from 'bson'; -import { LockActiveError, LockManager } from './LockManager.js'; - -/** - * Lock Document Schema - */ -export type Lock = { - name: string; - active_lock?: { - lock_id: bson.ObjectId; - ts: Date; - }; -}; - -export type Collection = mongo.Collection; - -export type AcquireLockParams = { - /** - * Name of the process/user trying to acquire the lock. - */ - name: string; - /** - * The TTL of the lock (ms). Default: 60000 ms (1 min) - */ - timeout?: number; -}; - -const DEFAULT_LOCK_TIMEOUT = 60 * 1000; // 1 minute - -const acquireLock = async (collection: Collection, params: AcquireLockParams) => { - const now = new Date(); - const lock_timeout = params.timeout ?? 
DEFAULT_LOCK_TIMEOUT; - const lock_id = new bson.ObjectId(); - - await collection.updateOne( - { - name: params.name - }, - { - $setOnInsert: { - name: params.name - } - }, - { - upsert: true - } - ); - - const expired_ts = now.getTime() - lock_timeout; - - const res = await collection.updateOne( - { - $and: [ - { name: params.name }, - { - $or: [{ active_lock: { $exists: false } }, { 'active_lock.ts': { $lte: new Date(expired_ts) } }] - } - ] - }, - { - $set: { - active_lock: { - lock_id: lock_id, - ts: now - } - } - } - ); - - if (res.modifiedCount === 0) { - return null; - } - - return lock_id; -}; - -const refreshLock = async (collection: Collection, lock_id: bson.ObjectId) => { - const res = await collection.updateOne( - { - 'active_lock.lock_id': lock_id - }, - { - $set: { - 'active_lock.ts': new Date() - } - } - ); - - if (res.modifiedCount === 0) { - throw new Error('Lock not found, could not refresh'); - } -}; - -export const releaseLock = async (collection: Collection, lock_id: bson.ObjectId) => { - const res = await collection.updateOne( - { - 'active_lock.lock_id': lock_id - }, - { - $unset: { - active_lock: true - } - } - ); - - if (res.modifiedCount === 0) { - throw new Error('Lock not found, could not release'); - } -}; - -export type CreateLockManagerParams = { - /** - * Name of the process/user trying to acquire the lock. - */ - name: string; - /** - * The TTL for each lock (ms). Default: 60000 ms (1 min) - */ - timeout?: number; -}; - -export const createMongoLockManager = (collection: Collection, params: CreateLockManagerParams): LockManager => { - return { - acquire: () => acquireLock(collection, params), - refresh: (lock_id: bson.ObjectId) => refreshLock(collection, lock_id), - release: (lock_id: bson.ObjectId) => releaseLock(collection, lock_id), - - lock: async (handler) => { - const lock_id = await acquireLock(collection, params); - if (!lock_id) { - throw new LockActiveError(); - } - - try { - await handler(() => refreshLock(collection, lock_id)); - } finally { - await releaseLock(collection, lock_id); - } - } - }; -}; diff --git a/packages/service-core/src/locks/locks-index.ts b/packages/service-core/src/locks/locks-index.ts deleted file mode 100644 index 3b7e387a..00000000 --- a/packages/service-core/src/locks/locks-index.ts +++ /dev/null @@ -1,2 +0,0 @@ -export * from './LockManager.js'; -export * from './MongoLocks.js'; diff --git a/packages/service-core/src/migrations/PowerSyncMigrationManager.ts b/packages/service-core/src/migrations/PowerSyncMigrationManager.ts new file mode 100644 index 00000000..23615967 --- /dev/null +++ b/packages/service-core/src/migrations/PowerSyncMigrationManager.ts @@ -0,0 +1,42 @@ +import * as framework from '@powersync/lib-services-framework'; +import fs from 'fs/promises'; +import path from 'path'; +import * as system from '../system/system-index.js'; + +/** + * PowerSync service migrations each have this context available to the `up` and `down` methods. 
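+ *
+ * @example
+ * A rough sketch of a module migration that consumes this context. It assumes the
+ * context is passed directly to the `up`/`down` functions; the body is illustrative only.
+ * ```TypeScript
+ * export const up: PowerSyncMigrationFunction = async (context) => {
+ *   const { service_context } = context;
+ *   // Apply module-specific schema or index changes here,
+ *   // e.g. using service_context.configuration or the active storage.
+ * };
+ * ```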
+ */ +export interface PowerSyncMigrationContext { + service_context: system.ServiceContext; +} + +export interface PowerSyncMigrationGenerics extends framework.MigrationAgentGenerics { + MIGRATION_CONTEXT: PowerSyncMigrationContext; +} + +export type PowerSyncMigrationFunction = framework.MigrationFunction; + +export abstract class AbstractPowerSyncMigrationAgent extends framework.AbstractMigrationAgent { + abstract getInternalScriptsDir(): string; + + async loadInternalMigrations(): Promise[]> { + const migrationsDir = this.getInternalScriptsDir(); + const files = await fs.readdir(migrationsDir); + const migrations = files.filter((file) => { + return '.js' == path.extname(file); + }); + + return await Promise.all( + migrations.map(async (migration) => { + const module = await import(path.resolve(migrationsDir, migration)); + return { + name: path.basename(migration).replace(path.extname(migration), ''), + up: module.up, + down: module.down + }; + }) + ); + } +} + +export type PowerSyncMigrationManager = framework.MigrationManager; diff --git a/packages/service-core/src/migrations/db/migrations/1684951997326-init.ts b/packages/service-core/src/migrations/db/migrations/1684951997326-init.ts deleted file mode 100644 index 9042f0b5..00000000 --- a/packages/service-core/src/migrations/db/migrations/1684951997326-init.ts +++ /dev/null @@ -1,38 +0,0 @@ -import * as mongo from '../../../db/mongo.js'; -import * as storage from '../../../storage/storage-index.js'; -import * as utils from '../../../util/util-index.js'; - -export const up = async (context: utils.MigrationContext) => { - const { runner_config } = context; - - const config = await utils.loadConfig(runner_config); - - const database = storage.createPowerSyncMongo(config.storage); - await mongo.waitForAuth(database.db); - try { - await database.bucket_parameters.createIndex( - { - 'key.g': 1, - lookup: 1, - _id: 1 - }, - { name: 'lookup1' } - ); - } finally { - await database.client.close(); - } -}; - -export const down = async (context: utils.MigrationContext) => { - const { runner_config } = context; - const config = await utils.loadConfig(runner_config); - - const database = storage.createPowerSyncMongo(config.storage); - try { - if (await database.bucket_parameters.indexExists('lookup')) { - await database.bucket_parameters.dropIndex('lookup1'); - } - } finally { - await database.client.close(); - } -}; diff --git a/packages/service-core/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts b/packages/service-core/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts deleted file mode 100644 index be568408..00000000 --- a/packages/service-core/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +++ /dev/null @@ -1,34 +0,0 @@ -import * as storage from '../../../storage/storage-index.js'; -import * as utils from '../../../util/util-index.js'; - -export const up = async (context: utils.MigrationContext) => { - const { runner_config } = context; - const config = await utils.loadConfig(runner_config); - const db = storage.createPowerSyncMongo(config.storage); - - try { - await db.write_checkpoints.createIndex( - { - user_id: 1 - }, - { name: 'user_id' } - ); - } finally { - await db.client.close(); - } -}; - -export const down = async (context: utils.MigrationContext) => { - const { runner_config } = context; - const config = await utils.loadConfig(runner_config); - - const db = storage.createPowerSyncMongo(config.storage); - - try { - if (await db.write_checkpoints.indexExists('user_id')) { 
- await db.write_checkpoints.dropIndex('user_id'); - } - } finally { - await db.client.close(); - } -}; diff --git a/packages/service-core/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts b/packages/service-core/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts deleted file mode 100644 index 2bac37fc..00000000 --- a/packages/service-core/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts +++ /dev/null @@ -1,37 +0,0 @@ -import * as storage from '../../../storage/storage-index.js'; -import * as utils from '../../../util/util-index.js'; - -const INDEX_NAME = 'user_sync_rule_unique'; - -export const up = async (context: utils.MigrationContext) => { - const { runner_config } = context; - const config = await utils.loadConfig(runner_config); - const db = storage.createPowerSyncMongo(config.storage); - - try { - await db.custom_write_checkpoints.createIndex( - { - user_id: 1, - sync_rules_id: 1 - }, - { name: INDEX_NAME, unique: true } - ); - } finally { - await db.client.close(); - } -}; - -export const down = async (context: utils.MigrationContext) => { - const { runner_config } = context; - const config = await utils.loadConfig(runner_config); - - const db = storage.createPowerSyncMongo(config.storage); - - try { - if (await db.custom_write_checkpoints.indexExists(INDEX_NAME)) { - await db.custom_write_checkpoints.dropIndex(INDEX_NAME); - } - } finally { - await db.client.close(); - } -}; diff --git a/packages/service-core/src/migrations/definitions.ts b/packages/service-core/src/migrations/definitions.ts deleted file mode 100644 index 406a9fa0..00000000 --- a/packages/service-core/src/migrations/definitions.ts +++ /dev/null @@ -1,21 +0,0 @@ -export type Migration = { - name: string; - up: () => Promise; - down: () => Promise; -}; - -export enum Direction { - Up = 'up', - Down = 'down' -} - -export type ExecutedMigration = { - name: string; - direction: Direction; - timestamp: Date; -}; - -export type MigrationState = { - last_run: string; - log: ExecutedMigration[]; -}; diff --git a/packages/service-core/src/migrations/ensure-automatic-migrations.ts b/packages/service-core/src/migrations/ensure-automatic-migrations.ts new file mode 100644 index 00000000..ea03acdf --- /dev/null +++ b/packages/service-core/src/migrations/ensure-automatic-migrations.ts @@ -0,0 +1,15 @@ +import * as framework from '@powersync/lib-services-framework'; +import * as system from '../system/system-index.js'; + +export const ensureAutomaticMigrations = async (options: { serviceContext: system.ServiceContext }) => { + const { serviceContext } = options; + if (serviceContext.configuration.migrations?.disable_auto_migration) { + return; + } + await serviceContext.migrations.migrate({ + direction: framework.migrations.Direction.Up, + migrationContext: { + service_context: serviceContext + } + }); +}; diff --git a/packages/service-core/src/migrations/executor.ts b/packages/service-core/src/migrations/executor.ts deleted file mode 100644 index edff07b4..00000000 --- a/packages/service-core/src/migrations/executor.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { logger } from '@powersync/lib-services-framework'; -import * as defs from './definitions.js'; -import { MigrationStore } from './store/migration-store.js'; - -type ExecuteParams = { - migrations: defs.Migration[]; - state?: defs.MigrationState; - - direction: defs.Direction; - count?: number; -}; - -export async function* execute(params: ExecuteParams): AsyncGenerator { - let migrations = 
[...params.migrations]; - if (params.direction === defs.Direction.Down) { - migrations = migrations.reverse(); - } - - let index = 0; - - if (params.state) { - // Find the index of the last run - index = migrations.findIndex((migration) => { - return migration.name === params.state!.last_run; - }); - - if (index === -1) { - throw new Error(`The last run migration ${params.state?.last_run} was not found in the given set of migrations`); - } - - // If we are migrating down then we want to include the last run migration, otherwise we want to start at the next one - if (params.direction === defs.Direction.Up) { - index += 1; - } - } - - migrations = migrations.slice(index); - - let i = 0; - for (const migration of migrations) { - if (params.count && params.count === i) { - return; - } - - logger.info(`Executing ${migration.name} (${params.direction})`); - try { - switch (params.direction) { - case defs.Direction.Up: { - await migration.up(); - break; - } - case defs.Direction.Down: { - await migration.down(); - break; - } - } - logger.debug(`Success`); - } catch (err) { - logger.error(`Failed`, err); - process.exit(1); - } - - yield { - name: migration.name, - direction: params.direction, - timestamp: new Date() - }; - - i++; - } -} - -type WriteLogsParams = { - store: MigrationStore; - state?: defs.MigrationState; - log_stream: Iterable | AsyncIterable; -}; -export const writeLogsToStore = async (params: WriteLogsParams): Promise => { - const log = [...(params.state?.log || [])]; - for await (const migration of params.log_stream) { - log.push(migration); - await params.store.save({ - last_run: migration.name, - log: log - }); - } -}; diff --git a/packages/service-core/src/migrations/migrations-index.ts b/packages/service-core/src/migrations/migrations-index.ts index ae77adab..bc7b7720 100644 --- a/packages/service-core/src/migrations/migrations-index.ts +++ b/packages/service-core/src/migrations/migrations-index.ts @@ -1,3 +1,2 @@ -export * from './definitions.js'; -export * from './executor.js'; -export * from './migrations.js'; +export * from './ensure-automatic-migrations.js'; +export * from './PowerSyncMigrationManager.js'; diff --git a/packages/service-core/src/migrations/migrations.ts b/packages/service-core/src/migrations/migrations.ts deleted file mode 100644 index d2abdc6e..00000000 --- a/packages/service-core/src/migrations/migrations.ts +++ /dev/null @@ -1,142 +0,0 @@ -import * as fs from 'fs/promises'; -import * as path from 'path'; -import { fileURLToPath } from 'url'; - -import { logger } from '@powersync/lib-services-framework'; -import * as db from '../db/db-index.js'; -import * as locks from '../locks/locks-index.js'; -import * as util from '../util/util-index.js'; -import { Direction } from './definitions.js'; -import { execute, writeLogsToStore } from './executor.js'; -import { createMongoMigrationStore } from './store/migration-store.js'; - -const DEFAULT_MONGO_LOCK_COLLECTION = 'locks'; -const MONGO_LOCK_PROCESS = 'migrations'; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = path.dirname(__filename); - -const MIGRATIONS_DIR = path.join(__dirname, '/db/migrations'); - -export type MigrationOptions = { - direction: Direction; - runner_config: util.RunnerConfig; -}; - -export type AutomaticMigrationParams = { - config: util.ResolvedPowerSyncConfig; - runner_config: util.RunnerConfig; -}; - -/** - * Loads migrations and injects a custom context for loading the specified - * runner configuration. 
- */ -const loadMigrations = async (dir: string, runnerConfig: util.RunnerConfig) => { - const files = await fs.readdir(dir); - const migrations = files.filter((file) => { - return path.extname(file) === '.js'; - }); - - const context: util.MigrationContext = { - runner_config: runnerConfig - }; - - return await Promise.all( - migrations.map(async (migration) => { - const module = await import(path.resolve(dir, migration)); - return { - name: path.basename(migration).replace(path.extname(migration), ''), - up: () => module.up(context), - down: () => module.down(context) - }; - }) - ); -}; - -/** - * Runs migration scripts exclusively using Mongo locks - */ -export const migrate = async (options: MigrationOptions) => { - const { direction, runner_config } = options; - - const config = await util.loadConfig(runner_config); - const { storage } = config; - /** - * Try and get Mongo from config file. - * But this might not be available in Journey Micro as we use the standard Mongo. - */ - - const client = db.mongo.createMongoClient(storage); - logger.info('Connecting to MongoDB'); - await client.connect(); - - const clientDB = client.db(storage.database); - const collection = clientDB.collection(DEFAULT_MONGO_LOCK_COLLECTION); - - const manager = locks.createMongoLockManager(collection, { - name: MONGO_LOCK_PROCESS - }); - - // Only one process should execute this at a time. - logger.info('Acquiring lock'); - const lockId = await manager.acquire(); - - if (!lockId) { - throw new Error('Could not acquire lock_id'); - } - - let isReleased = false; - const releaseLock = async () => { - if (isReleased) { - return; - } - await manager.release(lockId); - isReleased = true; - }; - - // For the case where the migration is terminated - process.addListener('beforeExit', releaseLock); - - try { - logger.info('Loading migrations'); - const migrations = await loadMigrations(MIGRATIONS_DIR, runner_config); - - // Use the provided config to connect to Mongo - const store = createMongoMigrationStore(clientDB); - - const state = await store.load(); - - logger.info('Running migrations'); - const logStream = execute({ - direction: direction, - migrations, - state - }); - - await writeLogsToStore({ - log_stream: logStream, - store, - state - }); - } finally { - logger.info('Releasing lock'); - await releaseLock(); - logger.info('Closing database'); - await client.close(true); - process.removeListener('beforeExit', releaseLock); - logger.info('Done with migrations'); - } -}; - -/** - * Ensures automatic migrations are executed - */ -export const ensureAutomaticMigrations = async (params: AutomaticMigrationParams) => { - if (!params.config.migrations?.disable_auto_migration) { - await migrate({ - direction: Direction.Up, - runner_config: params.runner_config - }); - } -}; diff --git a/packages/service-core/src/storage/BucketStorage.ts b/packages/service-core/src/storage/BucketStorage.ts index f9664126..eaa21235 100644 --- a/packages/service-core/src/storage/BucketStorage.ts +++ b/packages/service-core/src/storage/BucketStorage.ts @@ -8,13 +8,49 @@ import { SqliteRow, ToastableSqliteRow } from '@powersync/service-sync-rules'; +import { BSON } from 'bson'; import * as util from '../util/util-index.js'; import { ReplicationEventPayload } from './ReplicationEventPayload.js'; import { SourceEntityDescriptor } from './SourceEntity.js'; import { SourceTable } from './SourceTable.js'; -import { BatchedCustomWriteCheckpointOptions, ReplicaId } from './storage-index.js'; +import { BatchedCustomWriteCheckpointOptions } from 
'./storage-index.js'; import { SyncStorageWriteCheckpointAPI } from './WriteCheckpointAPI.js'; +/** + * Replica id uniquely identifying a row on the source database. + * + * Can be any value serializable to BSON. + * + * If the value is an entire document, the data serialized to a v5 UUID may be a good choice here. + */ +export type ReplicaId = BSON.UUID | BSON.Document | any; + +export enum SyncRuleState { + /** + * New sync rules - need to be processed (initial replication). + * + * While multiple sets of sync rules _can_ be in PROCESSING, + * it's generally pointless, so we only keep one in that state. + */ + PROCESSING = 'PROCESSING', + + /** + * Sync rule processing is done, and can be used for sync. + * + * Only one set of sync rules should be in ACTIVE state. + */ + ACTIVE = 'ACTIVE', + /** + * This state is used when the sync rules have been replaced, + * and replication is or should be stopped. + */ + STOP = 'STOP', + /** + * After sync rules have been stopped, the data needs to be + * deleted. Once deleted, the state is TERMINATED. + */ + TERMINATED = 'TERMINATED' +} export interface BucketStorageFactoryListener extends DisposableListener { syncStorageCreated: (storage: SyncRulesBucketStorage) => void; replicationEvent: (event: ReplicationEventPayload) => void; @@ -501,3 +537,19 @@ */ clearStorage: boolean; } + +/** + * Helper for tests. + * This is not in the `service-core-tests` package in order for storage modules + * to provide relevant factories without requiring `service-core-tests` as a direct dependency. + */ +export interface TestStorageOptions { + /** + * By default, collections are only cleared. + * Setting this to true will drop the collections completely. + */ + dropAll?: boolean; + + doNotClear?: boolean; +} +export type TestStorageFactory = (options?: TestStorageOptions) => Promise<BucketStorageFactory>; diff --git a/packages/service-core/src/storage/bson.ts b/packages/service-core/src/storage/bson.ts new file mode 100644 index 00000000..edc08c93 --- /dev/null +++ b/packages/service-core/src/storage/bson.ts @@ -0,0 +1,78 @@ +import * as bson from 'bson'; + +import { SqliteJsonValue } from '@powersync/service-sync-rules'; +import { ReplicaId } from './BucketStorage.js'; + +export const BSON_DESERIALIZE_OPTIONS: bson.DeserializeOptions = { + // use bigint instead of Long + useBigInt64: true +}; + +/** + * Lookup serialization must be number-agnostic, i.e. numbers are normalized instead of preserved. + * @param lookup + */ +export const serializeLookupBuffer = (lookup: SqliteJsonValue[]): Buffer => { + const normalized = lookup.map((value) => { + if (typeof value == 'number' && Number.isInteger(value)) { + return BigInt(value); + } else { + return value; + } + }); + return bson.serialize({ l: normalized }) as Buffer; +}; + +export const serializeLookup = (lookup: SqliteJsonValue[]) => { + return new bson.Binary(serializeLookupBuffer(lookup)); +}; + +/** + * True if this is a bson.UUID. + * + * Works even with multiple copies of the bson package.
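+ *
+ * @example
+ * A small usage sketch; the check is structural (BSON type markers) rather than `instanceof`:
+ * ```TypeScript
+ * isUUID(new bson.UUID()); // true, even if the UUID comes from another copy of `bson`
+ * isUUID('not-a-uuid');    // false, plain strings are not UUID instances
+ * ```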
+ */ +export const isUUID = (value: any): value is bson.UUID => { + if (value == null || typeof value != 'object') { + return false; + } + const uuid = value as bson.UUID; + return uuid._bsontype == 'Binary' && uuid.sub_type == bson.Binary.SUBTYPE_UUID; +}; + +export const serializeReplicaId = (id: ReplicaId): Buffer => { + return bson.serialize({ id }) as Buffer; +}; + +export const deserializeReplicaId = (id: Buffer): ReplicaId => { + const deserialized = deserializeBson(id); + return deserialized.id; +}; + +export const deserializeBson = (buffer: Buffer) => { + return bson.deserialize(buffer, BSON_DESERIALIZE_OPTIONS); +}; + +export const serializeBson = (document: any): Buffer => { + return bson.serialize(document) as Buffer; +}; + +/** + * Returns true if two ReplicaId values are the same (serializes to the same BSON value). + */ +export const replicaIdEquals = (a: ReplicaId, b: ReplicaId) => { + if (a === b) { + return true; + } else if (typeof a == 'string' && typeof b == 'string') { + return a == b; + } else if (isUUID(a) && isUUID(b)) { + return a.equals(b); + } else if (a == null && b == null) { + return true; + } else if ((b == null && a != null) || (a == null && b != null)) { + return false; + } else { + // There are many possible primitive values, this covers them all + return serializeReplicaId(a).equals(serializeReplicaId(b) as ArrayBuffer as Uint8Array); + } +}; diff --git a/packages/service-core/src/storage/mongo/MongoStorageProvider.ts b/packages/service-core/src/storage/mongo/MongoStorageProvider.ts deleted file mode 100644 index b4e84f20..00000000 --- a/packages/service-core/src/storage/mongo/MongoStorageProvider.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { logger } from '@powersync/lib-services-framework'; -import * as db from '../../db/db-index.js'; -import { MongoBucketStorage } from '../MongoBucketStorage.js'; -import { ActiveStorage, BucketStorageProvider, GetStorageOptions } from '../StorageProvider.js'; -import { PowerSyncMongo } from './db.js'; - -export class MongoStorageProvider implements BucketStorageProvider { - get type() { - return 'mongodb'; - } - - async getStorage(options: GetStorageOptions): Promise { - const { resolvedConfig } = options; - - const client = db.mongo.createMongoClient(resolvedConfig.storage); - - const database = new PowerSyncMongo(client, { database: resolvedConfig.storage.database }); - - return { - storage: new MongoBucketStorage(database, { - // TODO currently need the entire resolved config due to this - slot_name_prefix: resolvedConfig.slot_name_prefix - }), - shutDown: () => client.close(), - tearDown: () => { - logger.info(`Tearing down storage: ${database.db.namespace}...`); - return database.db.dropDatabase(); - } - } satisfies ActiveStorage; - } -} diff --git a/packages/service-core/src/storage/mongo/config.ts b/packages/service-core/src/storage/mongo/config.ts deleted file mode 100644 index 8ff241e2..00000000 --- a/packages/service-core/src/storage/mongo/config.ts +++ /dev/null @@ -1,40 +0,0 @@ -import * as urijs from 'uri-js'; - -export interface MongoConnectionConfig { - uri: string; - username?: string; - password?: string; - database?: string; -} - -/** - * Validate and normalize connection options. - * - * Returns destructured options. - * - * For use by both storage and mongo module. - */ -export function normalizeMongoConfig(options: MongoConnectionConfig) { - let uri = urijs.parse(options.uri); - - const database = options.database ?? uri.path?.substring(1) ?? 
''; - - const userInfo = uri.userinfo?.split(':'); - - const username = options.username ?? userInfo?.[0]; - const password = options.password ?? userInfo?.[1]; - - if (database == '') { - throw new Error(`database required`); - } - - delete uri.userinfo; - - return { - uri: urijs.serialize(uri), - database, - - username, - password - }; -} diff --git a/packages/service-core/src/storage/storage-index.ts b/packages/service-core/src/storage/storage-index.ts index 231bb384..b3e2c15b 100644 --- a/packages/service-core/src/storage/storage-index.ts +++ b/packages/service-core/src/storage/storage-index.ts @@ -1,21 +1,9 @@ +export * from './bson.js'; export * from './BucketStorage.js'; -export * from './MongoBucketStorage.js'; +export * from './ChecksumCache.js'; export * from './ReplicationEventPayload.js'; export * from './SourceEntity.js'; export * from './SourceTable.js'; export * from './StorageEngine.js'; - -export * from './mongo/config.js'; -export * from './mongo/db.js'; -export * from './mongo/models.js'; -export * from './mongo/MongoBucketBatch.js'; -export * from './mongo/MongoIdSequence.js'; -export * from './mongo/MongoPersistedSyncRules.js'; -export * from './mongo/MongoPersistedSyncRulesContent.js'; -export * from './mongo/MongoStorageProvider.js'; -export * from './mongo/MongoSyncBucketStorage.js'; -export * from './mongo/MongoSyncRulesLock.js'; -export * from './mongo/OperationBatch.js'; -export * from './mongo/PersistedBatch.js'; -export * from './mongo/util.js'; +export * from './StorageProvider.js'; export * from './WriteCheckpointAPI.js'; diff --git a/packages/service-core/src/sync/sync.ts b/packages/service-core/src/sync/sync.ts index 8f2f900a..0b257820 100644 --- a/packages/service-core/src/sync/sync.ts +++ b/packages/service-core/src/sync/sync.ts @@ -318,7 +318,9 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator migrationManager[Symbol.asyncDispose]() + }); + this.lifeCycleEngine.withLifecycle(this.storageEngine, { start: (storageEngine) => storageEngine.start(), stop: (storageEngine) => storageEngine.shutDown() }); - - // Mongo storage is available as an option by default TODO: Consider moving this to a Mongo Storage Module - this.storageEngine.registerProvider(new storage.MongoStorageProvider()); } get replicationEngine(): replication.ReplicationEngine | null { @@ -51,6 +60,10 @@ export class ServiceContextContainer implements ServiceContext { return container.getOptional(metrics.Metrics); } + get migrations(): PowerSyncMigrationManager { + return container.getImplementation(framework.ContainerImplementation.MIGRATION_MANAGER); + } + /** * Allows for registering core and generic implementations of services/helpers. * This uses the framework container under the hood. 
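A minimal usage sketch of the new replica id helpers added in `packages/service-core/src/storage/bson.ts` above. It only exercises the exported functions visible in this diff; the relative import path assumes the snippet sits next to `bson.ts`, and the sample values are illustrative:

```ts
import * as bson from 'bson';
// Relative path is an assumption; these helpers could also be consumed via the
// package's storage exports.
import { replicaIdEquals, serializeReplicaId, deserializeReplicaId, serializeLookupBuffer } from './bson.js';

// UUID replica ids are compared with bson.UUID.equals, which also works when
// the two values come from different copies of the bson package.
const a = new bson.UUID('6544e389-9293-4531-a7b3-833100000000');
const b = new bson.UUID('6544e389-9293-4531-a7b3-833100000000');
console.log(replicaIdEquals(a, b)); // true

// Composite replica ids fall back to comparing their BSON serialization.
const row1 = { tenant: 't1', id: 42n };
const row2 = { tenant: 't1', id: 42n };
console.log(replicaIdEquals(row1, row2)); // true

// Round-trip through serialize/deserialize; useBigInt64 keeps 42n a bigint.
console.log(deserializeReplicaId(serializeReplicaId(row1))); // { tenant: 't1', id: 42n }

// Lookups are number-agnostic: the integer 1 and the bigint 1n serialize identically.
const k1 = serializeLookupBuffer(['users', 1]);
const k2 = serializeLookupBuffer(['users', 1n]);
console.log(k1.equals(k2)); // true
```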
diff --git a/packages/service-core/src/util/config/types.ts b/packages/service-core/src/util/config/types.ts index 99829526..f56dce95 100644 --- a/packages/service-core/src/util/config/types.ts +++ b/packages/service-core/src/util/config/types.ts @@ -30,8 +30,8 @@ export type SyncRulesConfig = { export type ResolvedPowerSyncConfig = { base_config: PowerSyncConfig; - connections?: configFile.DataSourceConfig[]; - storage: configFile.StorageConfig; + connections?: configFile.GenericDataSourceConfig[]; + storage: configFile.GenericStorageConfig; dev: { demo_auth: boolean; demo_password?: string; diff --git a/packages/service-core/src/util/utils.ts b/packages/service-core/src/util/utils.ts index 4829ee92..b34cf749 100644 --- a/packages/service-core/src/util/utils.ts +++ b/packages/service-core/src/util/utils.ts @@ -208,6 +208,17 @@ export function reduceBucket(operations: OplogEntry[]) { return finalState; } +/** + * Flattens string to reduce memory usage (around 320 bytes -> 120 bytes), + * at the cost of some upfront CPU usage. + * + * From: https://github.com/davidmarkclements/flatstr/issues/8 + */ +export function flatstr(s: string) { + s.match(/\n/g); + return s; +} + function rowKey(entry: OplogEntry) { return `${entry.object_type}/${entry.object_id}/${entry.subkey}`; } diff --git a/packages/service-core/test/src/bucket_validation.ts b/packages/service-core/test/src/bucket_validation.ts deleted file mode 100644 index 039c723f..00000000 --- a/packages/service-core/test/src/bucket_validation.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { OplogEntry } from '@/util/protocol-types.js'; -import { reduceBucket } from '@/util/utils.js'; -import { expect } from 'vitest'; - -/** - * Validate this property, as described in /docs/bucket-properties.md: - * - * $r(B_{[..id_n]}) = r(r(B_{[..id_i]}) \cup B_{[id_{i+1}..id_n]}) \;\forall\; i \in [1..n]$ - * - * We test that a client syncing the entire bucket in one go (left side of the equation), - * ends up with the same result as another client syncing up to operation id_i, then sync - * the rest. - */ -export function validateBucket(bucket: OplogEntry[]) { - const r1 = reduceBucket(bucket); - for (let i = 0; i <= bucket.length; i++) { - const r2 = reduceBucket(bucket.slice(0, i + 1)); - const b3 = bucket.slice(i + 1); - const r3 = r2.concat(b3); - const r4 = reduceBucket(r3); - expect(r4).toEqual(r1); - } - - // This is the same check, just implemented differently - validateCompactedBucket(bucket, bucket); -} - -/** - * Validate these properties for a bucket $B$ and its compacted version $B'$,: - * as described in /docs/bucket-properties.md: - * - * 1. $r(B) = r(B')$ - * 2. $r(B_{[..c]}) = r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}) \;\forall\; c_i \in B$ - * - * The first one is that the result of syncing the original bucket is the same as - * syncing the compacted bucket. - * - * The second property is that result of syncing the entire original bucket, is the same - * as syncing any partial version of that (up to op $c_i$), and then continue syncing - * using the compacted bucket. 
- */ -export function validateCompactedBucket(bucket: OplogEntry[], compacted: OplogEntry[]) { - // r(B_{[..c]}) - const r1 = reduceBucket(bucket); - // r(B) = r(B') - expect(reduceBucket(compacted)).toEqual(r1); - - for (let i = 0; i < bucket.length; i++) { - // r(B_{[..c_i]}) - const r2 = reduceBucket(bucket.slice(0, i + 1)); - const c_i = BigInt(bucket[i].op_id); - // B'_{[c_i+1..c]} - const b3 = compacted.filter((op) => BigInt(op.op_id) > c_i); - // r(B_{[..c_i]}) \cup B'_{[c_i+1..c]} - const r3 = r2.concat(b3); - // r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}) - const r4 = reduceBucket(r3); - expect(r4).toEqual(r1); - } -} diff --git a/packages/service-core/test/src/env.ts b/packages/service-core/test/src/env.ts index 1c86eae3..eb4f8eef 100644 --- a/packages/service-core/test/src/env.ts +++ b/packages/service-core/test/src/env.ts @@ -1,6 +1,5 @@ import { utils } from '@powersync/lib-services-framework'; export const env = utils.collectEnvironmentVariables({ - MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), CI: utils.type.boolean.default('false') }); diff --git a/packages/service-core/test/src/util.ts b/packages/service-core/test/src/util.ts deleted file mode 100644 index 8b7cce85..00000000 --- a/packages/service-core/test/src/util.ts +++ /dev/null @@ -1,150 +0,0 @@ -import { Metrics } from '@/metrics/Metrics.js'; -import { - BucketStorageFactory, - ParseSyncRulesOptions, - PersistedSyncRulesContent, - StartBatchOptions, - SyncBucketDataBatch -} from '@/storage/BucketStorage.js'; -import { MongoBucketStorage } from '@/storage/MongoBucketStorage.js'; -import { SourceTable } from '@/storage/SourceTable.js'; -import { PowerSyncMongo } from '@/storage/mongo/db.js'; -import { SyncBucketData } from '@/util/protocol-types.js'; -import { getUuidReplicaIdentityBson, hashData } from '@/util/utils.js'; -import { SqlSyncRules } from '@powersync/service-sync-rules'; -import * as bson from 'bson'; -import * as mongo from 'mongodb'; -import { env } from './env.js'; - -// The metrics need to be initialised before they can be used -await Metrics.initialise({ - disable_telemetry_sharing: true, - powersync_instance_id: 'test', - internal_metrics_endpoint: 'unused.for.tests.com' -}); -Metrics.getInstance().resetCounters(); - -export interface StorageOptions { - /** - * By default, collections are only cleared/ - * Setting this to true will drop the collections completely. 
- */ - dropAll?: boolean; - - doNotClear?: boolean; -} -export type StorageFactory = (options?: StorageOptions) => Promise; - -export const MONGO_STORAGE_FACTORY: StorageFactory = async (options?: StorageOptions) => { - const db = await connectMongo(); - if (options?.dropAll) { - await db.drop(); - } else if (!options?.doNotClear) { - await db.clear(); - } - return new MongoBucketStorage(db, { slot_name_prefix: 'test_' }); -}; - -export const ZERO_LSN = '0/0'; - -export const PARSE_OPTIONS: ParseSyncRulesOptions = { - defaultSchema: 'public' -}; - -export const BATCH_OPTIONS: StartBatchOptions = { - ...PARSE_OPTIONS, - zeroLSN: ZERO_LSN, - storeCurrentData: true -}; - -export function testRules(content: string): PersistedSyncRulesContent { - return { - id: 1, - sync_rules_content: content, - slot_name: 'test', - parsed(options) { - return { - id: 1, - sync_rules: SqlSyncRules.fromYaml(content, options), - slot_name: 'test' - }; - }, - lock() { - throw new Error('Not implemented'); - } - }; -} - -export async function connectMongo() { - // Short timeout for tests, to fail fast when the server is not available. - // Slightly longer timeouts for CI, to avoid arbitrary test failures - const client = new mongo.MongoClient(env.MONGO_TEST_URL, { - connectTimeoutMS: env.CI ? 15_000 : 5_000, - socketTimeoutMS: env.CI ? 15_000 : 5_000, - serverSelectionTimeoutMS: env.CI ? 15_000 : 2_500 - }); - return new PowerSyncMongo(client); -} - -export function makeTestTable(name: string, columns?: string[] | undefined) { - const relId = hashData('table', name, (columns ?? ['id']).join(',')); - const id = new bson.ObjectId('6544e3899293153fa7b38331'); - return new SourceTable( - id, - SourceTable.DEFAULT_TAG, - relId, - 'public', - name, - (columns ?? ['id']).map((column) => ({ name: column, type: 'VARCHAR', typeId: 25 })), - true - ); -} - -export function getBatchData(batch: SyncBucketData[] | SyncBucketDataBatch[] | SyncBucketDataBatch) { - const first = getFirst(batch); - if (first == null) { - return []; - } - return first.data.map((d) => { - return { - op_id: d.op_id, - op: d.op, - object_id: d.object_id, - checksum: d.checksum - }; - }); -} - -export function getBatchMeta(batch: SyncBucketData[] | SyncBucketDataBatch[] | SyncBucketDataBatch) { - const first = getFirst(batch); - if (first == null) { - return null; - } - return { - has_more: first.has_more, - after: first.after, - next_after: first.next_after - }; -} - -function getFirst(batch: SyncBucketData[] | SyncBucketDataBatch[] | SyncBucketDataBatch): SyncBucketData | null { - if (!Array.isArray(batch)) { - return batch.batch; - } - if (batch.length == 0) { - return null; - } - let first = batch[0]; - if ((first as SyncBucketDataBatch).batch != null) { - return (first as SyncBucketDataBatch).batch; - } else { - return first as SyncBucketData; - } -} - -/** - * Replica id in the old Postgres format, for backwards-compatible tests. 
- */ -export function rid(id: string): bson.UUID { - return getUuidReplicaIdentityBson({ id: id }, [{ name: 'id', type: 'VARCHAR', typeId: 25 }]); -} diff --git a/packages/sync-rules/src/types.ts b/packages/sync-rules/src/types.ts index b506c09c..4621ae91 100644 --- a/packages/sync-rules/src/types.ts +++ b/packages/sync-rules/src/types.ts @@ -1,9 +1,9 @@ import { JSONBig, JsonContainer } from '@powersync/service-jsonbig'; +import { ColumnDefinition } from './ExpressionType.js'; import { SourceTableInterface } from './SourceTableInterface.js'; -import { ColumnDefinition, ExpressionType } from './ExpressionType.js'; +import { SyncRulesOptions } from './SqlSyncRules.js'; import { TablePattern } from './TablePattern.js'; import { toSyncRulesParameters } from './utils.js'; -import { SyncRulesOptions } from './SqlSyncRules.js'; export interface SyncRules { evaluateRow(options: EvaluateRowOptions): EvaluationResult[]; diff --git a/packages/types/package.json b/packages/types/package.json index 78c3187d..8271557c 100644 --- a/packages/types/package.json +++ b/packages/types/package.json @@ -13,7 +13,7 @@ "build": "tsc -b" }, "dependencies": { - "ts-codec": "^1.2.2", + "ts-codec": "^1.3.0", "uri-js": "^4.4.1" } } diff --git a/packages/types/src/config/PowerSyncConfig.ts b/packages/types/src/config/PowerSyncConfig.ts index 1c4c743f..bd91b879 100644 --- a/packages/types/src/config/PowerSyncConfig.ts +++ b/packages/types/src/config/PowerSyncConfig.ts @@ -102,15 +102,20 @@ export const strictJwks = t.object({ export type StrictJwk = t.Decoded; -export const storageConfig = t.object({ - type: t.literal('mongodb'), - uri: t.string, - database: t.string.optional(), - username: t.string.optional(), - password: t.string.optional() +export const BaseStorageConfig = t.object({ + type: t.string }); -export type StorageConfig = t.Decoded; +/** + * Base configuration for Bucket storage connections. 
+ */ +export type BaseStorageConfig = t.Encoded; + +/** + * This essentially allows any extra fields on this type + */ +export const GenericStorageConfig = BaseStorageConfig.and(t.record(t.any)); +export type GenericStorageConfig = t.Encoded; export const powerSyncConfig = t.object({ replication: t @@ -146,7 +151,7 @@ export const powerSyncConfig = t.object({ }) .optional(), - storage: storageConfig, + storage: GenericStorageConfig, port: portCodec.optional(), sync_rules: t diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 1987b9c7..f62b68b2 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -18,8 +18,8 @@ importers: specifier: ^3.2.4 version: 3.2.5 bson: - specifier: ^6.6.0 - version: 6.8.0 + specifier: ^6.8.0 + version: 6.10.1 concurrently: specifier: ^8.2.2 version: 8.2.2 @@ -60,6 +60,24 @@ importers: specifier: ^8.2.3 version: 8.18.0 + libs/lib-mongodb: + dependencies: + '@powersync/lib-services-framework': + specifier: workspace:* + version: link:../lib-services + bson: + specifier: ^6.8.0 + version: 6.10.1 + mongodb: + specifier: ^6.11.0 + version: 6.11.0(socks@2.8.3) + ts-codec: + specifier: ^1.3.0 + version: 1.3.0 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 + libs/lib-services: dependencies: ajv: @@ -69,8 +87,8 @@ importers: specifier: ^1.2.0 version: 1.2.0(ajv@8.16.0) bson: - specifier: ^6.6.0 - version: 6.8.0 + specifier: ^6.8.0 + version: 6.10.1 dotenv: specifier: ^16.4.5 version: 16.4.5 @@ -78,8 +96,8 @@ importers: specifier: ^4.17.21 version: 4.17.21 ts-codec: - specifier: ^1.2.2 - version: 1.2.2 + specifier: ^1.3.0 + version: 1.3.0 uuid: specifier: ^9.0.1 version: 9.0.1 @@ -102,6 +120,9 @@ importers: modules/module-mongodb: dependencies: + '@powersync/lib-service-mongodb': + specifier: workspace:* + version: link:../../libs/lib-mongodb '@powersync/lib-services-framework': specifier: workspace:* version: link:../../libs/lib-services @@ -117,19 +138,71 @@ importers: '@powersync/service-types': specifier: workspace:* version: link:../../packages/types + bson: + specifier: ^6.8.0 + version: 6.10.1 mongodb: specifier: ^6.11.0 version: 6.11.0(socks@2.8.3) ts-codec: - specifier: ^1.2.2 - version: 1.2.2 - uri-js: - specifier: ^4.4.1 - version: 4.4.1 + specifier: ^1.3.0 + version: 1.3.0 + uuid: + specifier: ^9.0.1 + version: 9.0.1 + devDependencies: + '@powersync/service-core-tests': + specifier: workspace:* + version: link:../../packages/service-core-tests + '@powersync/service-module-mongodb-storage': + specifier: workspace:* + version: link:../module-mongodb-storage + '@types/uuid': + specifier: ^9.0.4 + version: 9.0.8 + + modules/module-mongodb-storage: + dependencies: + '@powersync/lib-service-mongodb': + specifier: workspace:* + version: link:../../libs/lib-mongodb + '@powersync/lib-services-framework': + specifier: workspace:* + version: link:../../libs/lib-services + '@powersync/service-core': + specifier: workspace:* + version: link:../../packages/service-core + '@powersync/service-jsonbig': + specifier: workspace:* + version: link:../../packages/jsonbig + '@powersync/service-sync-rules': + specifier: workspace:* + version: link:../../packages/sync-rules + '@powersync/service-types': + specifier: workspace:* + version: link:../../packages/types + bson: + specifier: ^6.8.0 + version: 6.10.1 + ix: + specifier: ^5.0.0 + version: 5.0.0 + lru-cache: + specifier: ^10.2.2 + version: 10.4.3 + mongodb: + specifier: ^6.11.0 + version: 6.11.0(socks@2.8.3) + ts-codec: + specifier: ^1.3.0 + version: 1.3.0 uuid: specifier: ^9.0.1 version: 9.0.1 devDependencies: + 
'@powersync/service-core-tests': + specifier: workspace:* + version: link:../../packages/service-core-tests '@types/uuid': specifier: ^9.0.4 version: 9.0.8 @@ -164,8 +237,8 @@ importers: specifier: ^7.5.4 version: 7.6.2 ts-codec: - specifier: ^1.2.2 - version: 1.2.2 + specifier: ^1.3.0 + version: 1.3.0 uri-js: specifier: ^4.4.1 version: 4.4.1 @@ -173,6 +246,12 @@ importers: specifier: ^9.0.1 version: 9.0.1 devDependencies: + '@powersync/service-core-tests': + specifier: workspace:* + version: link:../../packages/service-core-tests + '@powersync/service-module-mongodb-storage': + specifier: workspace:* + version: link:../module-mongodb-storage '@types/async': specifier: ^3.2.24 version: 3.2.24 @@ -210,8 +289,8 @@ importers: specifier: github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 version: https://codeload.github.com/kagis/pgwire/tar.gz/f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 ts-codec: - specifier: ^1.2.2 - version: 1.2.2 + specifier: ^1.3.0 + version: 1.3.0 uri-js: specifier: ^4.4.1 version: 4.4.1 @@ -219,6 +298,12 @@ importers: specifier: ^9.0.1 version: 9.0.1 devDependencies: + '@powersync/service-core-tests': + specifier: workspace:* + version: link:../../packages/service-core-tests + '@powersync/service-module-mongodb-storage': + specifier: workspace:* + version: link:../module-mongodb-storage '@types/uuid': specifier: ^9.0.4 version: 9.0.8 @@ -256,8 +341,8 @@ importers: specifier: 1.0.0-alpha.3 version: 1.0.0-alpha.3 ts-codec: - specifier: ^1.2.2 - version: 1.2.2 + specifier: ^1.3.0 + version: 1.3.0 uuid: specifier: ^9.0.1 version: 9.0.1 @@ -272,8 +357,8 @@ importers: specifier: ~8.2.0 version: 8.2.3 bson: - specifier: ^6.6.0 - version: 6.8.0 + specifier: ^6.8.0 + version: 6.10.1 rsocket-websocket-client: specifier: 1.0.0-alpha.3 version: 1.0.0-alpha.3 @@ -320,8 +405,8 @@ importers: specifier: ^0.5.0 version: 0.5.0 bson: - specifier: ^6.6.0 - version: 6.8.0 + specifier: ^6.8.0 + version: 6.10.1 commander: specifier: ^12.0.0 version: 12.1.0 @@ -343,15 +428,12 @@ importers: lru-cache: specifier: ^10.2.2 version: 10.4.3 - mongodb: - specifier: ^6.11.0 - version: 6.11.0(socks@2.8.3) node-fetch: specifier: ^3.3.2 version: 3.3.2 ts-codec: - specifier: ^1.2.2 - version: 1.2.2 + specifier: ^1.3.0 + version: 1.3.0 uri-js: specifier: ^4.4.1 version: 4.4.1 @@ -381,6 +463,28 @@ importers: specifier: ^4.5.1 version: 4.5.1 + packages/service-core-tests: + dependencies: + '@powersync/service-core': + specifier: workspace:^ + version: link:../service-core + '@powersync/service-jsonbig': + specifier: workspace:^ + version: link:../jsonbig + '@powersync/service-sync-rules': + specifier: workspace:^ + version: link:../sync-rules + vite-tsconfig-paths: + specifier: ^4.3.2 + version: 4.3.2(typescript@5.6.2)(vite@5.3.3(@types/node@22.5.5)) + vitest: + specifier: ^2.1.1 + version: 2.1.1(@types/node@22.5.5) + devDependencies: + typescript: + specifier: ^5.6.2 + version: 5.6.2 + packages/sync-rules: dependencies: '@powersync/service-jsonbig': @@ -415,8 +519,8 @@ importers: packages/types: dependencies: ts-codec: - specifier: ^1.2.2 - version: 1.2.2 + specifier: ^1.3.0 + version: 1.3.0 uri-js: specifier: ^4.4.1 version: 4.4.1 @@ -450,6 +554,9 @@ importers: '@powersync/service-module-mongodb': specifier: workspace:* version: link:../modules/module-mongodb + '@powersync/service-module-mongodb-storage': + specifier: workspace:* + version: link:../modules/module-mongodb-storage '@powersync/service-module-mysql': specifier: workspace:* version: link:../modules/module-mysql @@ -472,8 +579,8 @@ 
importers: specifier: ^0.5.0 version: 0.5.0 bson: - specifier: ^6.6.0 - version: 6.8.0 + specifier: ^6.8.0 + version: 6.10.1 commander: specifier: ^12.0.0 version: 12.1.0 @@ -505,8 +612,8 @@ importers: specifier: github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 version: https://codeload.github.com/kagis/pgwire/tar.gz/f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 ts-codec: - specifier: ^1.2.2 - version: 1.2.2 + specifier: ^1.3.0 + version: 1.3.0 uuid: specifier: ^9.0.1 version: 9.0.1 @@ -554,6 +661,9 @@ importers: '@types/node': specifier: ^22.5.5 version: 22.5.5 + typescript: + specifier: ^5.6.2 + version: 5.6.2 packages: @@ -1591,10 +1701,6 @@ packages: resolution: {integrity: sha512-P92xmHDQjSKPLHqFxefqMxASNq/aWJMEZugpCjf+AF/pgcUpMMQCg7t7+ewko0/u8AapvF3luf/FoehddEK+sA==} engines: {node: '>=16.20.1'} - bson@6.8.0: - resolution: {integrity: sha512-iOJg8pr7wq2tg/zSlCCHMi3hMm5JTOxLTagf3zxhcenHsFp+c6uOs6K7W5UE7A4QIJGtqh/ZovFNMP4mOPJynQ==} - engines: {node: '>=16.20.1'} - buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} @@ -3452,8 +3558,8 @@ packages: resolution: {integrity: sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==} engines: {node: '>= 14.0.0'} - ts-codec@1.2.2: - resolution: {integrity: sha512-dlAsfmeEa3AA+6rvgKLlLb6rqTMthQNH9w+B2FGM5sRu7sTd0GSbmLuW0HAmRlIeyUEZzMXk0yRmP0afribqmg==} + ts-codec@1.3.0: + resolution: {integrity: sha512-OOaGvS0UwjyOychFZwjqSm47K65lzTCSup47RDG30crZr2MGnQCHQ13duAI4OcnzuYITNN6JDdS8RrtB0g204Q==} ts-node-dev@2.0.0: resolution: {integrity: sha512-ywMrhCfH6M75yftYvrvNarLEY+SUXtUvU8/0Z6llrHQVBx12GiFk5sStF8UdfE/yfzk9IAq7O5EEbTQsxlBI8w==} @@ -4640,7 +4746,7 @@ snapshots: '@opentelemetry/semantic-conventions': 1.25.1 '@prisma/instrumentation': 5.16.1 '@sentry/core': 8.17.0 - '@sentry/opentelemetry': 8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1) + '@sentry/opentelemetry': 8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.6.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/semantic-conventions@1.25.1) '@sentry/types': 8.17.0 '@sentry/utils': 8.17.0 optionalDependencies: @@ -4648,7 +4754,7 @@ snapshots: transitivePeerDependencies: - supports-color - '@sentry/opentelemetry@8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1)': + '@sentry/opentelemetry@8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.6.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/semantic-conventions@1.25.1)': dependencies: '@opentelemetry/api': 1.9.0 '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) @@ -4991,8 +5097,6 @@ snapshots: bson@6.10.1: {} - bson@6.8.0: {} - buffer-from@1.1.2: {} buffer@5.7.1: @@ -6949,7 +7053,7 @@ snapshots: triple-beam@1.4.1: {} - ts-codec@1.2.2: {} + ts-codec@1.3.0: {} ts-node-dev@2.0.0(@types/node@22.5.5)(typescript@5.6.2): dependencies: diff --git 
a/service/Dockerfile b/service/Dockerfile index 98a89332..4acd8d35 100644 --- a/service/Dockerfile +++ b/service/Dockerfile @@ -1,10 +1,8 @@ -FROM node:20.12.2 as builder +FROM node:20.12.2 AS builder WORKDIR /app RUN npm i -g pnpm@9 -ARG GITHUB_TOKEN - COPY package.json pnpm-workspace.yaml pnpm-lock.yaml tsconfig.base.json ./ COPY service/package.json service/tsconfig.json service/ @@ -16,9 +14,11 @@ COPY packages/sync-rules/package.json packages/sync-rules/tsconfig.json packages COPY packages/types/package.json packages/types/tsconfig.json packages/types/ COPY libs/lib-services/package.json libs/lib-services/tsconfig.json libs/lib-services/ +COPY libs/lib-mongodb/package.json libs/lib-mongodb/tsconfig.json libs/lib-mongodb/ COPY modules/module-postgres/package.json modules/module-postgres/tsconfig.json modules/module-postgres/ COPY modules/module-mongodb/package.json modules/module-mongodb/tsconfig.json modules/module-mongodb/ +COPY modules/module-mongodb-storage/package.json modules/module-mongodb-storage/tsconfig.json modules/module-mongodb-storage/ COPY modules/module-mysql/package.json modules/module-mysql/tsconfig.json modules/module-mysql/ RUN pnpm install --frozen-lockfile @@ -35,9 +35,11 @@ COPY packages/rsocket-router/src packages/rsocket-router/src/ COPY packages/types/src packages/types/src/ COPY libs/lib-services/src libs/lib-services/src/ +COPY libs/lib-mongodb/src libs/lib-mongodb/src/ COPY modules/module-postgres/src modules/module-postgres/src/ COPY modules/module-mongodb/src modules/module-mongodb/src/ +COPY modules/module-mongodb-storage/src modules/module-mongodb-storage/src/ COPY modules/module-mysql/src modules/module-mysql/src/ RUN pnpm build:production && \ @@ -46,10 +48,9 @@ RUN pnpm build:production && \ # === TESTER === -FROM node:20.12.2-slim as tester +FROM node:20.12.2-slim AS tester WORKDIR /app -ARG GITHUB_TOKEN COPY --from=builder /app/ ./ diff --git a/service/package.json b/service/package.json index 652674ee..780150a1 100644 --- a/service/package.json +++ b/service/package.json @@ -18,6 +18,7 @@ "@powersync/lib-services-framework": "workspace:*", "@powersync/service-module-postgres": "workspace:*", "@powersync/service-module-mongodb": "workspace:*", + "@powersync/service-module-mongodb-storage": "workspace:*", "@powersync/service-module-mysql": "workspace:*", "@powersync/service-jpgwire": "workspace:*", "@powersync/service-jsonbig": "workspace:*", @@ -26,7 +27,7 @@ "@powersync/service-types": "workspace:*", "@sentry/node": "^8.9.2", "async-mutex": "^0.5.0", - "bson": "^6.6.0", + "bson": "^6.8.0", "commander": "^12.0.0", "cors": "^2.8.5", "fastify": "4.23.2", @@ -37,7 +38,7 @@ "mongodb": "^6.11.0", "node-fetch": "^3.3.2", "pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87", - "ts-codec": "^1.2.2", + "ts-codec": "^1.3.0", "uuid": "^9.0.1", "winston": "^3.13.0", "yaml": "^2.3.2" diff --git a/service/src/entry.ts b/service/src/entry.ts index b4add385..e133c04e 100644 --- a/service/src/entry.ts +++ b/service/src/entry.ts @@ -1,20 +1,21 @@ import { container, ContainerImplementation } from '@powersync/lib-services-framework'; import * as core from '@powersync/service-core'; +import { MongoModule } from '@powersync/service-module-mongodb'; +import { MongoStorageModule } from '@powersync/service-module-mongodb-storage'; +import { MySQLModule } from '@powersync/service-module-mysql'; +import { PostgresModule } from '@powersync/service-module-postgres'; import { startServer } from './runners/server.js'; import { startStreamRunner } from 
'./runners/stream-worker.js'; import { startUnifiedRunner } from './runners/unified-runner.js'; import { createSentryReporter } from './util/alerting.js'; -import { PostgresModule } from '@powersync/service-module-postgres'; -import { MySQLModule } from '@powersync/service-module-mysql'; -import { MongoModule } from '@powersync/service-module-mongodb'; // Initialize framework components container.registerDefaults(); container.register(ContainerImplementation.REPORTER, createSentryReporter()); const moduleManager = new core.modules.ModuleManager(); -moduleManager.register([new PostgresModule(), new MySQLModule(), new MongoModule()]); +moduleManager.register([new PostgresModule(), new MySQLModule(), new MongoModule(), new MongoStorageModule()]); // This is a bit of a hack. Commands such as the teardown command or even migrations might // want access to the ModuleManager in order to use modules container.register(core.ModuleManager, moduleManager); diff --git a/service/src/runners/stream-worker.ts b/service/src/runners/stream-worker.ts index b3c69d2a..323d7117 100644 --- a/service/src/runners/stream-worker.ts +++ b/service/src/runners/stream-worker.ts @@ -21,11 +21,6 @@ export const startStreamRunner = async (runnerConfig: core.utils.RunnerConfig) = const config = await core.utils.loadConfig(runnerConfig); - await core.migrations.ensureAutomaticMigrations({ - config, - runner_config: runnerConfig - }); - // Self hosted version allows for automatic migrations const serviceContext = new core.system.ServiceContextContainer(config); @@ -39,6 +34,11 @@ export const startStreamRunner = async (runnerConfig: core.utils.RunnerConfig) = const moduleManager = container.getImplementation(core.modules.ModuleManager); await moduleManager.initialize(serviceContext); + // Ensure automatic migrations + await core.migrations.ensureAutomaticMigrations({ + serviceContext + }); + logger.info('Starting system'); await serviceContext.lifeCycleEngine.start(); logger.info('System started'); diff --git a/service/src/runners/unified-runner.ts b/service/src/runners/unified-runner.ts index 4997d893..5e505e2f 100644 --- a/service/src/runners/unified-runner.ts +++ b/service/src/runners/unified-runner.ts @@ -13,11 +13,6 @@ export const startUnifiedRunner = async (runnerConfig: core.utils.RunnerConfig) const config = await core.utils.loadConfig(runnerConfig); - await core.migrations.ensureAutomaticMigrations({ - config, - runner_config: runnerConfig - }); - const serviceContext = new core.system.ServiceContextContainer(config); registerServerServices(serviceContext); @@ -31,6 +26,10 @@ export const startUnifiedRunner = async (runnerConfig: core.utils.RunnerConfig) const moduleManager = container.getImplementation(core.modules.ModuleManager); await moduleManager.initialize(serviceContext); + await core.migrations.ensureAutomaticMigrations({ + serviceContext + }); + logger.info('Starting service...'); await serviceContext.lifeCycleEngine.start(); logger.info('Service started'); diff --git a/service/tsconfig.json b/service/tsconfig.json index 1576a67c..574744f7 100644 --- a/service/tsconfig.json +++ b/service/tsconfig.json @@ -36,6 +36,9 @@ { "path": "../modules/module-mongodb" }, + { + "path": "../modules/module-mongodb-storage" + }, { "path": "../modules/module-mysql" } diff --git a/test-client/package.json b/test-client/package.json index e30f7320..1cbeed2d 100644 --- a/test-client/package.json +++ b/test-client/package.json @@ -20,6 +20,7 @@ "yaml": "^2.5.0" }, "devDependencies": { - "@types/node": "^22.5.5" + 
"@types/node": "^22.5.5", + "typescript": "^5.6.2" } } diff --git a/tsconfig.json b/tsconfig.json index 542ecf4b..4eaac015 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -25,9 +25,15 @@ { "path": "./modules/module-mongodb" }, + { + "path": "./modules/module-mongodb-storage" + }, { "path": "./libs/lib-services" }, + { + "path": "./libs/lib-mongodb" + }, { "path": "./packages/types" },