diff --git a/packages/trailbase-db-collection/.gitignore b/packages/trailbase-db-collection/.gitignore new file mode 100644 index 000000000..db877dc07 --- /dev/null +++ b/packages/trailbase-db-collection/.gitignore @@ -0,0 +1,2 @@ +# E2E test temp data directories +.trailbase-e2e-data-* diff --git a/packages/trailbase-db-collection/docker/Dockerfile b/packages/trailbase-db-collection/docker/Dockerfile new file mode 100644 index 000000000..55f8de759 --- /dev/null +++ b/packages/trailbase-db-collection/docker/Dockerfile @@ -0,0 +1,17 @@ +FROM trailbase/trailbase:latest + +USER root +WORKDIR /app + +RUN mkdir -p /app/traildepot/migrations + +COPY config.textproto /app/traildepot/config.textproto +COPY init.sql /app/traildepot/migrations/V10__init.sql + +RUN chown -R trailbase:trailbase /app/traildepot && \ + chmod -R 777 /app/traildepot + +EXPOSE 4000 + +USER trailbase +CMD ["/app/trail", "--data-dir", "/app/traildepot", "run", "--address", "0.0.0.0:4000", "--dev"] diff --git a/packages/trailbase-db-collection/docker/config.textproto b/packages/trailbase-db-collection/docker/config.textproto new file mode 100644 index 000000000..230a61465 --- /dev/null +++ b/packages/trailbase-db-collection/docker/config.textproto @@ -0,0 +1,26 @@ +email {} +server { + application_name: "TrailBase E2E" +} +auth {} +jobs {} +record_apis: [ + { + name: "users_e2e" + table_name: "users_e2e" + acl_world: [CREATE, READ, UPDATE, DELETE] + enable_subscriptions: true + }, + { + name: "posts_e2e" + table_name: "posts_e2e" + acl_world: [CREATE, READ, UPDATE, DELETE] + enable_subscriptions: true + }, + { + name: "comments_e2e" + table_name: "comments_e2e" + acl_world: [CREATE, READ, UPDATE, DELETE] + enable_subscriptions: true + } +] diff --git a/packages/trailbase-db-collection/docker/docker-compose.yml b/packages/trailbase-db-collection/docker/docker-compose.yml new file mode 100644 index 000000000..b6e204214 --- /dev/null +++ b/packages/trailbase-db-collection/docker/docker-compose.yml @@ -0,0 +1,15 @@ +version: '3.8' + +services: + trailbase: + build: + context: . + dockerfile: Dockerfile + ports: + - '4000:4000' + healthcheck: + test: ['CMD', 'curl', '-f', 'http://localhost:4000/api/healthz'] + interval: 5s + timeout: 5s + retries: 5 + start_period: 10s diff --git a/packages/trailbase-db-collection/docker/init.sql b/packages/trailbase-db-collection/docker/init.sql new file mode 100644 index 000000000..28b8158cc --- /dev/null +++ b/packages/trailbase-db-collection/docker/init.sql @@ -0,0 +1,35 @@ +-- E2E Test Tables for TrailBase +-- Using BLOB UUID PRIMARY KEY with auto-generated uuid_v7() +-- Using is_uuid() check to accept both v4 and v7 UUIDs +-- Using camelCase column names to match @tanstack/db-collection-e2e types + +CREATE TABLE "users_e2e" ( + "id" BLOB PRIMARY KEY NOT NULL CHECK(is_uuid(id)) DEFAULT (uuid_v7()), + "name" TEXT NOT NULL, + "email" TEXT, + "age" INTEGER NOT NULL, + "isActive" INTEGER NOT NULL DEFAULT 1, + "createdAt" TEXT NOT NULL, + "metadata" TEXT, + "deletedAt" TEXT +) STRICT; + +CREATE TABLE "posts_e2e" ( + "id" BLOB PRIMARY KEY NOT NULL CHECK(is_uuid(id)) DEFAULT (uuid_v7()), + "userId" TEXT NOT NULL, + "title" TEXT NOT NULL, + "content" TEXT, + "viewCount" INTEGER NOT NULL DEFAULT 0, + "largeViewCount" TEXT NOT NULL, + "publishedAt" TEXT, + "deletedAt" TEXT +) STRICT; + +CREATE TABLE "comments_e2e" ( + "id" BLOB PRIMARY KEY NOT NULL CHECK(is_uuid(id)) DEFAULT (uuid_v7()), + "postId" TEXT NOT NULL, + "userId" TEXT NOT NULL, + "text" TEXT NOT NULL, + "createdAt" TEXT NOT NULL, + "deletedAt" TEXT +) STRICT; diff --git a/packages/trailbase-db-collection/e2e/global-setup.ts b/packages/trailbase-db-collection/e2e/global-setup.ts new file mode 100644 index 000000000..710e8a0dc --- /dev/null +++ b/packages/trailbase-db-collection/e2e/global-setup.ts @@ -0,0 +1,342 @@ +import { execSync, spawn } from 'node:child_process' +import { cpSync, existsSync, mkdirSync, rmSync } from 'node:fs' +import { dirname, resolve as resolvePath } from 'node:path' +import { fileURLToPath } from 'node:url' +import type { ChildProcess } from 'node:child_process' +import type { GlobalSetupContext } from 'vitest/node' + +const __dirname = dirname(fileURLToPath(import.meta.url)) +const PACKAGE_DIR = resolvePath(__dirname, '..') +const REPO_ROOT = resolvePath(PACKAGE_DIR, '../..') +const DOCKER_DIR = resolvePath(PACKAGE_DIR, 'docker') +// Check multiple possible binary locations +const BINARY_PATHS = [ + resolvePath(PACKAGE_DIR, 'testing-bin-linux', 'trail'), + resolvePath(REPO_ROOT, 'packages/trailbase/test-linux-bin', 'trail'), +] +const CONTAINER_NAME = 'trailbase-e2e-test' +const TRAILBASE_PORT = process.env.TRAILBASE_PORT ?? '4000' +const TRAILBASE_URL = + process.env.TRAILBASE_URL ?? `http://localhost:${TRAILBASE_PORT}` + +// Module augmentation for type-safe context injection +declare module 'vitest' { + export interface ProvidedContext { + baseUrl: string + } +} + +let serverProcess: ChildProcess | null = null +let startedServer = false +let usedMethod: 'binary' | 'docker' | null = null +let tempDataDir: string | null = null +let binaryPath: string | null = null + +/** + * Find the TrailBase binary from available paths + */ +function findBinaryPath(): string | null { + for (const path of BINARY_PATHS) { + if (existsSync(path)) { + return path + } + } + return null +} + +/** + * Check if Docker is available + */ +function isDockerAvailable(): boolean { + try { + execSync('docker --version', { stdio: 'pipe' }) + return true + } catch { + return false + } +} + +/** + * Check if TrailBase is already running + */ +async function isTrailBaseRunning(url: string): Promise { + try { + const res = await fetch(`${url}/api/healthz`) + return res.ok + } catch { + try { + const res = await fetch(url) + return res.ok || res.status === 404 + } catch { + return false + } + } +} + +/** + * Start TrailBase using the local binary + */ +function startBinaryServer(): ChildProcess { + if (!binaryPath) { + throw new Error('Binary path not set') + } + console.log(`๐Ÿš€ Starting TrailBase using local binary at ${binaryPath}...`) + + // Create a temp data directory for this test run + tempDataDir = resolvePath(PACKAGE_DIR, `.trailbase-e2e-data-${Date.now()}`) + mkdirSync(tempDataDir, { recursive: true }) + mkdirSync(resolvePath(tempDataDir, 'migrations'), { recursive: true }) + + // Copy config and migrations from docker dir + cpSync( + resolvePath(DOCKER_DIR, 'config.textproto'), + resolvePath(tempDataDir, 'config.textproto'), + ) + cpSync( + resolvePath(DOCKER_DIR, 'init.sql'), + resolvePath(tempDataDir, 'migrations', 'V10__init.sql'), + ) + + const proc = spawn( + binaryPath, + [ + '--data-dir', + tempDataDir, + 'run', + '--address', + `0.0.0.0:${TRAILBASE_PORT}`, + '--dev', + ], + { + stdio: ['ignore', 'pipe', 'pipe'], + cwd: PACKAGE_DIR, + }, + ) + + proc.stdout.on('data', (data) => { + console.log(`[trailbase] ${data.toString().trim()}`) + }) + + proc.stderr.on('data', (data) => { + console.error(`[trailbase] ${data.toString().trim()}`) + }) + + proc.on('error', (error) => { + console.error('Failed to start TrailBase binary:', error) + }) + + return proc +} + +/** + * Build the TrailBase Docker image + */ +function buildDockerImage(): void { + console.log('๐Ÿ”จ Building TrailBase Docker image...') + execSync(`docker build -t trailbase-e2e ${DOCKER_DIR}`, { + stdio: 'inherit', + }) + console.log('โœ“ Docker image built') +} + +/** + * Stop and remove any existing container with the same name + */ +function cleanupExistingContainer(): void { + try { + execSync(`docker stop ${CONTAINER_NAME} 2>/dev/null || true`, { + stdio: 'pipe', + }) + execSync(`docker rm ${CONTAINER_NAME} 2>/dev/null || true`, { + stdio: 'pipe', + }) + } catch { + // Ignore errors - container might not exist + } +} + +/** + * Start the TrailBase Docker container + */ +function startDockerContainer(): ChildProcess { + console.log('๐Ÿš€ Starting TrailBase container...') + + const proc = spawn( + 'docker', + [ + 'run', + '--rm', + '--name', + CONTAINER_NAME, + '-p', + `${TRAILBASE_PORT}:4000`, + 'trailbase-e2e', + ], + { + stdio: ['ignore', 'pipe', 'pipe'], + }, + ) + + proc.stdout.on('data', (data) => { + console.log(`[trailbase] ${data.toString().trim()}`) + }) + + proc.stderr.on('data', (data) => { + console.error(`[trailbase] ${data.toString().trim()}`) + }) + + proc.on('error', (error) => { + console.error('Failed to start TrailBase container:', error) + }) + + return proc +} + +/** + * Stop the TrailBase Docker container + */ +function stopDockerContainer(): void { + console.log('๐Ÿ›‘ Stopping TrailBase container...') + try { + execSync(`docker stop ${CONTAINER_NAME}`, { stdio: 'pipe' }) + console.log('โœ“ Container stopped') + } catch { + // Container might have already stopped + } +} + +/** + * Stop the TrailBase binary server + */ +function stopBinaryServer(): void { + console.log('๐Ÿ›‘ Stopping TrailBase binary...') + if (serverProcess) { + serverProcess.kill('SIGTERM') + console.log('โœ“ Binary stopped') + } + // Clean up temp data directory + if (tempDataDir && existsSync(tempDataDir)) { + try { + rmSync(tempDataDir, { recursive: true, force: true }) + console.log('โœ“ Temp data cleaned up') + } catch { + // Ignore cleanup errors + } + } +} + +/** + * Wait for TrailBase server to be ready + */ +async function waitForTrailBase(url: string): Promise { + return new Promise((resolve, reject) => { + const timeout = setTimeout(() => { + reject( + new Error(`Timed out waiting for TrailBase to be active at ${url}`), + ) + }, 60000) // 60 seconds timeout for startup + + const check = async (): Promise => { + try { + // Try the healthz endpoint first, then fall back to root + let res = await fetch(`${url}/api/healthz`) + if (res.ok) { + clearTimeout(timeout) + return resolve() + } + // Try root as fallback + res = await fetch(url) + if (res.ok || res.status === 404) { + // 404 means server is up but no route at root + clearTimeout(timeout) + return resolve() + } + setTimeout(() => void check(), 500) + } catch { + setTimeout(() => void check(), 500) + } + } + + void check() + }) +} + +/** + * Global setup for TrailBase e2e test suite + * + * This runs once before all tests and: + * 1. Checks if TrailBase is already running (uses external instance) + * 2. If not, tries to start using local binary (preferred) + * 3. Falls back to Docker if binary not available + * 4. Waits for TrailBase server to be healthy + * 5. Provides context to all tests + * 6. Returns cleanup function + */ +export default async function ({ provide }: GlobalSetupContext) { + console.log('๐Ÿš€ Starting TrailBase e2e test suite global setup...') + + // Check if TrailBase is already running + const alreadyRunning = await isTrailBaseRunning(TRAILBASE_URL) + + if (alreadyRunning) { + console.log(`โœ“ TrailBase already running at ${TRAILBASE_URL}`) + } else { + // Try binary first (preferred for CI/local testing without Docker) + binaryPath = findBinaryPath() + if (binaryPath) { + console.log(`โœ“ TrailBase binary found at ${binaryPath}`) + serverProcess = startBinaryServer() + startedServer = true + usedMethod = 'binary' + } else if (isDockerAvailable()) { + console.log('โ„น Binary not found, using Docker...') + // Clean up any existing container + cleanupExistingContainer() + // Build Docker image + buildDockerImage() + // Start container + serverProcess = startDockerContainer() + startedServer = true + usedMethod = 'docker' + } else { + throw new Error( + `TrailBase is not running at ${TRAILBASE_URL} and no startup method is available.\n` + + `Please either:\n` + + ` 1. Start TrailBase manually at ${TRAILBASE_URL}\n` + + ` 2. Place the TrailBase binary at one of:\n` + + BINARY_PATHS.map((p) => ` - ${p}`).join('\n') + + `\n 3. Install Docker and run the tests again\n` + + `\nTo download TrailBase binary:\n` + + ` curl -sSL https://trailbase.io/install.sh | bash\n` + + `\nTo start TrailBase with Docker manually:\n` + + ` cd packages/trailbase-db-collection/docker\n` + + ` docker-compose up -d`, + ) + } + + // Wait for TrailBase server to be ready + console.log(`โณ Waiting for TrailBase at ${TRAILBASE_URL}...`) + await waitForTrailBase(TRAILBASE_URL) + console.log('โœ“ TrailBase is ready') + } + + // Provide context values to all tests + provide('baseUrl', TRAILBASE_URL) + + console.log('โœ… Global setup complete\n') + + // Return cleanup function (runs once after all tests) + return () => { + console.log('\n๐Ÿงน Running global teardown...') + if (startedServer) { + if (usedMethod === 'docker') { + stopDockerContainer() + } else if (usedMethod === 'binary') { + stopBinaryServer() + } + serverProcess?.kill() + serverProcess = null + } + console.log('โœ… Global teardown complete') + } +} diff --git a/packages/trailbase-db-collection/e2e/trailbase.e2e.test.ts b/packages/trailbase-db-collection/e2e/trailbase.e2e.test.ts new file mode 100644 index 000000000..e472e4617 --- /dev/null +++ b/packages/trailbase-db-collection/e2e/trailbase.e2e.test.ts @@ -0,0 +1,707 @@ +/** + * TrailBase Collection E2E Tests + * + * End-to-end tests using actual TrailBase server with sync. + * Uses shared test suites from @tanstack/db-collection-e2e. + */ + +import { afterAll, afterEach, beforeAll, describe, inject } from 'vitest' +import { createCollection } from '@tanstack/db' +import { initClient } from 'trailbase' +import { trailBaseCollectionOptions } from '../src/trailbase' +import { + createCollationTestSuite, + createDeduplicationTestSuite, + createJoinsTestSuite, + createLiveUpdatesTestSuite, + createMutationsTestSuite, + createPaginationTestSuite, + createPredicatesTestSuite, + createProgressiveTestSuite, + generateSeedData, +} from '../../db-collection-e2e/src/index' +import { waitFor } from '../../db-collection-e2e/src/utils/helpers' +import type { TrailBaseSyncMode } from '../src/trailbase' +import type { + Comment, + E2ETestConfig, + Post, + User, +} from '../../db-collection-e2e/src/types' +import type { Collection } from '@tanstack/db' + +declare module 'vitest' { + export interface ProvidedContext { + baseUrl: string + } +} + +/** + * Decode base64-encoded BLOB UUID to standard UUID string format + * TrailBase stores UUIDs as BLOBs and returns them as base64 + */ +function base64ToUuid(base64: string): string { + // Decode base64 to bytes + const binaryString = atob(base64) + const bytes = new Uint8Array(binaryString.length) + for (let i = 0; i < binaryString.length; i++) { + bytes[i] = binaryString.charCodeAt(i) + } + + // Convert bytes to UUID string format + const hex = Array.from(bytes) + .map((b) => b.toString(16).padStart(2, '0')) + .join('') + + return `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20, 32)}` +} + +/** + * Encode UUID string to URL-safe base64 format for TrailBase API calls + * TrailBase returns standard base64 from create, but API URLs need URL-safe base64 + */ +function uuidToBase64(uuid: string): string { + // Remove dashes and convert hex to bytes + const hex = uuid.replace(/-/g, '') + const bytes = new Uint8Array(16) + for (let i = 0; i < 16; i++) { + bytes[i] = parseInt(hex.slice(i * 2, i * 2 + 2), 16) + } + + // Convert bytes to URL-safe base64 (replace + with - and / with _) + let binaryString = '' + for (const byte of bytes) { + binaryString += String.fromCharCode(byte) + } + return btoa(binaryString).replace(/\+/g, '-').replace(/\//g, '_') +} + +/** + * Parse TrailBase ID response - handles various formats: + * - URL-safe base64 encoded UUID blob + * - Standard base64 encoded UUID blob + * - Plain UUID string + * - Integer (for backwards compatibility) + */ +function parseTrailBaseId(rawId: unknown): string { + const idStr = String(rawId) + + // Check if it's already a UUID string format + if ( + /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test( + idStr, + ) + ) { + return idStr + } + + // Check if it's an integer + if (/^\d+$/.test(idStr)) { + return idStr + } + + // Try URL-safe base64 decoding (with - and _ instead of + and /) + try { + // Convert URL-safe base64 to standard base64 + const standardBase64 = idStr.replace(/-/g, '+').replace(/_/g, '/') + // Add padding if needed + const padded = + standardBase64 + '=='.slice(0, (4 - (standardBase64.length % 4)) % 4) + return base64ToUuid(padded) + } catch { + // If that fails, try standard base64 + try { + return base64ToUuid(idStr) + } catch { + // If all else fails, return as-is + console.warn(`Could not parse TrailBase ID: ${idStr}`) + return idStr + } + } +} + +/** + * TrailBase record types matching the camelCase schema + * Column names match the app types, only types differ for storage + */ +interface UserRecord { + id: string // base64 encoded UUID + name: string + email: string | null + age: number + isActive: number // SQLite INTEGER (0/1) for boolean + createdAt: string // ISO date string + metadata: string | null // JSON stored as string + deletedAt: string | null // ISO date string +} + +interface PostRecord { + id: string + userId: string + title: string + content: string | null + viewCount: number + largeViewCount: string // BigInt as string + publishedAt: string | null + deletedAt: string | null +} + +interface CommentRecord { + id: string + postId: string + userId: string + text: string + createdAt: string + deletedAt: string | null +} + +/** + * Parse functions - only transform types that differ between DB and app + */ +const parseUser = (record: UserRecord): User => ({ + id: parseTrailBaseId(record.id), + name: record.name, + email: record.email, + age: record.age, + isActive: Boolean(record.isActive), + createdAt: new Date(record.createdAt), + metadata: record.metadata ? JSON.parse(record.metadata) : null, + deletedAt: record.deletedAt ? new Date(record.deletedAt) : null, +}) + +const parsePost = (record: PostRecord): Post => ({ + id: parseTrailBaseId(record.id), + userId: record.userId, + title: record.title, + content: record.content, + viewCount: record.viewCount, + largeViewCount: BigInt(record.largeViewCount), + publishedAt: record.publishedAt ? new Date(record.publishedAt) : null, + deletedAt: record.deletedAt ? new Date(record.deletedAt) : null, +}) + +const parseComment = (record: CommentRecord): Comment => ({ + id: parseTrailBaseId(record.id), + postId: record.postId, + userId: record.userId, + text: record.text, + createdAt: new Date(record.createdAt), + deletedAt: record.deletedAt ? new Date(record.deletedAt) : null, +}) + +/** + * Serialize functions - transform app types to DB storage types + * ID is base64 encoded for TrailBase BLOB storage + */ +const serializeUser = (user: User): UserRecord => ({ + id: uuidToBase64(user.id), + name: user.name, + email: user.email, + age: user.age, + isActive: user.isActive ? 1 : 0, + createdAt: user.createdAt.toISOString(), + metadata: user.metadata ? JSON.stringify(user.metadata) : null, + deletedAt: user.deletedAt ? user.deletedAt.toISOString() : null, +}) + +const serializePost = (post: Post): PostRecord => ({ + id: uuidToBase64(post.id), + userId: post.userId, + title: post.title, + content: post.content, + viewCount: post.viewCount, + largeViewCount: post.largeViewCount.toString(), + publishedAt: post.publishedAt ? post.publishedAt.toISOString() : null, + deletedAt: post.deletedAt ? post.deletedAt.toISOString() : null, +}) + +const serializeComment = (comment: Comment): CommentRecord => ({ + id: uuidToBase64(comment.id), + postId: comment.postId, + userId: comment.userId, + text: comment.text, + createdAt: comment.createdAt.toISOString(), + deletedAt: comment.deletedAt ? comment.deletedAt.toISOString() : null, +}) + +/** + * Partial serializers for updates + */ +const serializeUserPartial = (user: Partial): Partial => { + const result: Partial = {} + if (user.id !== undefined) result.id = uuidToBase64(user.id) + if (user.name !== undefined) result.name = user.name + if (user.email !== undefined) result.email = user.email + if (user.age !== undefined) result.age = user.age + if (user.isActive !== undefined) result.isActive = user.isActive ? 1 : 0 + if (user.createdAt !== undefined) + result.createdAt = user.createdAt.toISOString() + if (user.metadata !== undefined) + result.metadata = user.metadata ? JSON.stringify(user.metadata) : null + if (user.deletedAt !== undefined) + result.deletedAt = user.deletedAt ? user.deletedAt.toISOString() : null + return result +} + +const serializePostPartial = (post: Partial): Partial => { + const result: Partial = {} + if (post.id !== undefined) result.id = uuidToBase64(post.id) + if (post.userId !== undefined) result.userId = post.userId + if (post.title !== undefined) result.title = post.title + if (post.content !== undefined) result.content = post.content + if (post.viewCount !== undefined) result.viewCount = post.viewCount + if (post.largeViewCount !== undefined) + result.largeViewCount = post.largeViewCount.toString() + if (post.publishedAt !== undefined) + result.publishedAt = post.publishedAt + ? post.publishedAt.toISOString() + : null + if (post.deletedAt !== undefined) + result.deletedAt = post.deletedAt ? post.deletedAt.toISOString() : null + return result +} + +const serializeCommentPartial = ( + comment: Partial, +): Partial => { + const result: Partial = {} + if (comment.id !== undefined) result.id = uuidToBase64(comment.id) + if (comment.postId !== undefined) result.postId = comment.postId + if (comment.userId !== undefined) result.userId = comment.userId + if (comment.text !== undefined) result.text = comment.text + if (comment.createdAt !== undefined) + result.createdAt = comment.createdAt.toISOString() + if (comment.deletedAt !== undefined) + result.deletedAt = comment.deletedAt + ? comment.deletedAt.toISOString() + : null + return result +} + +/** + * Helper to create a set of collections for a given sync mode + */ +function createCollectionsForSyncMode( + client: ReturnType, + testId: string, + syncMode: TrailBaseSyncMode, + suffix: string, +) { + const usersRecordApi = client.records(`users_e2e`) + const postsRecordApi = client.records(`posts_e2e`) + const commentsRecordApi = client.records(`comments_e2e`) + + const usersCollection = createCollection( + trailBaseCollectionOptions({ + id: `trailbase-e2e-users-${suffix}-${testId}`, + recordApi: usersRecordApi, + getKey: (item: User) => item.id, + startSync: true, + syncMode, + parse: parseUser, + serialize: serializeUser, + serializePartial: serializeUserPartial, + }), + ) + + const postsCollection = createCollection( + trailBaseCollectionOptions({ + id: `trailbase-e2e-posts-${suffix}-${testId}`, + recordApi: postsRecordApi, + getKey: (item: Post) => item.id, + startSync: true, + syncMode, + parse: parsePost, + serialize: serializePost, + serializePartial: serializePostPartial, + }), + ) + + const commentsCollection = createCollection( + trailBaseCollectionOptions({ + id: `trailbase-e2e-comments-${suffix}-${testId}`, + recordApi: commentsRecordApi, + getKey: (item: Comment) => item.id, + startSync: true, + syncMode, + parse: parseComment, + serialize: serializeComment, + serializePartial: serializeCommentPartial, + }), + ) + + return { + users: usersCollection as Collection, + posts: postsCollection as Collection, + comments: commentsCollection as Collection, + } +} + +describe(`TrailBase Collection E2E Tests`, () => { + let config: E2ETestConfig + let client: ReturnType + let testId: string + let seedData: ReturnType + + // Collections for each sync mode + let eagerCollections: ReturnType + let onDemandCollections: ReturnType + + // Progressive collections with test hooks (created separately) + let progressiveUsers: Collection + let progressivePosts: Collection + let progressiveComments: Collection + + // Control mechanisms for progressive collections test hooks + const usersUpToDateControl = { + current: null as (() => void) | null, + createPromise: () => + new Promise((resolve) => { + usersUpToDateControl.current = resolve + }), + } + const postsUpToDateControl = { + current: null as (() => void) | null, + createPromise: () => + new Promise((resolve) => { + postsUpToDateControl.current = resolve + }), + } + const commentsUpToDateControl = { + current: null as (() => void) | null, + createPromise: () => + new Promise((resolve) => { + commentsUpToDateControl.current = resolve + }), + } + + beforeAll(async () => { + const baseUrl = inject(`baseUrl`) + seedData = generateSeedData() + + testId = Date.now().toString(16) + + // Initialize TrailBase client + client = initClient(baseUrl) + + // Get record APIs for seeding + const usersRecordApi = client.records(`users_e2e`) + const postsRecordApi = client.records(`posts_e2e`) + const commentsRecordApi = client.records(`comments_e2e`) + + // Clean up any existing records (from previous test runs or mutations) + console.log(`Cleaning up existing records...`) + try { + const existingComments = await commentsRecordApi.list({}) + for (const comment of existingComments.records) { + try { + await commentsRecordApi.delete(comment.id) + } catch { + /* ignore */ + } + } + const existingPosts = await postsRecordApi.list({}) + for (const post of existingPosts.records) { + try { + await postsRecordApi.delete(post.id) + } catch { + /* ignore */ + } + } + const existingUsers = await usersRecordApi.list({}) + for (const user of existingUsers.records) { + try { + await usersRecordApi.delete(user.id) + } catch { + /* ignore */ + } + } + console.log(`Cleanup complete`) + } catch (e) { + console.log(`Cleanup skipped (tables might be empty):`, e) + } + + // Insert seed data - we provide the ID so the original UUIDs are preserved + console.log(`Inserting ${seedData.users.length} users...`) + let userErrors = 0 + for (const user of seedData.users) { + try { + const serialized = serializeUser(user) + if (userErrors === 0) + console.log('First user data:', JSON.stringify(serialized)) + await usersRecordApi.create(serialized) + } catch (e) { + userErrors++ + if (userErrors <= 3) console.error('User insert error:', e) + } + } + console.log( + `Inserted users: ${seedData.users.length - userErrors} success, ${userErrors} errors`, + ) + if (seedData.users.length > 0) + console.log(`First user ID: ${seedData.users[0].id}`) + + console.log(`Inserting ${seedData.posts.length} posts...`) + let postErrors = 0 + for (const post of seedData.posts) { + try { + await postsRecordApi.create(serializePost(post)) + } catch (e) { + postErrors++ + if (postErrors <= 3) console.error('Post insert error:', e) + } + } + console.log( + `Inserted posts: ${seedData.posts.length - postErrors} success, ${postErrors} errors`, + ) + + console.log(`Inserting ${seedData.comments.length} comments...`) + let commentErrors = 0 + for (const comment of seedData.comments) { + try { + await commentsRecordApi.create(serializeComment(comment)) + } catch (e) { + commentErrors++ + if (commentErrors <= 3) console.error('Comment insert error:', e) + } + } + console.log( + `Inserted comments: ${seedData.comments.length - commentErrors} success, ${commentErrors} errors`, + ) + + // Create collections with different sync modes + eagerCollections = createCollectionsForSyncMode( + client, + testId, + `eager`, + `eager`, + ) + onDemandCollections = createCollectionsForSyncMode( + client, + testId, + `on-demand`, + `ondemand`, + ) + + // Create progressive collections with test hooks + // These use startSync: false so tests can control when sync starts + progressiveUsers = createCollection( + trailBaseCollectionOptions({ + id: `trailbase-e2e-users-progressive-${testId}`, + recordApi: usersRecordApi, + getKey: (item: User) => item.id, + startSync: false, // Don't start immediately - tests will start when ready + syncMode: `progressive`, + parse: parseUser, + serialize: serializeUser, + serializePartial: serializeUserPartial, + }), + ) as Collection + + progressivePosts = createCollection( + trailBaseCollectionOptions({ + id: `trailbase-e2e-posts-progressive-${testId}`, + recordApi: postsRecordApi, + getKey: (item: Post) => item.id, + startSync: false, + syncMode: `progressive`, + parse: parsePost, + serialize: serializePost, + serializePartial: serializePostPartial, + }), + ) as Collection + + progressiveComments = createCollection( + trailBaseCollectionOptions({ + id: `trailbase-e2e-comments-progressive-${testId}`, + recordApi: commentsRecordApi, + getKey: (item: Comment) => item.id, + startSync: false, + syncMode: `progressive`, + parse: parseComment, + serialize: serializeComment, + serializePartial: serializeCommentPartial, + }), + ) as Collection + + // Wait for eager collections to sync (they need to fetch all data before marking ready) + console.log('Calling preload on eager collections...') + await Promise.all([ + eagerCollections.users.preload(), + eagerCollections.posts.preload(), + eagerCollections.comments.preload(), + ]) + console.log('Preload complete, checking sizes...') + console.log( + `Users size: ${eagerCollections.users.size}, expected: ${seedData.users.length}`, + ) + console.log( + `Posts size: ${eagerCollections.posts.size}, expected: ${seedData.posts.length}`, + ) + console.log( + `Comments size: ${eagerCollections.comments.size}, expected: ${seedData.comments.length}`, + ) + + // Debug: try direct list API call + const testList = await usersRecordApi.list({ pagination: { limit: 10 } }) + console.log( + `Direct list API returned ${testList.records.length} records:`, + testList.records.slice(0, 2), + ) + + // Wait for eager collections to have all data + await Promise.all([ + waitFor(() => eagerCollections.users.size >= seedData.users.length, { + timeout: 30000, + interval: 500, + message: `TrailBase eager sync has not completed for users`, + }), + waitFor(() => eagerCollections.posts.size >= seedData.posts.length, { + timeout: 30000, + interval: 500, + message: `TrailBase eager sync has not completed for posts`, + }), + waitFor( + () => eagerCollections.comments.size >= seedData.comments.length, + { + timeout: 30000, + interval: 500, + message: `TrailBase eager sync has not completed for comments`, + }, + ), + ]) + + // On-demand collections are marked ready immediately + await Promise.all([ + onDemandCollections.users.preload(), + onDemandCollections.posts.preload(), + onDemandCollections.comments.preload(), + ]) + + // Note: We DON'T call preload() on progressive collections here + // because the test hooks will block. Individual progressive tests + // will handle preload and release as needed. + + config = { + collections: { + eager: { + users: eagerCollections.users, + posts: eagerCollections.posts, + comments: eagerCollections.comments, + }, + onDemand: { + users: onDemandCollections.users, + posts: onDemandCollections.posts, + comments: onDemandCollections.comments, + }, + progressive: { + users: progressiveUsers, + posts: progressivePosts, + comments: progressiveComments, + }, + }, + hasReplicationLag: true, // TrailBase has async subscription-based sync + // Note: progressiveTestControl is not provided because the explicit snapshot/swap + // transition tests require Electric-specific sync behavior that TrailBase doesn't support. + // Tests that require this will be skipped. + mutations: { + insertUser: async (user) => { + // Insert with the provided ID (base64-encoded UUID) + await usersRecordApi.create(serializeUser(user)) + // ID is preserved from the user object + }, + updateUser: async (id, updates) => { + const partialRecord: Partial = {} + if (updates.age !== undefined) partialRecord.age = updates.age + if (updates.name !== undefined) partialRecord.name = updates.name + if (updates.email !== undefined) partialRecord.email = updates.email + if (updates.isActive !== undefined) + partialRecord.isActive = updates.isActive ? 1 : 0 + const encodedId = uuidToBase64(id) + await usersRecordApi.update(encodedId, partialRecord) + }, + deleteUser: async (id) => { + const encodedId = uuidToBase64(id) + await usersRecordApi.delete(encodedId) + }, + insertPost: async (post) => { + // Insert with the provided ID + await postsRecordApi.create(serializePost(post)) + }, + }, + setup: async () => {}, + afterEach: async () => { + // TrailBase doesn't need collection restart like Electric's on-demand mode + }, + teardown: async () => { + await Promise.all([ + eagerCollections.users.cleanup(), + eagerCollections.posts.cleanup(), + eagerCollections.comments.cleanup(), + onDemandCollections.users.cleanup(), + onDemandCollections.posts.cleanup(), + onDemandCollections.comments.cleanup(), + progressiveUsers.cleanup(), + progressivePosts.cleanup(), + progressiveComments.cleanup(), + ]) + }, + } + }, 60000) // 60 second timeout for setup + + afterEach(async () => { + if (config.afterEach) { + await config.afterEach() + } + }) + + afterAll(async () => { + await config.teardown() + + // Clean up seed data + const usersRecordApi = client.records(`users_e2e`) + const postsRecordApi = client.records(`posts_e2e`) + const commentsRecordApi = client.records(`comments_e2e`) + + // Delete in reverse order due to FK constraints + // IDs need to be encoded as base64 for TrailBase API + for (const comment of seedData.comments) { + try { + await commentsRecordApi.delete(uuidToBase64(comment.id)) + } catch { + // Ignore errors + } + } + + for (const post of seedData.posts) { + try { + await postsRecordApi.delete(uuidToBase64(post.id)) + } catch { + // Ignore errors + } + } + + for (const user of seedData.users) { + try { + await usersRecordApi.delete(uuidToBase64(user.id)) + } catch { + // Ignore errors + } + } + }) + + // Helper to get config + function getConfig() { + return Promise.resolve(config) + } + + // Run all shared test suites + createPredicatesTestSuite(getConfig) + createPaginationTestSuite(getConfig) + createJoinsTestSuite(getConfig) + createDeduplicationTestSuite(getConfig) + createCollationTestSuite(getConfig) + createMutationsTestSuite(getConfig) + createLiveUpdatesTestSuite(getConfig) + createProgressiveTestSuite(getConfig) +}) diff --git a/packages/trailbase-db-collection/package.json b/packages/trailbase-db-collection/package.json index 4a46b14f0..ab64d1e2f 100644 --- a/packages/trailbase-db-collection/package.json +++ b/packages/trailbase-db-collection/package.json @@ -20,7 +20,8 @@ "build": "vite build", "dev": "vite build --watch", "lint": "eslint . --fix", - "test": "vitest --run" + "test": "vitest --run", + "test:e2e": "vitest --run --config vitest.e2e.config.ts" }, "type": "module", "main": "dist/cjs/index.cjs", @@ -55,6 +56,7 @@ "typescript": ">=4.7" }, "devDependencies": { + "@tanstack/db-collection-e2e": "workspace:*", "@types/debug": "^4.1.12", "@vitest/coverage-istanbul": "^3.2.4" } diff --git a/packages/trailbase-db-collection/src/index.ts b/packages/trailbase-db-collection/src/index.ts index 81c4593a7..42b865210 100644 --- a/packages/trailbase-db-collection/src/index.ts +++ b/packages/trailbase-db-collection/src/index.ts @@ -1,7 +1,10 @@ export { trailBaseCollectionOptions, + TRAILBASE_TEST_HOOKS, type TrailBaseCollectionConfig, type TrailBaseCollectionUtils, + type TrailBaseSyncMode, + type TrailBaseTestHooks, } from './trailbase' export * from './errors' diff --git a/packages/trailbase-db-collection/src/trailbase.ts b/packages/trailbase-db-collection/src/trailbase.ts index 32b6755c5..a50fa8831 100644 --- a/packages/trailbase-db-collection/src/trailbase.ts +++ b/packages/trailbase-db-collection/src/trailbase.ts @@ -14,10 +14,28 @@ import type { DeleteMutationFnParams, InsertMutationFnParams, SyncConfig, + SyncMode, UpdateMutationFnParams, UtilsRecord, } from '@tanstack/db' +/** + * Symbol for internal test hooks - allows tests to control sync timing + */ +export const TRAILBASE_TEST_HOOKS = Symbol.for(`TRAILBASE_TEST_HOOKS`) + +/** + * Test hooks interface for controlling sync behavior in tests + */ +export interface TrailBaseTestHooks { + /** + * Called before marking the collection as ready in progressive mode. + * Return a promise that resolves when the collection should be marked ready. + * This allows tests to pause and inspect the collection state during initial sync. + */ + beforeMarkingReady?: () => Promise +} + type ShapeOf = Record type Conversion = (value: I) => O @@ -81,24 +99,128 @@ function convertPartial< ) as OutputType } +/** + * Decode a base64-encoded BLOB UUID to a standard UUID string format for proper sorting. + * TrailBase stores UUIDs as BLOBs and returns them as base64, which doesn't sort correctly. + * This function decodes the base64 to get the UUID string which sorts lexicographically. + */ +function decodeIdForSorting(rawId: unknown): string { + const idStr = String(rawId ?? ``) + + // Check if it's already a UUID string format - return as-is + if ( + /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test( + idStr, + ) + ) { + return idStr + } + + // Check if it's an integer - return as-is + if (/^\d+$/.test(idStr)) { + return idStr + } + + // Try to decode base64 to UUID + try { + // Convert URL-safe base64 to standard base64 + const standardBase64 = idStr.replace(/-/g, `+`).replace(/_/g, `/`) + // Add padding if needed + const padded = + standardBase64 + `==`.slice(0, (4 - (standardBase64.length % 4)) % 4) + + // Decode base64 to bytes + const binaryString = atob(padded) + + // Only process if it looks like a UUID (16 bytes) + if (binaryString.length !== 16) { + return idStr + } + + const bytes = new Uint8Array(binaryString.length) + for (let i = 0; i < binaryString.length; i++) { + bytes[i] = binaryString.charCodeAt(i) + } + + // Convert bytes to UUID string format + const hex = Array.from(bytes) + .map((b) => b.toString(16).padStart(2, `0`)) + .join(``) + + return `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20, 32)}` + } catch { + // If decoding fails, return the original string + return idStr + } +} + +/** + * The mode of sync to use for the collection. + * @default `eager` + * @description + * - `eager`: + * - syncs all data immediately on preload + * - collection will be marked as ready once the sync is complete + * - there is no incremental sync + * - `on-demand`: + * - syncs data incrementally when the collection is queried + * - collection will be marked as ready immediately after the subscription starts + * - `progressive`: + * - syncs all data for the collection in the background + * - uses loadSubset during the initial sync to provide a fast path to the data required for queries + * - collection will be marked as ready immediately, with full sync completing in background + */ +export type TrailBaseSyncMode = SyncMode | `progressive` + /** * Configuration interface for Trailbase Collection */ export interface TrailBaseCollectionConfig< - TItem extends ShapeOf, - TRecord extends ShapeOf = TItem, + TItem extends object, + TRecord extends object = TItem, TKey extends string | number = string | number, > extends Omit< BaseCollectionConfig, - `onInsert` | `onUpdate` | `onDelete` + `onInsert` | `onUpdate` | `onDelete` | `syncMode` > { /** * Record API name */ recordApi: RecordApi - parse: Conversions - serialize: Conversions + /** + * The mode of sync to use for the collection. + * @default `eager` + */ + syncMode?: TrailBaseSyncMode + + /** + * Function to parse a TrailBase record into the app item type. + * Use this for full control over the transformation including key renaming. + */ + parse: + | ((record: TRecord) => TItem) + | Conversions, TItem & ShapeOf> + + /** + * Function to serialize an app item into a TrailBase record. + * Use this for full control over the transformation including key renaming. + */ + serialize: + | ((item: TItem) => TRecord) + | Conversions, TRecord & ShapeOf> + + /** + * Function to serialize a partial app item into a partial TrailBase record. + * Used for updates. If not provided, serialize will be used. + */ + serializePartial?: (item: Partial) => Partial + + /** + * Internal test hooks for controlling sync behavior. + * This is intended for testing only and should not be used in production. + */ + [TRAILBASE_TEST_HOOKS]?: TrailBaseTestHooks } export type AwaitTxIdFn = (txId: string, timeout?: number) => Promise @@ -108,23 +230,83 @@ export interface TrailBaseCollectionUtils extends UtilsRecord { } export function trailBaseCollectionOptions< - TItem extends ShapeOf, - TRecord extends ShapeOf = TItem, + TItem extends object, + TRecord extends object = TItem, TKey extends string | number = string | number, >( config: TrailBaseCollectionConfig, -): CollectionConfig & { utils: TrailBaseCollectionUtils } { +): CollectionConfig & { + utils: TrailBaseCollectionUtils +} { const getKey = config.getKey - const parse = (record: TRecord) => - convert(config.parse, record) - const serialUpd = (item: Partial) => - convertPartial(config.serialize, item) - const serialIns = (item: TItem) => - convert(config.serialize, item) + // Support both function and Conversions for parse + const parse: (record: TRecord) => TItem = + typeof config.parse === `function` + ? config.parse + : (record: TRecord) => + convert, TItem & ShapeOf>( + config.parse as Conversions< + TRecord & ShapeOf, + TItem & ShapeOf + >, + record as TRecord & ShapeOf, + ) as TItem + + // Support both function and Conversions for serialize + const serialIns: (item: TItem) => TRecord = + typeof config.serialize === `function` + ? config.serialize + : (item: TItem) => + convert, TRecord & ShapeOf>( + config.serialize as Conversions< + TItem & ShapeOf, + TRecord & ShapeOf + >, + item as TItem & ShapeOf, + ) as TRecord + + // For partial updates, use serializePartial if provided, otherwise fall back to a simple implementation + const serialUpd: (item: Partial) => Partial = + config.serializePartial ?? + (typeof config.serialize === `function` + ? (item: Partial) => { + // For function serializers, we need to handle partial items carefully + // We serialize and then extract only the keys that were in the partial + const keys = Object.keys(item) as Array + const full = config.serialize(item as TItem) as TRecord + const result: Partial = {} + for (const key of keys) { + // Map the key if there's a known mapping (simplified approach) + const recordKey = key as unknown as keyof TRecord + if (recordKey in full) { + result[recordKey] = full[recordKey] + } + } + return result + } + : (item: Partial) => + convertPartial, TRecord & ShapeOf>( + config.serialize as Conversions< + TItem & ShapeOf, + TRecord & ShapeOf + >, + item as Partial>, + ) as Partial) + + const abortController = new AbortController() const seenIds = new Store(new Map()) + const internalSyncMode = config.syncMode ?? `eager` + // For the collection config, progressive acts like on-demand (needs loadSubset) + const finalSyncMode = + internalSyncMode === `progressive` ? `on-demand` : internalSyncMode + let fullSyncCompleted = false + + // Get test hooks if provided + const testHooks = config[TRAILBASE_TEST_HOOKS] + const awaitIds = ( ids: Array, timeout: number = 120 * 1000, @@ -136,14 +318,22 @@ export function trailBaseCollectionOptions< } return new Promise((resolve, reject) => { - const timeoutId = setTimeout(() => { - unsubscribe() - reject(new TimeoutWaitingForIdsError(ids.toString())) - }, timeout) + const onAbort = () => { + clearTimeout(timeoutId) + reject(new TimeoutWaitingForIdsError(`Aborted while waiting for ids`)) + } + + abortController.signal.addEventListener(`abort`, onAbort) + + const timeoutId = setTimeout( + () => reject(new TimeoutWaitingForIdsError(ids.toString())), + timeout, + ) const unsubscribe = seenIds.subscribe((value) => { if (completed(value.currentVal)) { clearTimeout(timeoutId) + abortController.signal.removeEventListener(`abort`, onAbort) unsubscribe() resolve() } @@ -151,15 +341,6 @@ export function trailBaseCollectionOptions< }) } - let eventReader: ReadableStreamDefaultReader | undefined - const cancelEventReader = () => { - if (eventReader) { - eventReader.cancel() - eventReader.releaseLock() - eventReader = undefined - } - } - type SyncParams = Parameters[`sync`]>[0] const sync = { sync: (params: SyncParams) => { @@ -174,21 +355,15 @@ export function trailBaseCollectionOptions< }, }) let cursor = response.cursor - let got = 0 - begin() + // Collect all records first + const allRecords: Array = [] while (true) { const length = response.records.length if (length === 0) break - got = got + length - for (const item of response.records) { - write({ - type: `insert`, - value: parse(item), - }) - } + allRecords.push(...response.records) if (length < limit) break @@ -196,36 +371,72 @@ export function trailBaseCollectionOptions< pagination: { limit, cursor, - offset: cursor === undefined ? got : undefined, + offset: cursor === undefined ? allRecords.length : undefined, }, }) cursor = response.cursor } + // Sort by ID for consistent insertion order (deterministic tie-breaking) + // Decode base64 IDs to UUIDs for proper lexicographic sorting + allRecords.sort((a: TRecord, b: TRecord) => { + const idA = decodeIdForSorting(a[`id` as keyof TRecord]) + const idB = decodeIdForSorting(b[`id` as keyof TRecord]) + return idA.localeCompare(idB) + }) + + begin() + for (const item of allRecords) { + write({ + type: `insert`, + value: parse(item), + }) + } + commit() } // Afterwards subscribe. async function listen(reader: ReadableStreamDefaultReader) { + console.log(`[TrailBase] Subscription listener started`) while (true) { const { done, value: event } = await reader.read() if (done || !event) { - reader.releaseLock() - eventReader = undefined + console.log(`[TrailBase] Subscription stream ended`) + try { + if ((reader as any).locked) { + reader.releaseLock() + } + } catch { + // ignore if already released + } return } + console.log( + `[TrailBase] Received event:`, + JSON.stringify(event).slice(0, 200), + ) begin() let value: TItem | undefined if (`Insert` in event) { value = parse(event.Insert as TRecord) + console.log( + `[TrailBase] Insert event for item with key: ${getKey(value)}`, + ) write({ type: `insert`, value }) } else if (`Delete` in event) { value = parse(event.Delete as TRecord) + console.log( + `[TrailBase] Delete event for item with key: ${getKey(value)}`, + ) write({ type: `delete`, value }) } else if (`Update` in event) { value = parse(event.Update as TRecord) + console.log( + `[TrailBase] Update event for item with key: ${getKey(value)}`, + ) write({ type: `update`, value }) } else { console.error(`Error: ${event.Error}`) @@ -244,21 +455,58 @@ export function trailBaseCollectionOptions< async function start() { const eventStream = await config.recordApi.subscribe(`*`) - const reader = (eventReader = eventStream.getReader()) + const reader = eventStream.getReader() // Start listening for subscriptions first. Otherwise, we'd risk a gap // between the initial fetch and starting to listen. listen(reader) try { - await initialFetch() + // Eager mode: perform initial fetch to populate everything + if (internalSyncMode === `eager`) { + await initialFetch() + fullSyncCompleted = true + } } catch (e) { - cancelEventReader() + abortController.abort() throw e - } finally { - // Mark ready both if everything went well or if there's an error to - // avoid blocking apps waiting for `.preload()` to finish. + } + + // For progressive mode with test hooks, use non-blocking pattern + if ( + internalSyncMode === `progressive` && + testHooks?.beforeMarkingReady + ) { + // DON'T start full sync yet - let loadSubset handle data fetching + // Wait for the hook to resolve, THEN do full sync and mark ready + testHooks.beforeMarkingReady().then(async () => { + try { + // Now do the full sync + await initialFetch() + fullSyncCompleted = true + } catch (e) { + console.error(`TrailBase progressive full sync failed`, e) + } + markReady() + }) + } else { + // Mark ready immediately for eager/on-demand modes markReady() + + // If progressive without test hooks, start background sync + if (internalSyncMode === `progressive`) { + // Defer background sync to avoid racing with preload assertions + setTimeout(() => { + void (async () => { + try { + await initialFetch() + fullSyncCompleted = true + } catch (e) { + console.error(`TrailBase progressive full sync failed`, e) + } + })() + }, 0) + } } // Lastly, start a periodic cleanup task that will be removed when the @@ -281,17 +529,101 @@ export function trailBaseCollectionOptions< }) }, 120 * 1000) - reader.closed.finally(() => clearInterval(periodicCleanupTask)) + const onAbort = () => { + clearInterval(periodicCleanupTask) + // It's safe to call cancel and releaseLock even if the stream is already closed. + reader.cancel().catch(() => { + /* ignore */ + }) + try { + reader.releaseLock() + } catch { + /* ignore */ + } + } + + abortController.signal.addEventListener(`abort`, onAbort) + reader.closed.finally(() => { + abortController.signal.removeEventListener(`abort`, onAbort) + clearInterval(periodicCleanupTask) + }) } start() + + // Eager mode doesn't need subset loading + if (internalSyncMode === `eager`) { + return + } + + // Track if loadSubset has been called to prevent redundant fetches + // Using a promise to handle concurrent calls properly + let loadSubsetPromise: Promise | null = null + + // On-demand and progressive modes need loadSubset for query-driven data loading + const loadSubset = async ( + opts: { limit?: number } = {}, + ): Promise => { + // If already loading or completed, return the existing promise or resolve immediately + if (loadSubsetPromise) { + return loadSubsetPromise + } + + // In progressive mode after full sync is complete, no need to load more + if (internalSyncMode === `progressive` && fullSyncCompleted) { + return + } + + // Create the promise before any async work to prevent race conditions + loadSubsetPromise = (async () => { + const limit = opts.limit ?? 256 + const response = await config.recordApi.list({ + pagination: { limit }, + }) + const records = response?.records ?? [] + + if (records.length > 0) { + // Sort records by ID to ensure consistent insertion order (for deterministic tie-breaking) + // Decode base64 IDs to UUIDs for proper lexicographic sorting + const sortedRecords = [...records].sort( + (a: TRecord, b: TRecord) => { + const idA = decodeIdForSorting(a[`id` as keyof TRecord]) + const idB = decodeIdForSorting(b[`id` as keyof TRecord]) + return idA.localeCompare(idB) + }, + ) + + begin() + for (const item of sortedRecords) { + write({ type: `insert`, value: parse(item) }) + } + commit() + } + })() + + return loadSubsetPromise + } + + return { + loadSubset, + getSyncMetadata: () => + ({ + syncMode: internalSyncMode, + fullSyncComplete: fullSyncCompleted, + }) as const, + } }, // Expose the getSyncMetadata function - getSyncMetadata: undefined, + getSyncMetadata: () => + ({ + syncMode: internalSyncMode, + fullSyncComplete: fullSyncCompleted, + }) as const, } return { ...config, + syncMode: finalSyncMode, sync, getKey, onInsert: async ( @@ -352,7 +684,7 @@ export function trailBaseCollectionOptions< await awaitIds(ids) }, utils: { - cancel: cancelEventReader, + cancel: () => abortController.abort(), }, } } diff --git a/packages/trailbase-db-collection/testing-bin-linux/README.md b/packages/trailbase-db-collection/testing-bin-linux/README.md new file mode 100644 index 000000000..154389c8f --- /dev/null +++ b/packages/trailbase-db-collection/testing-bin-linux/README.md @@ -0,0 +1,27 @@ +# TrailBase Binary for E2E Testing + +Place the TrailBase binary in this directory to run e2e tests without Docker. + +**Important**: Make sure you download the correct architecture binary (x86_64 for most CI systems). + +## Download Instructions + +```bash +# Download TrailBase binary for your architecture +# For x86_64 Linux: +curl -L -o trail https://github.com/trailbase/trailbase/releases/latest/download/trail-x86_64-unknown-linux-gnu + +# For ARM64 Linux: +curl -L -o trail https://github.com/trailbase/trailbase/releases/latest/download/trail-aarch64-unknown-linux-gnu + +# Make executable +chmod +x trail + +# Move to this directory +mv trail packages/trailbase-db-collection/testing-bin-linux/trail +``` + +The setup also checks `packages/trailbase/test-linux-bin/trail` as an alternative location. + +The e2e test setup will automatically detect and use this binary if present. +If the binary is not found, it will fall back to Docker. diff --git a/packages/trailbase-db-collection/testing-bin-linux/trail b/packages/trailbase-db-collection/testing-bin-linux/trail new file mode 100755 index 000000000..5ccf7aebe Binary files /dev/null and b/packages/trailbase-db-collection/testing-bin-linux/trail differ diff --git a/packages/trailbase-db-collection/tests/trailbase.test.ts b/packages/trailbase-db-collection/tests/trailbase.test.ts index a6d282a30..34c6a920a 100644 --- a/packages/trailbase-db-collection/tests/trailbase.test.ts +++ b/packages/trailbase-db-collection/tests/trailbase.test.ts @@ -1,4 +1,4 @@ -import { describe, expect, it, vi } from 'vitest' +import { describe, it, vi } from 'vitest' import { createCollection } from '@tanstack/db' import { trailBaseCollectionOptions } from '../src/trailbase' import type { @@ -64,7 +64,6 @@ class MockRecordApi implements RecordApi { } function setUp(recordApi: MockRecordApi) { - // Get the options with utilities const options = trailBaseCollectionOptions({ recordApi, getKey: (item: Data): number | number => @@ -78,7 +77,7 @@ function setUp(recordApi: MockRecordApi) { } describe(`TrailBase Integration`, () => { - it(`initial fetch, receive update and cancel`, async () => { + it(`cancellation closes stream and double cancel is safe`, async () => { const records: Array = [ { id: 0, @@ -87,7 +86,6 @@ describe(`TrailBase Integration`, () => { }, ] - // Prepare mock API. const recordApi = new MockRecordApi() let listResolver: (value: boolean) => void const listPromise = new Promise((res) => { @@ -101,162 +99,19 @@ describe(`TrailBase Integration`, () => { }) const stream = new TransformStream() - const injectEvent = async (event: Event) => { - const writer = stream.writable.getWriter() - await writer.write(event) - writer.releaseLock() - } recordApi.subscribe.mockResolvedValue(stream.readable) const options = setUp(recordApi) - const collection = createCollection(options) + createCollection(options) - // Await initial fetch and assert state. + // Wait for initial fetch to complete await listPromise - expect(collection.state).toEqual(new Map(records.map((d) => [d.id, d]))) - // Inject an update event and assert state. - const updatedRecord: Data = { - ...records[0]!, - updated: 1, - } - - await injectEvent({ Update: updatedRecord }) - - expect(collection.state).toEqual( - new Map([updatedRecord].map((d) => [d.id, d])), - ) - - // Await cancellation. + // Cancel and verify stream closes options.utils.cancel() - await stream.readable.getReader().closed - // Check that double cancellation is fine. + // Verify double cancellation is safe options.utils.cancel() }) - - it(`receive inserts and delete updates`, async () => { - // Prepare mock API. - const recordApi = new MockRecordApi() - - const stream = new TransformStream() - const injectEvent = async (event: Event) => { - const writer = stream.writable.getWriter() - await writer.write(event) - writer.releaseLock() - } - recordApi.subscribe.mockResolvedValue(stream.readable) - - const options = setUp(recordApi) - const collection = createCollection(options) - - // Await initial fetch and assert state. - expect(collection.state).toEqual(new Map([])) - - // Inject an update event and assert state. - const data: Data = { - id: 0, - updated: 0, - data: `first`, - } - - await injectEvent({ - Insert: data, - }) - - expect(collection.state).toEqual(new Map([data].map((d) => [d.id, d]))) - - await injectEvent({ - Delete: data, - }) - - expect(collection.state).toEqual(new Map([])) - - stream.writable.close() - }) - - it(`local inserts, updates and deletes`, () => { - // Prepare mock API. - const recordApi = new MockRecordApi() - - const stream = new TransformStream() - recordApi.subscribe.mockResolvedValue(stream.readable) - - const createBulkMock = recordApi.createBulk.mockImplementation( - (records: Array): Promise> => { - setTimeout(() => { - const writer = stream.writable.getWriter() - for (const record of records) { - writer.write({ - Insert: record, - }) - } - writer.releaseLock() - }, 1) - - return Promise.resolve(records.map((r) => r.id ?? 0)) - }, - ) - - const options = setUp(recordApi) - const collection = createCollection(options) - - // Await initial fetch and assert state. - expect(collection.state).toEqual(new Map([])) - - const data: Data = { - id: 42, - updated: 0, - data: `first`, - } - - collection.insert(data) - - expect(createBulkMock).toHaveBeenCalledOnce() - - expect(collection.state).toEqual(new Map([[data.id, data]])) - - const updatedData: Data = { - ...data, - updated: 1, - } - - const updateMock = recordApi.update.mockImplementation( - (_id: string | number, record: Partial) => { - expect(record).toEqual({ updated: updatedData.updated }) - const writer = stream.writable.getWriter() - writer.write({ - Update: record, - }) - writer.releaseLock() - return Promise.resolve() - }, - ) - - collection.update(data.id, (old: Data) => { - old.updated = updatedData.updated - }) - - expect(updateMock).toHaveBeenCalledOnce() - - expect(collection.state).toEqual(new Map([[updatedData.id, updatedData]])) - - const deleteMock = recordApi.delete.mockImplementation( - (_id: string | number) => { - const writer = stream.writable.getWriter() - writer.write({ - Delete: updatedData, - }) - writer.releaseLock() - return Promise.resolve() - }, - ) - - collection.delete(updatedData.id!) - - expect(deleteMock).toHaveBeenCalledOnce() - - expect(collection.state).toEqual(new Map([])) - }) }) diff --git a/packages/trailbase-db-collection/tsconfig.json b/packages/trailbase-db-collection/tsconfig.json index f3c0ea369..eac958767 100644 --- a/packages/trailbase-db-collection/tsconfig.json +++ b/packages/trailbase-db-collection/tsconfig.json @@ -16,6 +16,6 @@ "@tanstack/store": ["../store/src"] } }, - "include": ["src", "tests", "vite.config.ts"], + "include": ["src", "tests", "e2e", "vite.config.ts", "vitest.e2e.config.ts"], "exclude": ["node_modules", "dist"] } diff --git a/packages/trailbase-db-collection/vitest.e2e.config.ts b/packages/trailbase-db-collection/vitest.e2e.config.ts new file mode 100644 index 000000000..3418e292a --- /dev/null +++ b/packages/trailbase-db-collection/vitest.e2e.config.ts @@ -0,0 +1,24 @@ +import { resolve } from 'node:path' +import { defineConfig } from 'vitest/config' + +const packagesDir = resolve(__dirname, '..') + +export default defineConfig({ + test: { + include: [`e2e/**/*.e2e.test.ts`], + globalSetup: `./e2e/global-setup.ts`, + fileParallelism: false, // Critical for shared database + testTimeout: 30000, + environment: `jsdom`, + }, + resolve: { + alias: { + '@tanstack/db': resolve(packagesDir, 'db/src/index.ts'), + '@tanstack/db-ivm': resolve(packagesDir, 'db-ivm/src/index.ts'), + '@tanstack/db-collection-e2e': resolve( + packagesDir, + 'db-collection-e2e/src/index.ts', + ), + }, + }, +}) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 5d1727c7b..80926acf1 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1008,6 +1008,9 @@ importers: specifier: '>=4.7' version: 5.9.3 devDependencies: + '@tanstack/db-collection-e2e': + specifier: workspace:* + version: link:../db-collection-e2e '@types/debug': specifier: ^4.1.12 version: 4.1.12