diff --git a/examples/react-router-demo/test/react-router.smoke.e2e.test.ts b/examples/react-router-demo/test/react-router.smoke.e2e.test.ts index 9dd8a30423..fffce34b41 100644 --- a/examples/react-router-demo/test/react-router.smoke.e2e.test.ts +++ b/examples/react-router-demo/test/react-router.smoke.e2e.test.ts @@ -19,7 +19,7 @@ const contractJsonPath = join(exampleDir, 'src', 'prisma', 'contract.json'); const TEST_SCHEMA_SQL = ` create schema if not exists prisma_contract; create table if not exists prisma_contract.marker ( - id smallint primary key default 1, + space text not null primary key default 'app', core_hash text not null default '', profile_hash text not null default '', contract_json jsonb, diff --git a/package.json b/package.json index 78cf2cabbd..a2ed7659ae 100644 --- a/package.json +++ b/package.json @@ -24,7 +24,7 @@ "lint:fix:unsafe": "pnpm biome check --write --unsafe .", "lint:packages": "turbo run lint --filter='!./examples/**'", "lint:examples": "turbo run lint --filter='./examples/**'", - "lint:deps": "depcruise --config dependency-cruiser.config.mjs packages && node scripts/lint-framework-target-imports.mjs", + "lint:deps": "depcruise --config dependency-cruiser.config.mjs packages && node scripts/lint-framework-target-imports.mjs && node scripts/lint-app-space-id.mjs", "lint:rules": "node scripts/validate-rules.mjs", "lint:rules:footprint": "node scripts/rules-footprint.mjs --check", "rules:footprint": "node scripts/rules-footprint.mjs", diff --git a/packages/1-framework/1-core/framework-components/src/control/control-instances.ts b/packages/1-framework/1-core/framework-components/src/control/control-instances.ts index 12f043a164..e992a22bff 100644 --- a/packages/1-framework/1-core/framework-components/src/control/control-instances.ts +++ b/packages/1-framework/1-core/framework-components/src/control/control-instances.ts @@ -41,8 +41,27 @@ export interface ControlFamilyInstance readonly configPath?: string; }): Promise; + /** + * Reads 
the contract marker for `space` from the database, returning + * `null` if no marker row exists for that space (or if the marker + * table itself is missing). + * + * `space` is required at every call site so the type system surfaces + * every place that needs to thread the value: callers in single-app + * paths pass {@link import('./control-spaces').APP_SPACE_ID} + * (`'app'`); per-extension callers pass the extension's space id. + * Defaulting at the family-interface level was a silent bug door — + * it let multi-space-aware callers forget to pass `space` and + * collapse onto the app's marker row. + * + * Families whose underlying storage doesn't yet support per-space + * markers (Mongo, today) accept `space` for interface conformance and + * reject any non-`APP_SPACE_ID` value rather than silently ignoring + * it; see the family-specific implementation for details. + */ readMarker(options: { readonly driver: ControlDriverInstance; + readonly space: string; }): Promise; introspect(options: { diff --git a/packages/1-framework/1-core/framework-components/src/control/control-migration-types.ts b/packages/1-framework/1-core/framework-components/src/control/control-migration-types.ts index d1034fca84..c44b0f2a8d 100644 --- a/packages/1-framework/1-core/framework-components/src/control/control-migration-types.ts +++ b/packages/1-framework/1-core/framework-components/src/control/control-migration-types.ts @@ -14,6 +14,61 @@ import type { Result } from '@prisma-next/utils/result'; import type { TargetBoundComponentDescriptor } from '../shared/framework-components'; import type { ControlDriverInstance, ControlFamilyInstance } from './control-instances'; +// ============================================================================ +// Migration Package Metadata +// ============================================================================ + +/** + * Planner provenance recorded inside {@link MigrationMetadata}. 
+ * + * `used` / `applied` track which migration hints the planner consulted + * vs. which it actually applied during emission; `plannerVersion` + * pins the planner build that produced the migration so future + * verification passes can recognise plans authored against an older + * planner. + */ +export interface MigrationHints { + readonly used: readonly string[]; + readonly applied: readonly string[]; + readonly plannerVersion: string; +} + +/** + * In-memory migration metadata envelope. Every migration is + * content-addressed: the `migrationHash` is a hash over the metadata + * envelope plus the operations list, computed at write time. There is no + * draft state — a migration directory either exists with fully attested + * metadata or it does not. + * + * When the planner cannot lower an operation because of an unfilled + * `placeholder(...)` slot, the migration is still written with + * `migrationHash` hashed over `ops: []`. Re-running self-emit after the + * user fills the placeholder produces a *different* `migrationHash` + * (committed to the real ops); this is intentional. + * + * The on-disk JSON shape in `migration.json` matches this type + * field-for-field — `JSON.stringify(metadata, null, 2)` is the canonical + * writer output (defined in `@prisma-next/migration-tools/io`). + */ +export interface MigrationMetadata { + readonly migrationHash: string; + readonly from: string | null; + readonly to: string; + readonly fromContract: Contract | null; + readonly toContract: Contract; + readonly hints: MigrationHints; + readonly labels: readonly string[]; + /** + * Sorted, deduplicated list of `invariantId`s declared by the + * migration's data-transform ops. Always present; an empty array + * means the migration has no routing-visible data transforms. 
+ */ + readonly providedInvariants: readonly string[]; + readonly authorship?: { readonly author?: string; readonly email?: string }; + readonly signature?: { readonly keyId: string; readonly value: string } | null; + readonly createdAt: string; +} + // ============================================================================ // Operation Classes and Policy // ============================================================================ @@ -298,6 +353,13 @@ export interface MigrationPlanner< readonly frameworkComponents: ReadonlyArray< TargetBoundComponentDescriptor >; + /** + * Contract space this plan applies to. Stamped onto the produced + * plan so the runner keys the marker row by the right space when + * executing. App-plan callers pass `APP_SPACE_ID` (`'app'`); + * per-extension callers pass the extension's space id. + */ + readonly spaceId: string; }): MigrationPlannerResult; /** @@ -306,8 +368,15 @@ export interface MigrationPlanner< * Used by `migration new` to scaffold a fresh `migration.ts`. The * returned plan has no operations; its `renderTypeScript()` yields a * stub the user can edit. + * + * `spaceId` is stamped onto the produced plan; reconciliation flows + * (`db init`, `db update`) and authoring flows (`migration new`) all + * pass it explicitly. 
*/ - emptyMigration(context: MigrationScaffoldContext): MigrationPlanWithAuthoringSurface; + emptyMigration( + context: MigrationScaffoldContext, + spaceId: string, + ): MigrationPlanWithAuthoringSurface; } /** diff --git a/packages/1-framework/1-core/framework-components/src/control/control-spaces.ts b/packages/1-framework/1-core/framework-components/src/control/control-spaces.ts new file mode 100644 index 0000000000..40f78a0225 --- /dev/null +++ b/packages/1-framework/1-core/framework-components/src/control/control-spaces.ts @@ -0,0 +1,82 @@ +import type { Contract } from '@prisma-next/contract/types'; +import type { MigrationMetadata, MigrationPlanOperation } from './control-migration-types'; + +/** + * Canonical control-plane identifiers for contract spaces. + * + * A contract space is the disjoint `(contract.json, migration-graph)` unit + * the per-space planner / runner / verifier (project: extension contract + * spaces, TML-2397) operates on. The application owns one well-known + * space — the value below — and each loaded extension that contributes + * schema owns a uniquely-named space. + * + * Lives in `framework-components/control` so every layer that has to + * reason about space identity (the migration tooling, the SQL runtime's + * marker reader, target-side statement builders, target-side adapters) + * can import a single value rather than duplicating the literal. Raw + * `'app'` string literals in framework / target / runtime / adapter + * source code are forbidden and policed by + * `scripts/lint-app-space-id.mjs` (wired into `pnpm lint:deps`). + * + * @see specs/framework-mechanism.spec.md § 3 — Layout convention (γ). + */ +export const APP_SPACE_ID = 'app' as const; + +/** + * Pinned head ref for a contract space — the `(hash, invariants)` tuple + * a runner targets when applying that space's migration graph. 
Identical + * in shape to the on-disk `migrations//refs/head.json` the + * framework writes per loaded extension, and to the app-space + * `/refs/head.json`. Family-agnostic: SQL, Mongo, and any + * future family share the same head-ref shape. + * + * @see specs/framework-mechanism.spec.md § 1. + */ +export interface ContractSpaceHeadRef { + readonly hash: string; + readonly invariants: readonly string[]; +} + +/** + * Canonical structural shape of a migration package — the unit a planner + * produces and a runner consumes: a directory name, the ADR 197 metadata + * envelope (which carries the `toContract` snapshot), and the operation + * list. + * + * In-memory by default. Readers in `@prisma-next/migration-tools` + * (`readMigrationPackage` / `readMigrationsDir`) return the augmented + * {@link import('@prisma-next/migration-tools/package').OnDiskMigrationPackage} + * variant which adds `dirPath`; everything else operates against the + * canonical shape so the same value flows through pre-emission + * authoring, on-disk loading, and runner execution without conversion. + * + * @see specs/framework-mechanism.spec.md § 1. + */ +export interface MigrationPackage { + readonly dirName: string; + readonly metadata: MigrationMetadata; + readonly ops: readonly MigrationPlanOperation[]; +} + +/** + * Canonical structural shape of a contract space — one disjoint + * `(contractJson, migration-graph)` unit the per-space planner / runner + * / verifier operates on. The application owns one well-known space + * ({@link APP_SPACE_ID}); each loaded extension that contributes schema + * owns a uniquely-named space. Whether a value is the app's space or an + * extension's space is a control-plane concern; the type carries no + * such distinction. + * + * Generic over the contract so each family pins a typed contract value + * at consumption time. 
The SQL family specialises to + * `ContractSpace>` at the descriptor surface; + * Mongo's symmetrical `ContractSpace>` will land + * with that family. + * + * @see specs/framework-mechanism.spec.md § 1. + */ +export interface ContractSpace { + readonly contractJson: TContract; + readonly migrations: readonly MigrationPackage[]; + readonly headRef: ContractSpaceHeadRef; +} diff --git a/packages/1-framework/1-core/framework-components/src/exports/control.ts b/packages/1-framework/1-core/framework-components/src/exports/control.ts index a12e9dcaf3..fb0f0fe166 100644 --- a/packages/1-framework/1-core/framework-components/src/exports/control.ts +++ b/packages/1-framework/1-core/framework-components/src/exports/control.ts @@ -25,6 +25,8 @@ export type { ControlTargetInstance, } from '../control/control-instances'; export type { + MigrationHints, + MigrationMetadata, MigrationOperationClass, MigrationOperationPolicy, MigrationPlan, @@ -74,6 +76,12 @@ export type { SchemaTreeVisitor, } from '../control/control-schema-view'; export { SchemaTreeNode } from '../control/control-schema-view'; +export type { + ContractSpace, + ContractSpaceHeadRef, + MigrationPackage, +} from '../control/control-spaces'; +export { APP_SPACE_ID } from '../control/control-spaces'; export type { AssembledAuthoringContributions, ControlStack, diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts index 18fca8932e..736acd2f3f 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts @@ -5,7 +5,7 @@ import { MigrationToolsError, } from '@prisma-next/migration-tools/errors'; import { findPathWithDecision } from '@prisma-next/migration-tools/migration-graph'; -import type { MigrationPackage } from '@prisma-next/migration-tools/package'; +import type { OnDiskMigrationPackage } from 
'@prisma-next/migration-tools/package'; import type { RefEntry } from '@prisma-next/migration-tools/refs'; import { readRefs, resolveRef } from '@prisma-next/migration-tools/refs'; import { ifDefined } from '@prisma-next/utils/defined'; @@ -92,7 +92,7 @@ function mapApplyFailure(failure: MigrationApplyFailure): CliStructuredErrorType }); } -function packageToStep(pkg: MigrationPackage): MigrationApplyStep { +function packageToStep(pkg: OnDiskMigrationPackage): MigrationApplyStep { return { dirName: pkg.dirName, from: pkg.metadata.from, diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-new.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-new.ts index 01b96803cd..510f387bd1 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-new.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-new.ts @@ -11,7 +11,7 @@ import { readFileSync } from 'node:fs'; import type { Contract } from '@prisma-next/contract/types'; import { getEmittedArtifactPaths } from '@prisma-next/emitter'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { APP_SPACE_ID, createControlStack } from '@prisma-next/framework-components/control'; import { MigrationToolsError } from '@prisma-next/migration-tools/errors'; import { computeMigrationHash } from '@prisma-next/migration-tools/hash'; import { @@ -231,12 +231,15 @@ async function executeMigrationNewCommand( const stack = createControlStack(config); const familyInstance = config.family.create(stack); const planner = migrations.createPlanner(familyInstance); - const emptyPlan = planner.emptyMigration({ - packageDir, - contractJsonPath: join(packageDir, 'end-contract.json'), - fromHash, - toHash: toStorageHash, - }); + const emptyPlan = planner.emptyMigration( + { + packageDir, + contractJsonPath: join(packageDir, 'end-contract.json'), + fromHash, + toHash: toStorageHash, + }, + APP_SPACE_ID, + ); await writeMigrationTs(packageDir, 
emptyPlan.renderTypeScript()); return ok({ diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts index 7e209c5467..cdf87a3d44 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts @@ -2,6 +2,7 @@ import { readFile } from 'node:fs/promises'; import type { Contract } from '@prisma-next/contract/types'; import { getEmittedArtifactPaths } from '@prisma-next/emitter'; import { + APP_SPACE_ID, createControlStack, hasOperationPreview, type MigrationPlanOperation, @@ -279,6 +280,7 @@ async function executeMigrationPlanCommand( policy: { allowedOperationClasses: ['additive', 'widening', 'destructive', 'data'] }, fromContract, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (plannerResult.kind === 'failure') { return notOk( diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-show.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-show.ts index f53de01e41..85b5c7ce61 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-show.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-show.ts @@ -8,7 +8,7 @@ import { findLatestMigration, reconstructGraph, } from '@prisma-next/migration-tools/migration-graph'; -import type { MigrationPackage } from '@prisma-next/migration-tools/package'; +import type { OnDiskMigrationPackage } from '@prisma-next/migration-tools/package'; import { notOk, ok, type Result } from '@prisma-next/utils/result'; import { Command } from 'commander'; import { relative, resolve } from 'pathe'; @@ -64,9 +64,9 @@ function looksLikePath(target: string): boolean { } export function resolveByHashPrefix( - packages: readonly MigrationPackage[], + packages: readonly OnDiskMigrationPackage[], prefix: string, -): Result { +): Result { const normalizedPrefix = prefix.startsWith('sha256:') ? 
prefix : `sha256:${prefix}`; const matches = packages.filter((p) => p.metadata.migrationHash.startsWith(normalizedPrefix)); @@ -126,7 +126,7 @@ async function executeMigrationShowCommand( ui.stderr(header); } - let pkg: MigrationPackage; + let pkg: OnDiskMigrationPackage; try { if (target && looksLikePath(target)) { diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts index ad1eef76b7..3fa9e1e37c 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts @@ -11,7 +11,7 @@ import { findPathWithDecision, findReachableLeaves, } from '@prisma-next/migration-tools/migration-graph'; -import type { MigrationPackage } from '@prisma-next/migration-tools/package'; +import type { OnDiskMigrationPackage } from '@prisma-next/migration-tools/package'; import type { RefEntry, Refs } from '@prisma-next/migration-tools/refs'; import { readRefs, resolveRef } from '@prisma-next/migration-tools/refs'; import { ifDefined } from '@prisma-next/utils/defined'; @@ -118,7 +118,7 @@ export interface MigrationStatusResult { readonly summary: string; readonly diagnostics: readonly StatusDiagnostic[]; readonly graph?: MigrationGraph; - readonly bundles?: readonly MigrationPackage[]; + readonly bundles?: readonly OnDiskMigrationPackage[]; readonly edgeStatuses?: readonly EdgeStatus[]; readonly activeRefHash?: string; readonly activeRefName?: string; @@ -249,7 +249,7 @@ export function deriveEdgeStatuses( */ function buildMigrationEntries( chain: readonly MigrationEdge[], - packages: readonly MigrationPackage[], + packages: readonly OnDiskMigrationPackage[], mode: 'online' | 'offline', markerHash: string | undefined, edgeStatuses?: readonly EdgeStatus[], @@ -451,7 +451,7 @@ async function executeMigrationStatusCommand( }); } - let bundles: readonly MigrationPackage[]; + let bundles: readonly 
OnDiskMigrationPackage[]; let graph: MigrationGraph; try { ({ bundles, graph } = await loadMigrationPackages(migrationsDir)); diff --git a/packages/1-framework/3-tooling/cli/src/control-api/client.ts b/packages/1-framework/3-tooling/cli/src/control-api/client.ts index 569925713a..e628a9fa0c 100644 --- a/packages/1-framework/3-tooling/cli/src/control-api/client.ts +++ b/packages/1-framework/3-tooling/cli/src/control-api/client.ts @@ -13,6 +13,7 @@ import type { VerifyDatabaseSchemaResult, } from '@prisma-next/framework-components/control'; import { + APP_SPACE_ID, createControlStack, hasMigrations, hasOperationPreview, @@ -402,7 +403,11 @@ class ControlClientImpl implements ControlClient { async readMarker(): Promise { const { driver, familyInstance } = await this.ensureConnected(); - return familyInstance.readMarker({ driver }); + // The CLI client's readMarker reads the app's marker. Per-extension + // readers go through the orchestrator's per-space planner / runner + // boundary, which threads the extension's space id through the + // family interface explicitly. 
+ return familyInstance.readMarker({ driver, space: APP_SPACE_ID }); } async migrationApply(options: MigrationApplyOptions): Promise { diff --git a/packages/1-framework/3-tooling/cli/src/control-api/operations/db-init.ts b/packages/1-framework/3-tooling/cli/src/control-api/operations/db-init.ts index 0d4ae97762..22de4d27af 100644 --- a/packages/1-framework/3-tooling/cli/src/control-api/operations/db-init.ts +++ b/packages/1-framework/3-tooling/cli/src/control-api/operations/db-init.ts @@ -8,7 +8,7 @@ import type { MigrationRunnerResult, TargetMigrationsCapability, } from '@prisma-next/framework-components/control'; -import { hasOperationPreview } from '@prisma-next/framework-components/control'; +import { APP_SPACE_ID, hasOperationPreview } from '@prisma-next/framework-components/control'; import { ifDefined } from '@prisma-next/utils/defined'; import { notOk, ok } from '@prisma-next/utils/result'; import type { DbInitResult, DbInitSuccess, OnControlProgress } from '../types'; @@ -89,6 +89,7 @@ export async function executeDbInit { const bundles = await readMigrationsDir(migrationsDir); diff --git a/packages/1-framework/3-tooling/cli/test/commands/migration-show.test.ts b/packages/1-framework/3-tooling/cli/test/commands/migration-show.test.ts index f684de624b..10aa1f4b2e 100644 --- a/packages/1-framework/3-tooling/cli/test/commands/migration-show.test.ts +++ b/packages/1-framework/3-tooling/cli/test/commands/migration-show.test.ts @@ -12,7 +12,7 @@ import { writeMigrationPackage, } from '@prisma-next/migration-tools/io'; import type { MigrationMetadata } from '@prisma-next/migration-tools/metadata'; -import type { MigrationPackage } from '@prisma-next/migration-tools/package'; +import type { OnDiskMigrationPackage } from '@prisma-next/migration-tools/package'; import stripAnsi from 'strip-ansi'; import { describe, expect, it } from 'vitest'; import { resolveByHashPrefix } from '../../src/commands/migration-show'; @@ -146,7 +146,7 @@ describe('resolveByHashPrefix', 
() => { }); it('returns error for no matches', () => { - const packages: MigrationPackage[] = [ + const packages: OnDiskMigrationPackage[] = [ { dirName: '20260101_100000_test', dirPath: '/tmp/test', @@ -174,7 +174,7 @@ describe('resolveByHashPrefix', () => { it('returns error for ambiguous prefix', () => { const contract = createContract(); - const packages: MigrationPackage[] = [ + const packages: OnDiskMigrationPackage[] = [ { dirName: '20260101_100000_first', dirPath: '/tmp/first', @@ -217,7 +217,7 @@ describe('resolveByHashPrefix', () => { }); it('resolves prefix without sha256: scheme', () => { - const packages: MigrationPackage[] = [ + const packages: OnDiskMigrationPackage[] = [ { dirName: '20260101_100000_test', dirPath: '/tmp/test', @@ -248,7 +248,7 @@ describe('resolveByHashPrefix', () => { // `migrationHash` — there is no longer a "skip draft" branch. The // prefix lookup simply returns no-match if nothing in the chain // shares the requested prefix. - const packages: MigrationPackage[] = [ + const packages: OnDiskMigrationPackage[] = [ { dirName: '20260101_100000_only', dirPath: '/tmp/only', diff --git a/packages/1-framework/3-tooling/migration/package.json b/packages/1-framework/3-tooling/migration/package.json index 4c629a38a7..99af1bad4b 100644 --- a/packages/1-framework/3-tooling/migration/package.json +++ b/packages/1-framework/3-tooling/migration/package.json @@ -86,6 +86,10 @@ "types": "./dist/exports/migration.d.mts", "import": "./dist/exports/migration.mjs" }, + "./spaces": { + "types": "./dist/exports/spaces.d.mts", + "import": "./dist/exports/spaces.mjs" + }, "./package.json": "./package.json" }, "repository": { diff --git a/packages/1-framework/3-tooling/migration/src/concatenate-space-apply-inputs.ts b/packages/1-framework/3-tooling/migration/src/concatenate-space-apply-inputs.ts new file mode 100644 index 0000000000..aa65464558 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/concatenate-space-apply-inputs.ts @@ -0,0 +1,90 
@@ +import { errorDuplicateSpaceId } from './errors'; +import { APP_SPACE_ID } from './space-layout'; + +/** + * Per-space input the runner consumes when applying a migration. + * + * The shape is target-agnostic: callers (today the SQL family; later + * any other family) bind `TOp` to their own per-target operation type + * (e.g. `SqlMigrationPlanOperation` for the SQL family) + * and the helper preserves it through the concatenation. + * + * - `migrationDirectory` is the on-disk migration directory for the + * space — `/migrations` for `'app'` and + * `/migrations/` for an extension space. + * - `currentMarkerHash` and `currentMarkerInvariants` are the values + * read from the `prisma_contract.marker` row keyed by `space = ` + * (T1.1). `null` hash = no marker row yet. + * - `path` is the per-space operation list resolved from + * `findPathWithDecision(currentMarker, ref.hash, effectiveRequired)` + * per ADR 208, materialised against the on-disk migration packages. + * + * @see specs/framework-mechanism.spec.md § 4 — Runner. + */ +export interface SpaceApplyInput { + readonly spaceId: string; + readonly migrationDirectory: string; + readonly currentMarkerHash: string | null; + readonly currentMarkerInvariants: readonly string[]; + readonly path: readonly TOp[]; +} + +/** + * Order a set of per-space apply inputs into the canonical cross-space + * sequence the runner applies under a single transaction. + * + * Cross-space ordering convention (sub-spec § 4): + * + * 1. **Extension spaces first**, alphabetically by `spaceId`. + * 2. **App space last** — only one `'app'` entry expected, at most. + * + * Rationale: extensions install their own structural objects (types, + * functions, helper tables) before the app's structural ops reference + * them. Putting app-space last lets app-space ops freely depend on any + * extension-space declaration in the same transaction. 
+ * + * Determinism (NFR6): the output order is independent of the input + * order, so two callers with the same set of `extensionPacks` produce + * identical apply sequences. + * + * Atomicity: rejects duplicate `spaceId`s with + * `MIGRATION.DUPLICATE_SPACE_ID` before producing any output. This + * mirrors {@link import('./plan-all-spaces').planAllSpaces} so the + * planner-side and runner-side helpers reject malformed inputs the same + * way (callers don't need a separate dedup pass). + * + * Synchronous, pure, no I/O: callers resolve marker rows and `path` + * before invoking this helper. The actual DB application — driving the + * transaction, committing marker writes, recording the per-space marker + * rows — happens at the SQL-family consumption site (per the + * helper-location convention from R3). + */ +export function concatenateSpaceApplyInputs( + inputs: readonly SpaceApplyInput[], +): readonly SpaceApplyInput[] { + const seen = new Set(); + for (const input of inputs) { + if (seen.has(input.spaceId)) { + throw errorDuplicateSpaceId(input.spaceId); + } + seen.add(input.spaceId); + } + + const extensions: SpaceApplyInput[] = []; + let appSpace: SpaceApplyInput | undefined; + for (const input of inputs) { + if (input.spaceId === APP_SPACE_ID) { + appSpace = input; + } else { + extensions.push(input); + } + } + + extensions.sort((a, b) => { + if (a.spaceId < b.spaceId) return -1; + if (a.spaceId > b.spaceId) return 1; + return 0; + }); + + return appSpace ? [...extensions, appSpace] : extensions; +} diff --git a/packages/1-framework/3-tooling/migration/src/detect-space-contract-drift.ts b/packages/1-framework/3-tooling/migration/src/detect-space-contract-drift.ts new file mode 100644 index 0000000000..f1a31e709d --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/detect-space-contract-drift.ts @@ -0,0 +1,95 @@ +/** + * Inputs for {@link detectSpaceContractDrift}. 
+ * + * Both hashes are produced by the caller (the SQL-family wiring at the + * consumption site) using the canonical contract hashing pipeline. + * Keeping the helper pure lets `migration-tools` stay framework-neutral + * — the SQL family already speaks `Contract`, the Mongo + * family speaks its own contract type, and both reduce to a hash string + * before drift detection runs. + * + * `pinnedHash` is `null` when no pinned `contract.json` exists yet for + * the space (the descriptor declares an extension that has never been + * emitted into the user's repo). That's the "first emit" case — no + * drift to surface; the migrate emit will create the pinned files. + * + * @see specs/framework-mechanism.spec.md § 3 — Drift detection (T1.9). + */ +export interface DetectSpaceContractDriftInputs { + readonly descriptorHash: string; + readonly pinnedHash: string | null; +} + +/** + * Result discriminant for {@link detectSpaceContractDrift}. + * + * - `noDrift`: descriptor hash and pinned hash agree byte-for-byte. + * The migrate emit can proceed with no warning. + * - `firstEmit`: no pinned `contract.json` on disk yet. The extension + * was just added to `extensionPacks`; this run will create the + * pinned files. No warning either — the user's intent is to install + * the extension, not to "drift" from a state they haven't pinned. + * - `drift`: descriptor hash differs from pinned hash. The caller + * surfaces a non-fatal warning naming the extension and the + * diff direction (descriptor → pinned). The migrate emit proceeds + * normally so the bump is materialised this run; the warning just + * confirms the bump is being captured. + * + * `spaceId`, `descriptorHash`, and `pinnedHash` are threaded through + * verbatim so the caller (logger / TerminalUI / strict-mode envelope) + * has everything it needs to format the warning message without + * re-reading the descriptor or the pinned file. 
+ */ +export type SpaceContractDriftResult = { + readonly kind: 'noDrift' | 'firstEmit' | 'drift'; + readonly spaceId: string; + readonly descriptorHash: string; + readonly pinnedHash: string | null; +}; + +/** + * Pure drift-detection primitive for a single contract space. + * + * Runs once per loaded extension space, just before computing the + * `priorContract` that feeds {@link import('./plan-all-spaces').planAllSpaces}. + * Hash equality is byte-for-byte (no normalisation) — both sides are + * already canonical hashes produced by the same pipeline, so any + * difference is meaningful drift. + * + * Synchronous, pure, no I/O. The caller (SQL family in M2 R1) reads + * the pinned `contract.json` and computes its hash, then invokes this + * helper alongside the descriptor's `headRef.hash`. Composes naturally + * with {@link import('./read-pinned-contract-hash').readPinnedContractHash} + * which provides the read-side primitive. + * + * @see specs/framework-mechanism.spec.md § 3 — Drift detection (T1.9). + * @see specs/framework-mechanism.spec.md AM7 — drift warning surfaces + * the extension name and the diff direction. 
+ */ +export function detectSpaceContractDrift( + spaceId: string, + inputs: DetectSpaceContractDriftInputs, +): SpaceContractDriftResult { + if (inputs.pinnedHash === null) { + return { + kind: 'firstEmit', + spaceId, + descriptorHash: inputs.descriptorHash, + pinnedHash: null, + }; + } + if (inputs.descriptorHash === inputs.pinnedHash) { + return { + kind: 'noDrift', + spaceId, + descriptorHash: inputs.descriptorHash, + pinnedHash: inputs.pinnedHash, + }; + } + return { + kind: 'drift', + spaceId, + descriptorHash: inputs.descriptorHash, + pinnedHash: inputs.pinnedHash, + }; +} diff --git a/packages/1-framework/3-tooling/migration/src/emit-pinned-space-artefacts.ts b/packages/1-framework/3-tooling/migration/src/emit-pinned-space-artefacts.ts new file mode 100644 index 0000000000..5afa07eeb5 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/emit-pinned-space-artefacts.ts @@ -0,0 +1,89 @@ +import { mkdir, writeFile } from 'node:fs/promises'; +import { join } from 'pathe'; +import { canonicalizeJson } from './canonicalize-json'; +import { errorPinnedArtefactsAppSpace } from './errors'; +import { APP_SPACE_ID, assertValidSpaceId } from './space-layout'; + +/** + * Pinned head reference for a contract space — `(hash, invariants)`. + * Mirrors {@link import('./refs').RefEntry} but is redeclared locally so + * callers can construct the input without depending on the refs module. + */ +export interface PinnedSpaceHeadRef { + readonly hash: string; + readonly invariants: readonly string[]; +} + +/** + * Inputs for {@link emitPinnedSpaceArtefacts}. + * + * - `contract` is the canonical contract value the framework just emitted + * for the space; it is serialised through {@link canonicalizeJson}, so + * it must be a JSON-compatible value (objects / arrays / primitives). + * Typed as `unknown` rather than the SQL-family `Contract` + * to keep `migration-tools` framework-neutral; SQL-family callers pass + * their typed value through unchanged. 
+ *
+ * - `contractDts` is the pre-rendered `.d.ts` text. Rendering happens in
+ *   the SQL family (which owns the codec / typemap input the renderer
+ *   needs), so this helper accepts the text verbatim and writes it out
+ *   without further transformation.
+ *
+ * - `headRef` is the pinned head reference for the space.
+ *   `invariants` are sorted alphabetically before serialisation so two
+ *   callers passing the same set in different orders produce
+ *   byte-identical `refs/head.json`.
+ */
+export interface PinnedSpaceArtefactInputs {
+  readonly contract: unknown;
+  readonly contractDts: string;
+  readonly headRef: PinnedSpaceHeadRef;
+}
+
+/**
+ * Emit the pinned per-space artefacts (`contract.json`, `contract.d.ts`,
+ * `refs/head.json`) under `<projectMigrationsDir>/<spaceId>/`.
+ *
+ * Always-overwrite: the framework owns these files; running `migrate`
+ * twice with the same inputs is a no-op observably (idempotent), but the
+ * helper does not check pre-existing contents — re-emit always wins.
+ *
+ * Path layout matches the convention in
+ * [`spaceMigrationDirectory`](./space-layout.ts), with two restrictions
+ * specific to pinned artefacts:
+ *
+ * - Rejects the app space (`spaceId === APP_SPACE_ID`): the app space's
+ *   canonical `contract.json` lives at the project root, not under
+ *   `migrations/`. Callers that want to emit it use the app-space
+ *   contract emit pipeline.
+ * - Validates `spaceId` against `[a-z][a-z0-9_-]{0,63}` via
+ *   {@link assertValidSpaceId} for the same filesystem-safety reasons.
+ *
+ * The migrations directory and space subdirectory are created if they
+ * do not yet exist (`mkdir { recursive: true }`).
+ *
+ * @see specs/framework-mechanism.spec.md § 3 — Pinned artefact emission (T1.8).
+ */
+export async function emitPinnedSpaceArtefacts(
+  projectMigrationsDir: string,
+  spaceId: string,
+  inputs: PinnedSpaceArtefactInputs,
+): Promise<void> {
+  if (spaceId === APP_SPACE_ID) {
+    throw errorPinnedArtefactsAppSpace();
+  }
+  assertValidSpaceId(spaceId);
+
+  const dir = join(projectMigrationsDir, spaceId);
+  await mkdir(join(dir, 'refs'), { recursive: true });
+
+  await writeFile(join(dir, 'contract.json'), `${canonicalizeJson(inputs.contract)}\n`);
+  await writeFile(join(dir, 'contract.d.ts'), inputs.contractDts);
+
+  const sortedInvariants = [...inputs.headRef.invariants].sort();
+  const headJson = canonicalizeJson({
+    hash: inputs.headRef.hash,
+    invariants: sortedInvariants,
+  });
+  await writeFile(join(dir, 'refs', 'head.json'), `${headJson}\n`);
+}
diff --git a/packages/1-framework/3-tooling/migration/src/errors.ts b/packages/1-framework/3-tooling/migration/src/errors.ts
index dd916f5b68..b4f8746527 100644
--- a/packages/1-framework/3-tooling/migration/src/errors.ts
+++ b/packages/1-framework/3-tooling/migration/src/errors.ts
@@ -148,6 +148,41 @@ export function errorInvalidDestName(destName: string): MigrationToolsError {
   });
 }
 
+export function errorInvalidSpaceId(spaceId: string): MigrationToolsError {
+  return new MigrationToolsError(
+    'MIGRATION.INVALID_SPACE_ID',
+    'Invalid contract space identifier',
+    {
+      why: `The space id "${spaceId}" does not match the required pattern /^[a-z][a-z0-9_-]{0,63}$/.
Space ids are used as filesystem directory names under \`migrations/\`, so the pattern is conservative on purpose.`,
+      fix: 'Pick a lowercase identifier that begins with a letter and contains only lowercase letters, digits, hyphens, or underscores; max 64 characters total.',
+      details: { spaceId },
+    },
+  );
+}
+
+export function errorPinnedArtefactsAppSpace(): MigrationToolsError {
+  return new MigrationToolsError(
+    'MIGRATION.PINNED_ARTEFACTS_APP_SPACE',
+    'Pinned per-space artefacts do not apply to the app space',
+    {
+      why: "Pinned `contract.json`/`contract.d.ts`/`refs/head.json` files only exist for extension spaces under `migrations/<spaceId>/`. The app space's canonical contract lives at the project root (`contract.json`) — `emitPinnedSpaceArtefacts` is the wrong helper for it.",
+      fix: 'Pass an extension space id, or use the app-space contract emit pipeline for the project-root `contract.json` / `contract.d.ts`.',
+    },
+  );
+}
+
+export function errorDuplicateSpaceId(spaceId: string): MigrationToolsError {
+  return new MigrationToolsError(
+    'MIGRATION.DUPLICATE_SPACE_ID',
+    'Duplicate contract space identifier',
+    {
+      why: `The space id "${spaceId}" appears more than once in the per-space planner input.
Each space id must be unique across the inputs (the per-space planner emits one output entry per id).`, + fix: 'Deduplicate the inputs before passing them to `planAllSpaces` — typically by checking your `extensionPacks` declaration for repeated entries.', + details: { spaceId }, + }, + ); +} + export function errorSameSourceAndTarget(dir: string, hash: string): MigrationToolsError { const dirName = basename(dir); return new MigrationToolsError( diff --git a/packages/1-framework/3-tooling/migration/src/exports/io.ts b/packages/1-framework/3-tooling/migration/src/exports/io.ts index 52fdec4156..1838a91879 100644 --- a/packages/1-framework/3-tooling/migration/src/exports/io.ts +++ b/packages/1-framework/3-tooling/migration/src/exports/io.ts @@ -1,6 +1,7 @@ export { copyFilesWithRename, formatMigrationDirName, + materialiseMigrationPackage, readMigrationPackage, readMigrationsDir, writeMigrationMetadata, diff --git a/packages/1-framework/3-tooling/migration/src/exports/package.ts b/packages/1-framework/3-tooling/migration/src/exports/package.ts index a4255bc526..826d50b412 100644 --- a/packages/1-framework/3-tooling/migration/src/exports/package.ts +++ b/packages/1-framework/3-tooling/migration/src/exports/package.ts @@ -1 +1,2 @@ -export type { MigrationOps, MigrationPackage } from '../package'; +export type { MigrationPackage } from '@prisma-next/framework-components/control'; +export type { MigrationOps, OnDiskMigrationPackage } from '../package'; diff --git a/packages/1-framework/3-tooling/migration/src/exports/spaces.ts b/packages/1-framework/3-tooling/migration/src/exports/spaces.ts new file mode 100644 index 0000000000..af29babeb2 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/exports/spaces.ts @@ -0,0 +1,36 @@ +export { + concatenateSpaceApplyInputs, + type SpaceApplyInput, +} from '../concatenate-space-apply-inputs'; +export { + type DetectSpaceContractDriftInputs, + detectSpaceContractDrift, + type SpaceContractDriftResult, +} from 
'../detect-space-contract-drift'; +export { + emitPinnedSpaceArtefacts, + type PinnedSpaceArtefactInputs, + type PinnedSpaceHeadRef, +} from '../emit-pinned-space-artefacts'; +export { + planAllSpaces, + type SpacePlanInput, + type SpacePlanOutput, +} from '../plan-all-spaces'; +export { readPinnedContractHash } from '../read-pinned-contract-hash'; +export { + APP_SPACE_ID, + assertValidSpaceId, + isValidSpaceId, + spaceMigrationDirectory, + type ValidSpaceId, +} from '../space-layout'; +export { + listPinnedSpaceDirectories, + type SpaceMarkerRecord, + type SpacePinnedHashRecord, + type SpaceVerifierViolation, + type VerifyContractSpacesInputs, + type VerifyContractSpacesResult, + verifyContractSpaces, +} from '../verify-contract-spaces'; diff --git a/packages/1-framework/3-tooling/migration/src/hash.ts b/packages/1-framework/3-tooling/migration/src/hash.ts index bb8b6ad11f..d84713084c 100644 --- a/packages/1-framework/3-tooling/migration/src/hash.ts +++ b/packages/1-framework/3-tooling/migration/src/hash.ts @@ -1,7 +1,7 @@ import { createHash } from 'node:crypto'; import { canonicalizeJson } from './canonicalize-json'; import type { MigrationMetadata } from './metadata'; -import type { MigrationOps, MigrationPackage } from './package'; +import type { MigrationOps, OnDiskMigrationPackage } from './package'; export interface VerifyResult { readonly ok: boolean; @@ -71,7 +71,7 @@ export function computeMigrationHash( * not — typically a sign of FS corruption, partial writes, or a post-emit * hand edit. 
*/ -export function verifyMigrationHash(pkg: MigrationPackage): VerifyResult { +export function verifyMigrationHash(pkg: OnDiskMigrationPackage): VerifyResult { const computed = computeMigrationHash(pkg.metadata, pkg.ops); if (pkg.metadata.migrationHash === computed) { diff --git a/packages/1-framework/3-tooling/migration/src/io.ts b/packages/1-framework/3-tooling/migration/src/io.ts index 1cd7575ed0..c7e0095f02 100644 --- a/packages/1-framework/3-tooling/migration/src/io.ts +++ b/packages/1-framework/3-tooling/migration/src/io.ts @@ -1,6 +1,11 @@ -import { copyFile, mkdir, readdir, readFile, stat, writeFile } from 'node:fs/promises'; +import { copyFile, mkdir, readdir, readFile, rm, stat, writeFile } from 'node:fs/promises'; +import type { + MigrationMetadata, + MigrationPackage, +} from '@prisma-next/framework-components/control'; import { type } from 'arktype'; -import { basename, dirname, join } from 'pathe'; +import { basename, dirname, join, resolve } from 'pathe'; +import { canonicalizeJson } from './canonicalize-json'; import { errorDirectoryExists, errorInvalidDestName, @@ -13,11 +18,10 @@ import { } from './errors'; import { verifyMigrationHash } from './hash'; import { deriveProvidedInvariants } from './invariants'; -import type { MigrationMetadata } from './metadata'; import { MigrationOpsSchema } from './op-schema'; -import type { MigrationOps, MigrationPackage } from './package'; +import type { MigrationOps, OnDiskMigrationPackage } from './package'; -const MANIFEST_FILE = 'migration.json'; +export const MANIFEST_FILE = 'migration.json'; const OPS_FILE = 'ops.json'; const MAX_SLUG_LENGTH = 64; @@ -74,6 +78,52 @@ export async function writeMigrationPackage( await writeFile(join(dir, OPS_FILE), JSON.stringify(ops, null, 2), { flag: 'wx' }); } +/** + * Materialise an in-memory {@link MigrationPackage} to a per-space + * directory on disk. 
+ *
+ * Writes three files under `<targetDir>/<pkg.dirName>/`:
+ *
+ * - `migration.json` — the manifest (pretty-printed, matches
+ *   {@link writeMigrationPackage}'s output for byte-for-byte parity with
+ *   app-space migrations).
+ * - `ops.json` — the operation list (pretty-printed).
+ * - `contract.json` — the canonical-JSON serialisation of
+ *   `metadata.toContract`. This is the per-package post-state contract
+ *   snapshot; the canonicalisation pass guarantees byte-determinism so
+ *   re-emitting the same package across machines / runs produces an
+ *   identical file.
+ *
+ * Distinct verb from the lower-level {@link writeMigrationPackage}
+ * (which takes constituent `(metadata, ops)`): callers reading
+ * `materialise…` know they are persisting a struct-typed package
+ * including its contract-snapshot sidecar.
+ *
+ * Overwrite-idempotent: the per-package directory is cleared before
+ * each emit, so re-running against the same `targetDir` produces
+ * byte-identical contents and never leaves stale files behind. The
+ * spec's "re-emitting the same package across runs / machines produces
+ * byte-identical files" guarantee (§ 3) covers both same-dir and
+ * fresh-dir re-emits. The lower-level {@link writeMigrationPackage}
+ * stays strict because the CLI authoring path (`migration plan` /
+ * `migration new`) deliberately refuses to clobber an existing
+ * authored migration; this helper is the re-emit path that is
+ * supposed to converge on a single canonical on-disk shape.
+ *
+ * @see specs/framework-mechanism.spec.md § 3 — Emission helper (T1.7).
+ */ +export async function materialiseMigrationPackage( + targetDir: string, + pkg: MigrationPackage, +): Promise { + const dir = join(targetDir, pkg.dirName); + await rm(dir, { recursive: true, force: true }); + await writeMigrationPackage(dir, pkg.metadata, pkg.ops); + await writeFile(join(dir, 'contract.json'), `${canonicalizeJson(pkg.metadata.toContract)}\n`, { + flag: 'wx', + }); +} + /** * Copy a list of files into `destDir`, optionally renaming each one. * @@ -109,16 +159,17 @@ export async function writeMigrationOps(dir: string, ops: MigrationOps): Promise await writeFile(join(dir, OPS_FILE), `${JSON.stringify(ops, null, 2)}\n`); } -export async function readMigrationPackage(dir: string): Promise { - const manifestPath = join(dir, MANIFEST_FILE); - const opsPath = join(dir, OPS_FILE); +export async function readMigrationPackage(dir: string): Promise { + const absoluteDir = resolve(dir); + const manifestPath = join(absoluteDir, MANIFEST_FILE); + const opsPath = join(absoluteDir, OPS_FILE); let manifestRaw: string; try { manifestRaw = await readFile(manifestPath, 'utf-8'); } catch (error) { if (hasErrnoCode(error, 'ENOENT')) { - throw errorMissingFile(MANIFEST_FILE, dir); + throw errorMissingFile(MANIFEST_FILE, absoluteDir); } throw error; } @@ -128,7 +179,7 @@ export async function readMigrationPackage(dir: string): Promise { +): Promise { let entries: string[]; try { entries = await readdir(migrationsRoot); @@ -214,7 +269,7 @@ export async function readMigrationsDir( throw error; } - const packages: MigrationPackage[] = []; + const packages: OnDiskMigrationPackage[] = []; for (const entry of entries.sort()) { const entryPath = join(migrationsRoot, entry); diff --git a/packages/1-framework/3-tooling/migration/src/metadata.ts b/packages/1-framework/3-tooling/migration/src/metadata.ts index d1a1ad0664..249c314fa8 100644 --- a/packages/1-framework/3-tooling/migration/src/metadata.ts +++ b/packages/1-framework/3-tooling/migration/src/metadata.ts @@ -1,41 +1 @@ 
-import type { Contract } from '@prisma-next/contract/types'; - -export interface MigrationHints { - readonly used: readonly string[]; - readonly applied: readonly string[]; - readonly plannerVersion: string; -} - -/** - * In-memory migration metadata envelope. Every migration is content-addressed: - * the `migrationHash` is a hash over the metadata envelope plus the operations - * list, computed at write time. There is no draft state — a migration - * directory either exists with fully attested metadata or it does not. - * - * When the planner cannot lower an operation because of an unfilled - * `placeholder(...)` slot, the migration is still written with `migrationHash` - * hashed over `ops: []`. Re-running self-emit after the user fills the - * placeholder produces a *different* `migrationHash` (committed to the real - * ops); this is intentional. - * - * The on-disk JSON shape in `migration.json` matches this type field-for-field - * — `JSON.stringify(metadata, null, 2)` is the canonical writer output. - */ -export interface MigrationMetadata { - readonly migrationHash: string; - readonly from: string | null; - readonly to: string; - readonly fromContract: Contract | null; - readonly toContract: Contract; - readonly hints: MigrationHints; - readonly labels: readonly string[]; - /** - * Sorted, deduplicated list of `invariantId`s declared by the - * migration's data-transform ops. Always present; an empty array - * means the migration has no routing-visible data transforms. 
- */ - readonly providedInvariants: readonly string[]; - readonly authorship?: { readonly author?: string; readonly email?: string }; - readonly signature?: { readonly keyId: string; readonly value: string } | null; - readonly createdAt: string; -} +export type { MigrationHints, MigrationMetadata } from '@prisma-next/framework-components/control'; diff --git a/packages/1-framework/3-tooling/migration/src/migration-graph.ts b/packages/1-framework/3-tooling/migration/src/migration-graph.ts index b8f8978173..bd446b708b 100644 --- a/packages/1-framework/3-tooling/migration/src/migration-graph.ts +++ b/packages/1-framework/3-tooling/migration/src/migration-graph.ts @@ -9,7 +9,7 @@ import { } from './errors'; import type { MigrationEdge, MigrationGraph } from './graph'; import { bfs } from './graph-ops'; -import type { MigrationPackage } from './package'; +import type { OnDiskMigrationPackage } from './package'; /** Forward-edge neighbours: edge `e` from `n` visits `e.to` next. */ function forwardNeighbours(graph: MigrationGraph, node: string) { @@ -36,7 +36,7 @@ function appendEdge(map: Map, key: string, entry: Migra else map.set(key, [entry]); } -export function reconstructGraph(packages: readonly MigrationPackage[]): MigrationGraph { +export function reconstructGraph(packages: readonly OnDiskMigrationPackage[]): MigrationGraph { const nodes = new Set(); const forwardChain = new Map(); const reverseChain = new Map(); diff --git a/packages/1-framework/3-tooling/migration/src/package.ts b/packages/1-framework/3-tooling/migration/src/package.ts index 2d4097677c..257fe0d25e 100644 --- a/packages/1-framework/3-tooling/migration/src/package.ts +++ b/packages/1-framework/3-tooling/migration/src/package.ts @@ -1,18 +1,21 @@ -import type { MigrationPlanOperation } from '@prisma-next/framework-components/control'; -import type { MigrationMetadata } from './metadata'; +import type { + MigrationPackage, + MigrationPlanOperation, +} from '@prisma-next/framework-components/control'; 
export type MigrationOps = readonly MigrationPlanOperation[]; /** - * An on-disk migration directory (a "package") with its parsed metadata and - * operations. Returned from `readMigrationPackage` / `readMigrationsDir` only - * after the loader has verified the package's integrity (hash recomputation - * against the stored `migrationHash`); holding a `MigrationPackage` value - * therefore implies the package is internally consistent. + * Augmented form of the canonical {@link MigrationPackage} returned by + * the on-disk readers (`readMigrationPackage`, `readMigrationsDir`). + * Adds `dirPath` — the absolute path the package was loaded from — so + * downstream diagnostics can point operators at a concrete directory. + * + * Holding an `OnDiskMigrationPackage` value implies the loader verified + * the package's integrity (hash recomputation against the stored + * `migrationHash`); the canonical structural shape carries no such + * guarantee on its own. */ -export interface MigrationPackage { - readonly dirName: string; +export interface OnDiskMigrationPackage extends MigrationPackage { readonly dirPath: string; - readonly metadata: MigrationMetadata; - readonly ops: MigrationOps; } diff --git a/packages/1-framework/3-tooling/migration/src/plan-all-spaces.ts b/packages/1-framework/3-tooling/migration/src/plan-all-spaces.ts new file mode 100644 index 0000000000..5bf253e0eb --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/plan-all-spaces.ts @@ -0,0 +1,80 @@ +import { errorDuplicateSpaceId } from './errors'; + +/** + * Per-space input for {@link planAllSpaces}. One entry per loaded + * contract space (the application's `'app'` plus each extension that + * exposes a `contractSpace`). + * + * - `priorContract` is `null` for a space that has never been emitted + * (no `migrations//contract.json` on disk yet); otherwise it + * is the canonical contract value pinned for that space. 
+ * - `newContract` is the canonical contract value the planner is about
+ *   to emit for that space — for app-space, the just-emitted root
+ *   `contract.json`; for an extension space, the descriptor's
+ *   `contractSpace.contractJson`.
+ *
+ * @see specs/framework-mechanism.spec.md § 3.
+ */
+export interface SpacePlanInput<TContract> {
+  readonly spaceId: string;
+  readonly priorContract: TContract | null;
+  readonly newContract: TContract;
+}
+
+export interface SpacePlanOutput<TPackage> {
+  readonly spaceId: string;
+  readonly migrationPackages: readonly TPackage[];
+}
+
+/**
+ * Iterate the per-space planner across a set of loaded contract spaces
+ * and return a deterministic shape regardless of declaration order.
+ *
+ * Behaviour:
+ *
+ * - The output is sorted alphabetically by `spaceId` (AM3). Two callers
+ *   passing the same set of inputs in different orders observe
+ *   byte-identical outputs.
+ * - The per-space planner (`planSpace`) is called exactly once per
+ *   input, in alphabetical-by-spaceId order. Its return value is
+ *   attached to the corresponding output entry verbatim.
+ * - Duplicate `spaceId`s in the input array throw
+ *   `MIGRATION.DUPLICATE_SPACE_ID` before any `planSpace` call runs,
+ *   keeping the planner pure when the input is malformed.
+ *
+ * The signature is generic over `TContract` and `TPackage` because the
+ * shape is framework-neutral (SQL family today, Mongo family
+ * eventually). Callers wire in whatever contract value and migration
+ * package shape their family already speaks.
+ *
+ * Synchronous: the underlying per-space planner (target's
+ * `MigrationPlanner.plan(...)`) is synchronous; callers that need to
+ * resolve async I/O (e.g. reading pinned `contract.json` from disk)
+ * resolve it before calling `planAllSpaces` and pass the materialised
+ * inputs through.
+ *
+ * @see specs/framework-mechanism.spec.md § 3 — Per-space planner (T1.3).
+ */
+export function planAllSpaces<TContract, TPackage>(
+  inputs: readonly SpacePlanInput<TContract>[],
+  planSpace: (input: SpacePlanInput<TContract>) => readonly TPackage[],
+): readonly SpacePlanOutput<TPackage>[] {
+  const seen = new Set<string>();
+  for (const input of inputs) {
+    if (seen.has(input.spaceId)) {
+      throw errorDuplicateSpaceId(input.spaceId);
+    }
+    seen.add(input.spaceId);
+  }
+
+  const sorted = [...inputs].sort((a, b) => {
+    if (a.spaceId < b.spaceId) return -1;
+    if (a.spaceId > b.spaceId) return 1;
+    return 0;
+  });
+
+  return sorted.map((input) => ({
+    spaceId: input.spaceId,
+    migrationPackages: planSpace(input),
+  }));
+}
diff --git a/packages/1-framework/3-tooling/migration/src/read-pinned-contract-hash.ts b/packages/1-framework/3-tooling/migration/src/read-pinned-contract-hash.ts
new file mode 100644
index 0000000000..29d8cb8d22
--- /dev/null
+++ b/packages/1-framework/3-tooling/migration/src/read-pinned-contract-hash.ts
@@ -0,0 +1,77 @@
+import { readFile } from 'node:fs/promises';
+import { join } from 'pathe';
+import { errorInvalidJson, errorInvalidRefFile, errorPinnedArtefactsAppSpace } from './errors';
+import { APP_SPACE_ID, assertValidSpaceId } from './space-layout';
+
+function hasErrnoCode(error: unknown, code: string): boolean {
+  return error instanceof Error && (error as { code?: string }).code === code;
+}
+
+/**
+ * Read the pinned head hash for an extension space.
+ *
+ * Returns the `hash` field of `<projectMigrationsDir>/<spaceId>/refs/head.json`
+ * — i.e. the canonical contract hash the framework wrote on the last
+ * `migrate` for this space. Returns `null` when the file does not exist
+ * (or the migrations directory is missing entirely), which is the
+ * "first emit" signal {@link import('./detect-space-contract-drift').detectSpaceContractDrift}
+ * uses to distinguish a brand-new extension from drift.
+ *
+ * Pure I/O (read + parse).
The "comparison hash" is stored on disk by
+ * {@link import('./emit-pinned-space-artefacts').emitPinnedSpaceArtefacts}
+ * via the descriptor's `headRef.hash`, so reading it back here matches
+ * the descriptor's hashing pipeline by construction — neither side
+ * recomputes anything.
+ *
+ * Validation:
+ *
+ * - Rejects the app space — pinned head refs are an extension-space
+ *   concept; the app space's contract-of-record lives at the project
+ *   root, not under `migrations/`.
+ * - Validates the space id against the same `[a-z][a-z0-9_-]{0,63}`
+ *   pattern as the rest of the per-space helpers.
+ * - Surfaces `MIGRATION.INVALID_JSON` / `MIGRATION.INVALID_REF_FILE`
+ *   on a corrupt `refs/head.json` so callers can distinguish "no
+ *   pinned file" (returns `null`) from "pinned file but unreadable"
+ *   (throws).
+ *
+ * @see specs/framework-mechanism.spec.md § 3 — Drift detection (T1.9).
+ */
+export async function readPinnedContractHash(
+  projectMigrationsDir: string,
+  spaceId: string,
+): Promise<string | null> {
+  if (spaceId === APP_SPACE_ID) {
+    throw errorPinnedArtefactsAppSpace();
+  }
+  assertValidSpaceId(spaceId);
+
+  const filePath = join(projectMigrationsDir, spaceId, 'refs', 'head.json');
+
+  let raw: string;
+  try {
+    raw = await readFile(filePath, 'utf-8');
+  } catch (error) {
+    if (hasErrnoCode(error, 'ENOENT')) {
+      return null;
+    }
+    throw error;
+  }
+
+  let parsed: unknown;
+  try {
+    parsed = JSON.parse(raw);
+  } catch (e) {
+    throw errorInvalidJson(filePath, e instanceof Error ?
e.message : String(e)); + } + + if ( + typeof parsed !== 'object' || + parsed === null || + typeof (parsed as { hash?: unknown }).hash !== 'string' + ) { + throw errorInvalidRefFile(filePath, 'expected an object with a string `hash` field'); + } + + return (parsed as { hash: string }).hash; +} diff --git a/packages/1-framework/3-tooling/migration/src/space-layout.ts b/packages/1-framework/3-tooling/migration/src/space-layout.ts new file mode 100644 index 0000000000..c680d7786e --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/space-layout.ts @@ -0,0 +1,55 @@ +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; +import { join } from 'pathe'; +import { errorInvalidSpaceId } from './errors'; + +export { APP_SPACE_ID }; + +/** + * Branded string carrying a compile-time guarantee that the value has + * been validated by {@link assertValidSpaceId}. Downstream filesystem + * helpers (e.g. {@link spaceMigrationDirectory}) accept this type to + * make "validated" tracking visible at the type level rather than + * relying purely on a runtime check. + */ +export type ValidSpaceId = string & { readonly __brand: 'ValidSpaceId' }; + +/** + * Pattern a contract-space identifier must match. The constraint is + * filesystem-friendly: lowercase letters / digits / hyphen / underscore, + * starts with a letter, max 64 characters. + * + * @see specs/framework-mechanism.spec.md § 3. + */ +const SPACE_ID_PATTERN = /^[a-z][a-z0-9_-]{0,63}$/; + +export function isValidSpaceId(spaceId: string): spaceId is ValidSpaceId { + return SPACE_ID_PATTERN.test(spaceId); +} + +export function assertValidSpaceId(spaceId: string): asserts spaceId is ValidSpaceId { + if (!isValidSpaceId(spaceId)) { + throw errorInvalidSpaceId(spaceId); + } +} + +/** + * Resolve the migrations subdirectory for a given contract space. 
+ * + * - **App space** (`spaceId === APP_SPACE_ID`) keeps today's layout: the + * project's `migrations/` directory is the migrations directory, no + * subdirectory. + * - **Extension space** lands under `//`. + * The space id is validated against {@link SPACE_ID_PATTERN} because + * it becomes a filesystem directory name verbatim. + * + * `projectMigrationsDir` is the project's top-level `migrations/` + * directory; the helper does not assume anything about its absolute / + * relative shape and is symmetric with `pathe.join`. + */ +export function spaceMigrationDirectory(projectMigrationsDir: string, spaceId: string): string { + if (spaceId === APP_SPACE_ID) { + return projectMigrationsDir; + } + assertValidSpaceId(spaceId); + return join(projectMigrationsDir, spaceId); +} diff --git a/packages/1-framework/3-tooling/migration/src/verify-contract-spaces.ts b/packages/1-framework/3-tooling/migration/src/verify-contract-spaces.ts new file mode 100644 index 0000000000..4bf61b11d1 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/verify-contract-spaces.ts @@ -0,0 +1,276 @@ +import { readdir, stat } from 'node:fs/promises'; +import { join } from 'pathe'; +import { MANIFEST_FILE } from './io'; +import { APP_SPACE_ID } from './space-layout'; + +function hasErrnoCode(error: unknown, code: string): boolean { + return error instanceof Error && (error as { code?: string }).code === code; +} + +/** + * List the per-space pinned subdirectories under + * `/migrations/`. Returns space-id directory names (sorted + * alphabetically) — i.e. any non-dot-prefixed subdirectory whose root + * does **not** contain a `migration.json` manifest. The manifest is the + * structural marker of a user-authored migration directory (see + * `readMigrationsDir` in `./io`); directory names themselves belong to + * the user and are not part of the contract. + * + * Returns `[]` if the migrations directory does not exist (greenfield + * project). + * + * Reads only the user's repo. 
**No descriptor import.** The caller + * (verifier) feeds the result into {@link verifyContractSpaces} alongside + * the loaded-space set and the marker rows. + * + * @see specs/framework-mechanism.spec.md § 4 — Verifier (steps 5–6). + */ +export async function listPinnedSpaceDirectories( + projectMigrationsDir: string, +): Promise { + let entries: { readonly name: string; readonly isDirectory: boolean }[]; + try { + const dirents = await readdir(projectMigrationsDir, { withFileTypes: true }); + entries = dirents.map((d) => ({ name: d.name, isDirectory: d.isDirectory() })); + } catch (error) { + if (hasErrnoCode(error, 'ENOENT')) { + return []; + } + throw error; + } + + const namedCandidates = entries + .filter((e) => e.isDirectory) + .map((e) => e.name) + .filter((name) => !name.startsWith('.')) + .sort(); + + const manifestChecks = await Promise.all( + namedCandidates.map(async (name) => { + try { + await stat(join(projectMigrationsDir, name, MANIFEST_FILE)); + return { name, isMigrationDir: true }; + } catch (error) { + if (hasErrnoCode(error, 'ENOENT')) { + return { name, isMigrationDir: false }; + } + throw error; + } + }), + ); + + return manifestChecks.filter((c) => !c.isMigrationDir).map((c) => c.name); +} + +/** + * Pinned head value (`(hash, invariants)`) for one contract space. + * The verifier compares this against the marker row for the same space + * to detect drift between the user-emitted artefacts and the live DB + * marker. + */ +export interface SpacePinnedHashRecord { + readonly hash: string; + readonly invariants: readonly string[]; +} + +/** + * Marker row read from `prisma_contract.marker` (one per `space`). + * Caller resolves these via the family runtime's marker reader (T1.1) + * before invoking {@link verifyContractSpaces}. 
+ */
+export interface SpaceMarkerRecord {
+  readonly hash: string;
+  readonly invariants: readonly string[];
+}
+
+export interface VerifyContractSpacesInputs {
+  /**
+   * Set of contract spaces the project declares: `'app'` plus each
+   * extension space in `extensionPacks`. The caller's discovery path
+   * never reads the extension descriptor module — it walks the
+   * `extensionPacks` configuration in `prisma-next.config.ts` for the
+   * space ids.
+   */
+  readonly loadedSpaces: ReadonlySet<string>;
+
+  /**
+   * Pinned per-space subdirectories observed under
+   * `<projectRoot>/migrations/`. Resolved via
+   * {@link listPinnedSpaceDirectories}.
+   */
+  readonly pinnedDirsOnDisk: readonly string[];
+
+  /**
+   * Pinned head ref per space, keyed by space id. Caller reads
+   * `<projectRoot>/migrations/<spaceId>/contract.json` and
+   * `refs/head.json` (or, for app-space if its pinned shape ever moves
+   * under `migrations/`, the equivalent files) to construct this map.
+   * Spaces with no pinned dir on disk simply omit a map entry.
+   */
+  readonly pinnedHashesBySpace: ReadonlyMap<string, SpacePinnedHashRecord>;
+
+  /**
+   * Marker rows keyed by `space`. Caller reads them from the
+   * `prisma_contract.marker` table.
+   */
+  readonly markerRowsBySpace: ReadonlyMap<string, SpaceMarkerRecord>;
+}
+
+export type SpaceVerifierViolation =
+  | {
+      readonly kind: 'declaredButUnmigrated';
+      readonly spaceId: string;
+      readonly remediation: string;
+    }
+  | {
+      readonly kind: 'orphanMarker';
+      readonly spaceId: string;
+      readonly remediation: string;
+    }
+  | {
+      readonly kind: 'orphanPinnedDir';
+      readonly spaceId: string;
+      readonly remediation: string;
+    }
+  | {
+      readonly kind: 'hashMismatch';
+      readonly spaceId: string;
+      readonly pinnedHash: string;
+      readonly markerHash: string;
+      readonly remediation: string;
+    }
+  | {
+      readonly kind: 'invariantsMismatch';
+      readonly spaceId: string;
+      readonly pinnedInvariants: readonly string[];
+      readonly markerInvariants: readonly string[];
+      readonly remediation: string;
+    };
+
+export type VerifyContractSpacesResult =
+  | { readonly ok: true }
+  | { readonly ok: false; readonly violations: readonly SpaceVerifierViolation[] };
+
+/**
+ * Pure structural verifier for the per-space mechanism. Aggregates the
+ * three orphan / missing checks (FR6 cases a–c) plus per-space hash and
+ * invariant comparison.
+ *
+ * Algorithm (sub-spec § 4):
+ *
+ * - For every extension space declared in `loadedSpaces` (`'app'`
+ *   excluded — its pinned `contract.json` lives at the project root):
+ *   - If no pinned dir on disk → `declaredButUnmigrated`.
+ *   - Else if `markerRowsBySpace` lacks an entry → no violation here;
+ *     the live-DB compare in step 8 (out of scope of this helper) is
+ *     where the absence shows up.
+ *   - Else compare marker hash / invariants vs. pinned hash /
+ *     invariants → `hashMismatch` / `invariantsMismatch` on drift.
+ * - For every pinned dir on disk that is not in `loadedSpaces` →
+ *   `orphanPinnedDir`.
+ * - For every marker row whose `space` is not in `loadedSpaces` →
+ *   `orphanMarker`. The app-space marker is always loaded (`'app'` is
+ *   in `loadedSpaces` by definition).
+ * + * Output is deterministic (NFR6): violations are sorted first by `kind` + * (`declaredButUnmigrated` → `orphanMarker` → `orphanPinnedDir` → + * `hashMismatch` → `invariantsMismatch`) then by `spaceId`. Two callers + * passing equivalent inputs see byte-identical violation lists. + * + * Synchronous, pure, no I/O. **Does not import the extension descriptor** + * (the inputs are pre-resolved by the caller). This is the property + * AC-15 / AC-26 ("verifier reads only the user repo, not + * `node_modules`") locks in. + * + * @see specs/framework-mechanism.spec.md § 4 — Verifier (T1.5). + */ +export function verifyContractSpaces( + inputs: VerifyContractSpacesInputs, +): VerifyContractSpacesResult { + const violations: SpaceVerifierViolation[] = []; + + for (const spaceId of [...inputs.loadedSpaces].sort()) { + if (spaceId === APP_SPACE_ID) continue; + + if (!inputs.pinnedDirsOnDisk.includes(spaceId)) { + violations.push({ + kind: 'declaredButUnmigrated', + spaceId, + remediation: `Extension '${spaceId}' is declared in extensionPacks but has not been emitted; run \`prisma-next migrate\`.`, + }); + continue; + } + + const pinned = inputs.pinnedHashesBySpace.get(spaceId); + const marker = inputs.markerRowsBySpace.get(spaceId); + if (!pinned || !marker) { + continue; + } + + if (pinned.hash !== marker.hash) { + violations.push({ + kind: 'hashMismatch', + spaceId, + pinnedHash: pinned.hash, + markerHash: marker.hash, + remediation: `Marker row for space '${spaceId}' is keyed at ${marker.hash}, but the pinned ${join('migrations', spaceId, 'contract.json')} resolves to ${pinned.hash}. 
Run \`prisma-next db update\` to advance the database, or \`prisma-next migrate\` if the descriptor was bumped without re-emitting.`, + }); + continue; + } + + const pinnedInvariants = [...pinned.invariants].sort(); + const markerInvariants = new Set(marker.invariants); + const missing = pinnedInvariants.filter((id) => !markerInvariants.has(id)); + if (missing.length > 0) { + violations.push({ + kind: 'invariantsMismatch', + spaceId, + pinnedInvariants, + markerInvariants: [...marker.invariants].sort(), + remediation: `Marker row for space '${spaceId}' is missing invariants [${missing.map((s) => JSON.stringify(s)).join(', ')}]. Run \`prisma-next db update\` to apply the corresponding data-transform migrations.`, + }); + } + } + + for (const dir of [...inputs.pinnedDirsOnDisk].sort()) { + if (!inputs.loadedSpaces.has(dir)) { + violations.push({ + kind: 'orphanPinnedDir', + spaceId: dir, + remediation: `Orphan pinned directory \`${join('migrations', dir)}/\` for an extension not in extensionPacks; remove the directory or re-add the extension.`, + }); + } + } + + for (const space of [...inputs.markerRowsBySpace.keys()].sort()) { + if (!inputs.loadedSpaces.has(space)) { + violations.push({ + kind: 'orphanMarker', + spaceId: space, + remediation: `Orphan marker row for space '${space}' (no longer in extensionPacks); remediation: manually delete the row from \`prisma_contract.marker\`.`, + }); + } + } + + if (violations.length === 0) { + return { ok: true }; + } + + const kindOrder: Record = { + declaredButUnmigrated: 0, + orphanMarker: 1, + orphanPinnedDir: 2, + hashMismatch: 3, + invariantsMismatch: 4, + }; + + violations.sort((a, b) => { + const k = kindOrder[a.kind] - kindOrder[b.kind]; + if (k !== 0) return k; + if (a.spaceId < b.spaceId) return -1; + if (a.spaceId > b.spaceId) return 1; + return 0; + }); + + return { ok: false, violations }; +} diff --git a/packages/1-framework/3-tooling/migration/test/concatenate-space-apply-inputs.test.ts 
b/packages/1-framework/3-tooling/migration/test/concatenate-space-apply-inputs.test.ts new file mode 100644 index 0000000000..f012d0baac --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/concatenate-space-apply-inputs.test.ts @@ -0,0 +1,104 @@ +import { describe, expect, it } from 'vitest'; +import { + concatenateSpaceApplyInputs, + type SpaceApplyInput, +} from '../src/concatenate-space-apply-inputs'; + +interface FakeOp { + readonly id: string; +} + +const makeInput = ( + spaceId: string, + ops: readonly FakeOp[] = [{ id: `${spaceId}-op` }], +): SpaceApplyInput => ({ + spaceId, + migrationDirectory: `/repo/migrations/${spaceId === 'app' ? '' : spaceId}`, + currentMarkerHash: null, + currentMarkerInvariants: [], + path: ops, +}); + +describe('concatenateSpaceApplyInputs', () => { + it('puts the app-space input last', () => { + const result = concatenateSpaceApplyInputs([ + makeInput('app'), + makeInput('cipherstash'), + makeInput('pgvector'), + ]); + + expect(result.map((r) => r.spaceId)).toEqual(['cipherstash', 'pgvector', 'app']); + }); + + it('orders extension spaces alphabetically by spaceId', () => { + const result = concatenateSpaceApplyInputs([ + makeInput('pgvector'), + makeInput('cipherstash'), + makeInput('audit'), + ]); + + expect(result.map((r) => r.spaceId)).toEqual(['audit', 'cipherstash', 'pgvector']); + }); + + it('produces deterministic ordering regardless of declaration order', () => { + const a = concatenateSpaceApplyInputs([ + makeInput('cipherstash'), + makeInput('app'), + makeInput('pgvector'), + ]); + const b = concatenateSpaceApplyInputs([ + makeInput('app'), + makeInput('pgvector'), + makeInput('cipherstash'), + ]); + const c = concatenateSpaceApplyInputs([ + makeInput('pgvector'), + makeInput('cipherstash'), + makeInput('app'), + ]); + + const order = (xs: readonly SpaceApplyInput[]) => xs.map((x) => x.spaceId); + expect(order(a)).toEqual(['cipherstash', 'pgvector', 'app']); + expect(order(b)).toEqual(order(a)); + 
expect(order(c)).toEqual(order(a)); + }); + + it("handles a single app-space input (today's behaviour)", () => { + const result = concatenateSpaceApplyInputs([makeInput('app')]); + expect(result.map((r) => r.spaceId)).toEqual(['app']); + }); + + it('returns an empty array unchanged', () => { + expect(concatenateSpaceApplyInputs([])).toEqual([]); + }); + + it('preserves each input verbatim (path arrays, marker fields, directory)', () => { + const input = makeInput('cipherstash', [{ id: 'op-1' }, { id: 'op-2' }]); + const result = concatenateSpaceApplyInputs([input]); + expect(result[0]).toBe(input); + }); + + it('rejects duplicate spaceIds', () => { + let captured: unknown; + try { + concatenateSpaceApplyInputs([makeInput('cipherstash'), makeInput('cipherstash')]); + } catch (err) { + captured = err; + } + + expect(captured).toBeInstanceOf(Error); + expect((captured as { code: string }).code).toBe('MIGRATION.DUPLICATE_SPACE_ID'); + }); + + it('does not mutate the input array', () => { + const inputs = [makeInput('pgvector'), makeInput('app'), makeInput('cipherstash')]; + const snapshot = inputs.map((i) => i.spaceId); + concatenateSpaceApplyInputs(inputs); + expect(inputs.map((i) => i.spaceId)).toEqual(snapshot); + }); + + it('tolerates input where there is no app-space entry (extensions-only)', () => { + const result = concatenateSpaceApplyInputs([makeInput('pgvector'), makeInput('cipherstash')]); + expect(result.map((r) => r.spaceId)).toEqual(['cipherstash', 'pgvector']); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/deletable-node-modules.test.ts b/packages/1-framework/3-tooling/migration/test/deletable-node-modules.test.ts new file mode 100644 index 0000000000..1298b120fa --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/deletable-node-modules.test.ts @@ -0,0 +1,174 @@ +/** + * "Deletable `node_modules`" fixture for AC-15 / TC-26. 
+ * + * Locks in the property that the per-space verifier and runner **read + * only the user's repo** — pinned `contract.json` / `contract.d.ts` / + * `refs/head.json` files under `migrations//` plus the live + * marker rows. Neither helper imports the extension descriptor module, + * so the absence of `node_modules` (or any other path that resolves the + * descriptor) does not affect verify / apply outcomes. + * + * Scoped to the framework helpers shipped in this round + * (`emitPinnedSpaceArtefacts` + `listPinnedSpaceDirectories` + + * `verifyContractSpaces` + `concatenateSpaceApplyInputs`). The test + * intentionally **does not import** the synthetic + * `test-contract-space` fixture (today hosted under + * `test/integration/test/contract-space-fixture/`) — that is the + * point. The test invents a `'test-contract-space'` space id inline + * and runs the helpers against pinned files on disk plus a fake set of + * marker rows. + * + * @see specs/framework-mechanism.spec.md § 4 — Verifier (T1.5). + * @see projects/extension-contract-spaces/spec.md AC-15 / TC-26. 
+ */ + +import { mkdir, mkdtemp, readdir, readFile, rm } from 'node:fs/promises'; +import { tmpdir } from 'node:os'; +import { join } from 'pathe'; +import { afterEach, beforeEach, describe, expect, it } from 'vitest'; +import { canonicalizeJson } from '../src/canonicalize-json'; +import { + concatenateSpaceApplyInputs, + emitPinnedSpaceArtefacts, + listPinnedSpaceDirectories, + type SpaceApplyInput, + type SpaceMarkerRecord, + type SpacePinnedHashRecord, + verifyContractSpaces, +} from '../src/exports/spaces'; + +const TEST_SPACE_ID = 'test-contract-space'; +const TEST_HEAD_HASH = 'sha256:0000000000000000000000000000000000000000000000000000000000000abc'; +const TEST_INVARIANT = 'test-contract-space:create-test_box-v1'; + +const testContract = { + storageHash: TEST_HEAD_HASH, + tables: { test_box: { columns: { x: 'int', y: 'int' } } }, +}; +const testContractDts = + '// rendered .d.ts for the test contract space\nexport interface Contract {}\n'; + +interface ProjectFixture { + readonly projectRoot: string; + readonly projectMigrationsDir: string; + readonly nodeModulesPath: string; +} + +async function setupProjectWithPinnedTestSpace(): Promise { + const projectRoot = await mkdtemp(join(tmpdir(), 'no-descriptor-')); + const projectMigrationsDir = join(projectRoot, 'migrations'); + const nodeModulesPath = join(projectRoot, 'node_modules'); + + // Stand-in for an installed extension package — the descriptor module + // would normally live under `node_modules//...`. The test deletes + // this directory before invoking the verifier to model the AC-15 case + // ("verifier + runner succeed when extension descriptor not + // importable, e.g. node_modules removed"). 
+ await mkdir(join(nodeModulesPath, '@prisma-next', 'synthetic-extension-stand-in'), { + recursive: true, + }); + + await emitPinnedSpaceArtefacts(projectMigrationsDir, TEST_SPACE_ID, { + contract: testContract, + contractDts: testContractDts, + headRef: { hash: TEST_HEAD_HASH, invariants: [TEST_INVARIANT] }, + }); + + return { projectRoot, projectMigrationsDir, nodeModulesPath }; +} + +describe('per-space verifier + runner against a project with deleted node_modules (AC-15 / TC-26)', () => { + let fixture: ProjectFixture; + + beforeEach(async () => { + fixture = await setupProjectWithPinnedTestSpace(); + await rm(fixture.nodeModulesPath, { recursive: true, force: true }); + const remaining = await readdir(fixture.projectRoot); + expect(remaining.includes('node_modules')).toBe(false); + }); + + afterEach(async () => { + await rm(fixture.projectRoot, { recursive: true, force: true }); + }); + + it('listPinnedSpaceDirectories discovers the test space without descriptor access', async () => { + const dirs = await listPinnedSpaceDirectories(fixture.projectMigrationsDir); + expect(dirs).toEqual([TEST_SPACE_ID]); + }); + + it('verifyContractSpaces returns ok when pinned files + marker rows match — no descriptor needed', async () => { + const pinnedRaw = await readFile( + join(fixture.projectMigrationsDir, TEST_SPACE_ID, 'contract.json'), + 'utf-8', + ); + expect(pinnedRaw.trimEnd()).toBe(canonicalizeJson(testContract)); + + const headRaw = await readFile( + join(fixture.projectMigrationsDir, TEST_SPACE_ID, 'refs', 'head.json'), + 'utf-8', + ); + const headJson = JSON.parse(headRaw) as SpacePinnedHashRecord; + + const dirs = await listPinnedSpaceDirectories(fixture.projectMigrationsDir); + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', TEST_SPACE_ID]), + pinnedDirsOnDisk: dirs, + pinnedHashesBySpace: new Map([[TEST_SPACE_ID, headJson]]), + markerRowsBySpace: new Map([ + [TEST_SPACE_ID, { hash: headJson.hash, invariants: [...headJson.invariants] }], 
+ ]), + }); + + expect(result.ok).toBe(true); + }); + + it('verifyContractSpaces flags hash drift on the test space, again without descriptor access', async () => { + const dirs = await listPinnedSpaceDirectories(fixture.projectMigrationsDir); + + const driftedMarker: SpaceMarkerRecord = { + hash: 'sha256:00000000000000000000000000000000000000000000000000000000deadbeef', + invariants: [TEST_INVARIANT], + }; + + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', TEST_SPACE_ID]), + pinnedDirsOnDisk: dirs, + pinnedHashesBySpace: new Map([ + [ + TEST_SPACE_ID, + { hash: TEST_HEAD_HASH, invariants: [TEST_INVARIANT] } satisfies SpacePinnedHashRecord, + ], + ]), + markerRowsBySpace: new Map([[TEST_SPACE_ID, driftedMarker]]), + }); + + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.violations).toContainEqual( + expect.objectContaining({ + kind: 'hashMismatch', + spaceId: TEST_SPACE_ID, + }), + ); + }); + + it('concatenateSpaceApplyInputs orders the test space ahead of app — driven by on-disk inputs only', () => { + const appInput: SpaceApplyInput<{ readonly id: string }> = { + spaceId: 'app', + migrationDirectory: fixture.projectMigrationsDir, + currentMarkerHash: null, + currentMarkerInvariants: [], + path: [{ id: 'app-create-table' }], + }; + const testSpaceInput: SpaceApplyInput<{ readonly id: string }> = { + spaceId: TEST_SPACE_ID, + migrationDirectory: join(fixture.projectMigrationsDir, TEST_SPACE_ID), + currentMarkerHash: null, + currentMarkerInvariants: [], + path: [{ id: 'test-contract-space-create-test_box' }], + }; + + const ordered = concatenateSpaceApplyInputs([appInput, testSpaceInput]); + expect(ordered.map((i) => i.spaceId)).toEqual([TEST_SPACE_ID, 'app']); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/detect-space-contract-drift.test.ts b/packages/1-framework/3-tooling/migration/test/detect-space-contract-drift.test.ts new file mode 100644 index 0000000000..71a280ab71 --- /dev/null +++ 
b/packages/1-framework/3-tooling/migration/test/detect-space-contract-drift.test.ts @@ -0,0 +1,83 @@ +import { describe, expect, it } from 'vitest'; +import { detectSpaceContractDrift } from '../src/detect-space-contract-drift'; + +const HASH_A = 'sha256:0000000000000000000000000000000000000000000000000000000000000aaa'; +const HASH_B = 'sha256:0000000000000000000000000000000000000000000000000000000000000bbb'; + +describe('detectSpaceContractDrift', () => { + it("returns 'noDrift' when descriptor hash equals pinned hash", () => { + const result = detectSpaceContractDrift('cipherstash', { + descriptorHash: HASH_A, + pinnedHash: HASH_A, + }); + + expect(result).toEqual({ + kind: 'noDrift', + spaceId: 'cipherstash', + descriptorHash: HASH_A, + pinnedHash: HASH_A, + }); + }); + + it("returns 'firstEmit' when there is no pinned file yet (pinnedHash null)", () => { + const result = detectSpaceContractDrift('cipherstash', { + descriptorHash: HASH_A, + pinnedHash: null, + }); + + expect(result).toEqual({ + kind: 'firstEmit', + spaceId: 'cipherstash', + descriptorHash: HASH_A, + pinnedHash: null, + }); + }); + + it("returns 'drift' when descriptor hash differs from pinned hash", () => { + const result = detectSpaceContractDrift('cipherstash', { + descriptorHash: HASH_B, + pinnedHash: HASH_A, + }); + + expect(result).toEqual({ + kind: 'drift', + spaceId: 'cipherstash', + descriptorHash: HASH_B, + pinnedHash: HASH_A, + }); + }); + + it('preserves the supplied spaceId verbatim in the result', () => { + const result = detectSpaceContractDrift('audit-trail-v2', { + descriptorHash: HASH_A, + pinnedHash: HASH_B, + }); + expect(result.spaceId).toBe('audit-trail-v2'); + }); + + it('does not mutate the inputs object', () => { + const inputs = { descriptorHash: HASH_A, pinnedHash: HASH_B }; + const snapshot = { ...inputs }; + detectSpaceContractDrift('cipherstash', inputs); + expect(inputs).toEqual(snapshot); + }); + + it('treats two visually-equal-but-distinct strings byte-for-byte 
(no normalisation)', () => { + const result = detectSpaceContractDrift('cipherstash', { + descriptorHash: 'sha256:abc', + pinnedHash: 'sha256:ABC', + }); + expect(result.kind).toBe('drift'); + }); + + it("does not validate the spaceId pattern (caller's responsibility)", () => { + // Pure function that only inspects the hashes; AM7's "warning names + // the extension" comes from the result.spaceId being threaded + // through verbatim for the caller to format. + const result = detectSpaceContractDrift('Whatever You Like', { + descriptorHash: HASH_A, + pinnedHash: HASH_A, + }); + expect(result.spaceId).toBe('Whatever You Like'); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/emit-pinned-space-artefacts.test.ts b/packages/1-framework/3-tooling/migration/test/emit-pinned-space-artefacts.test.ts new file mode 100644 index 0000000000..645a94c5c9 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/emit-pinned-space-artefacts.test.ts @@ -0,0 +1,226 @@ +import { mkdir, mkdtemp, readdir, readFile, rm, writeFile } from 'node:fs/promises'; +import { tmpdir } from 'node:os'; +import { join } from 'pathe'; +import { afterEach, beforeEach, describe, expect, it } from 'vitest'; +import { canonicalizeJson } from '../src/canonicalize-json'; +import { emitPinnedSpaceArtefacts } from '../src/emit-pinned-space-artefacts'; +import { MigrationToolsError } from '../src/errors'; + +describe('emitPinnedSpaceArtefacts', () => { + let migrationsDir: string; + + beforeEach(async () => { + migrationsDir = await mkdtemp(join(tmpdir(), 'pinned-artefacts-')); + }); + + afterEach(async () => { + await rm(migrationsDir, { recursive: true, force: true }); + }); + + it('writes contract.json, contract.d.ts, and refs/head.json under migrations//', async () => { + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: { foo: 1 }, + contractDts: 'export interface Contract {}\n', + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + + const dir 
= join(migrationsDir, 'cipherstash'); + const entries = (await readdir(dir)).sort(); + expect(entries).toEqual(['contract.d.ts', 'contract.json', 'refs']); + + const refsEntries = await readdir(join(dir, 'refs')); + expect(refsEntries).toEqual(['head.json']); + }); + + it('serialises contract.json as the canonical-JSON form of the supplied contract', async () => { + const contract = { z: 1, a: { y: 2, x: 3 } }; + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + + const raw = await readFile(join(migrationsDir, 'cipherstash', 'contract.json'), 'utf-8'); + expect(raw).toBe(`${canonicalizeJson(contract)}\n`); + }); + + it('writes contract.d.ts verbatim from the caller-supplied string', async () => { + const dts = `// rendered by the caller\nexport type Contract = { kind: 'cipherstash' };\n`; + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: {}, + contractDts: dts, + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + + const raw = await readFile(join(migrationsDir, 'cipherstash', 'contract.d.ts'), 'utf-8'); + expect(raw).toBe(dts); + }); + + it('serialises refs/head.json with sorted invariants and trailing newline', async () => { + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: {}, + contractDts: '\n', + headRef: { + hash: 'sha256:0123456789012345678901234567890123456789012345678901234567890123', + invariants: ['z-inv', 'a-inv', 'm-inv'], + }, + }); + + const raw = await readFile(join(migrationsDir, 'cipherstash', 'refs', 'head.json'), 'utf-8'); + expect(raw.endsWith('\n')).toBe(true); + const parsed = JSON.parse(raw); + expect(parsed).toEqual({ + hash: 'sha256:0123456789012345678901234567890123456789012345678901234567890123', + invariants: ['a-inv', 'm-inv', 'z-inv'], + }); + }); + + it('overwrites pre-existing pinned files (the framework owns these files)', async () => { + const dir = 
join(migrationsDir, 'cipherstash'); + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: { v: 1 }, + contractDts: 'v1\n', + headRef: { hash: 'sha256:empty', invariants: ['inv-v1'] }, + }); + + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: { v: 2 }, + contractDts: 'v2\n', + headRef: { + hash: 'sha256:fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210', + invariants: ['inv-v2'], + }, + }); + + expect(await readFile(join(dir, 'contract.json'), 'utf-8')).toBe( + `${canonicalizeJson({ v: 2 })}\n`, + ); + expect(await readFile(join(dir, 'contract.d.ts'), 'utf-8')).toBe('v2\n'); + const headRaw = await readFile(join(dir, 'refs', 'head.json'), 'utf-8'); + expect(JSON.parse(headRaw)).toEqual({ + hash: 'sha256:fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210', + invariants: ['inv-v2'], + }); + }); + + it('overwrites stray files left over from earlier runs (e.g. invariants reduced to []) ', async () => { + const dir = join(migrationsDir, 'cipherstash'); + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: {}, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants: ['old'] }, + }); + + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: {}, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + + const headRaw = await readFile(join(dir, 'refs', 'head.json'), 'utf-8'); + expect(JSON.parse(headRaw)).toEqual({ hash: 'sha256:empty', invariants: [] }); + }); + + it('produces byte-identical output across two writes of the same artefact (idempotency)', async () => { + const dirA = join(migrationsDir, 'a'); + const dirB = join(migrationsDir, 'b'); + const args = { + contract: { z: 1, a: { y: 2 } }, + contractDts: 'export type X = number;\n', + headRef: { hash: 'sha256:empty', invariants: ['b', 'a'] }, + }; + + await emitPinnedSpaceArtefacts(dirA, 'cipherstash', args); + await emitPinnedSpaceArtefacts(dirB, 
'cipherstash', args); + + const aContract = await readFile(join(dirA, 'cipherstash', 'contract.json'), 'utf-8'); + const bContract = await readFile(join(dirB, 'cipherstash', 'contract.json'), 'utf-8'); + expect(aContract).toBe(bContract); + + const aDts = await readFile(join(dirA, 'cipherstash', 'contract.d.ts'), 'utf-8'); + const bDts = await readFile(join(dirB, 'cipherstash', 'contract.d.ts'), 'utf-8'); + expect(aDts).toBe(bDts); + + const aHead = await readFile(join(dirA, 'cipherstash', 'refs', 'head.json'), 'utf-8'); + const bHead = await readFile(join(dirB, 'cipherstash', 'refs', 'head.json'), 'utf-8'); + expect(aHead).toBe(bHead); + }); + + it('does not mutate the supplied invariants array', async () => { + const invariants = ['z', 'a', 'm']; + const snapshot = [...invariants]; + + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: {}, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants }, + }); + + expect(invariants).toEqual(snapshot); + }); + + it('rejects the app space (pinned artefacts apply only to extension spaces)', async () => { + let captured: unknown; + try { + await emitPinnedSpaceArtefacts(migrationsDir, 'app', { + contract: {}, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + } catch (err) { + captured = err; + } + + expect(MigrationToolsError.is(captured)).toBe(true); + expect((captured as MigrationToolsError).code).toBe('MIGRATION.PINNED_ARTEFACTS_APP_SPACE'); + }); + + it('rejects an invalid space id', async () => { + let captured: unknown; + try { + await emitPinnedSpaceArtefacts(migrationsDir, 'INVALID', { + contract: {}, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + } catch (err) { + captured = err; + } + + expect(MigrationToolsError.is(captured)).toBe(true); + expect((captured as MigrationToolsError).code).toBe('MIGRATION.INVALID_SPACE_ID'); + }); + + it('creates the migrations dir + space dir + refs dir if they do not yet exist', 
async () => { + const fresh = join(migrationsDir, 'fresh-project', 'migrations'); + + await emitPinnedSpaceArtefacts(fresh, 'cipherstash', { + contract: {}, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + + const entries = (await readdir(join(fresh, 'cipherstash'))).sort(); + expect(entries).toEqual(['contract.d.ts', 'contract.json', 'refs']); + }); + + it('preserves user-authored migration directories alongside the pinned files', async () => { + const dir = join(migrationsDir, 'cipherstash'); + const userMigration = join(dir, '20260101T0000_baseline'); + await writeFile(`${dir}-marker`, 'noop'); // ensure mkdir creates dir + await mkdir(userMigration, { recursive: true }); + await writeFile(join(userMigration, 'migration.json'), '{}'); + + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: {}, + contractDts: '\n', + headRef: { hash: 'sha256:empty', invariants: [] }, + }); + + const entries = (await readdir(dir)).sort(); + expect(entries).toContain('20260101T0000_baseline'); + expect(entries).toContain('contract.json'); + expect(entries).toContain('contract.d.ts'); + expect(entries).toContain('refs'); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/find-path-with-invariants.test.ts b/packages/1-framework/3-tooling/migration/test/find-path-with-invariants.test.ts index fcb9afd7ed..893a61b48d 100644 --- a/packages/1-framework/3-tooling/migration/test/find-path-with-invariants.test.ts +++ b/packages/1-framework/3-tooling/migration/test/find-path-with-invariants.test.ts @@ -3,7 +3,7 @@ import { EMPTY_CONTRACT_HASH } from '../src/constants'; import type { MigrationEdge } from '../src/graph'; import { computeMigrationHash } from '../src/hash'; import { findPath, findPathWithInvariants, reconstructGraph } from '../src/migration-graph'; -import type { MigrationPackage } from '../src/package'; +import type { OnDiskMigrationPackage } from '../src/package'; import { createTestMetadata, 
createTestOps } from './fixtures'; let migrationCounter = 0; @@ -14,7 +14,12 @@ interface PkgOpts { readonly labels?: readonly string[]; } -function pkg(from: string, to: string, dirName: string, opts: PkgOpts = {}): MigrationPackage { +function pkg( + from: string, + to: string, + dirName: string, + opts: PkgOpts = {}, +): OnDiskMigrationPackage { const baseCreatedAt = opts.createdAt ?? '2026-02-25T14:00:00.000Z'; const uniqueCreatedAt = `${baseCreatedAt}-${migrationCounter++}`; const metadata = createTestMetadata({ @@ -377,7 +382,7 @@ describe('findPathWithInvariants — pathological shapes', () => { // bottom edge provides nothing. Only the all-top path covers all 8 // invariants. Asserts: returns the all-top path. const k = 8; - const packages: MigrationPackage[] = []; + const packages: OnDiskMigrationPackage[] = []; for (let i = 0; i < k; i++) { const open = i === 0 ? E : `S${i}`; const close = `S${i + 1}`; diff --git a/packages/1-framework/3-tooling/migration/test/fixtures.ts b/packages/1-framework/3-tooling/migration/test/fixtures.ts index 96ae8f883f..96668218f5 100644 --- a/packages/1-framework/3-tooling/migration/test/fixtures.ts +++ b/packages/1-framework/3-tooling/migration/test/fixtures.ts @@ -5,7 +5,7 @@ import { computeMigrationHash } from '../src/hash'; import { deriveProvidedInvariants } from '../src/invariants'; import { writeMigrationPackage } from '../src/io'; import type { MigrationMetadata } from '../src/metadata'; -import type { MigrationOps, MigrationPackage } from '../src/package'; +import type { MigrationOps, OnDiskMigrationPackage } from '../src/package'; export function createTestContract(overrides: Partial = {}): Contract { return createContract(overrides); @@ -54,7 +54,7 @@ export function createAttestedPackage( dirName: string, metadataOverrides: Omit, 'migrationHash'> = {}, ops: MigrationOps = createTestOps(), -): MigrationPackage { +): OnDiskMigrationPackage { return { dirName, dirPath: `/tmp/migrations/${dirName}`, diff --git 
a/packages/1-framework/3-tooling/migration/test/io.test.ts b/packages/1-framework/3-tooling/migration/test/io.test.ts index 3f61ab35e8..5098c27baf 100644 --- a/packages/1-framework/3-tooling/migration/test/io.test.ts +++ b/packages/1-framework/3-tooling/migration/test/io.test.ts @@ -44,6 +44,18 @@ describe('writeMigrationPackage + readMigrationPackage', () => { expect(pkg.dirPath).toBe(dir); }); + it('normalizes dirPath to absolute when called with a relative path', async () => { + const absoluteDir = join(tmpDir, '20260225T1430_add_users'); + await writeTestPackage(absoluteDir); + const relativeDir = relative(process.cwd(), absoluteDir); + + expect(relativeDir.startsWith('/')).toBe(false); + + const pkg = await readMigrationPackage(relativeDir); + + expect(pkg.dirPath).toBe(absoluteDir); + }); + it('writes pretty-printed JSON', async () => { const dir = join(tmpDir, '20260225T1430_test'); await writeTestPackage(dir); diff --git a/packages/1-framework/3-tooling/migration/test/materialise-migration-package.test.ts b/packages/1-framework/3-tooling/migration/test/materialise-migration-package.test.ts new file mode 100644 index 0000000000..32631c55bb --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/materialise-migration-package.test.ts @@ -0,0 +1,108 @@ +import { mkdtemp, readdir, readFile, rm, stat, writeFile } from 'node:fs/promises'; +import { tmpdir } from 'node:os'; +import { join } from 'pathe'; +import { afterEach, beforeEach, describe, expect, it } from 'vitest'; +import { canonicalizeJson } from '../src/canonicalize-json'; +import { materialiseMigrationPackage } from '../src/io'; +import { createTestMetadata, createTestOps } from './fixtures'; + +describe('materialiseMigrationPackage', () => { + let tmpDir: string; + + beforeEach(async () => { + tmpDir = await mkdtemp(join(tmpdir(), 'materialise-mig-pkg-')); + }); + + afterEach(async () => { + await rm(tmpDir, { recursive: true, force: true }); + }); + + it('writes manifest, ops, and 
contract.json under //', async () => { + const ops = createTestOps(); + const metadata = createTestMetadata({}, ops); + const pkg = { dirName: '20260507T1100_install', metadata, ops }; + + await materialiseMigrationPackage(tmpDir, pkg); + + const dir = join(tmpDir, pkg.dirName); + const entries = (await readdir(dir)).sort(); + expect(entries).toEqual(['contract.json', 'migration.json', 'ops.json']); + }); + + it('serialises contract.json as the canonical JSON form of metadata.toContract', async () => { + const ops = createTestOps(); + const metadata = createTestMetadata({}, ops); + const pkg = { dirName: 'baseline', metadata, ops }; + + await materialiseMigrationPackage(tmpDir, pkg); + + const dir = join(tmpDir, pkg.dirName); + const contractRaw = await readFile(join(dir, 'contract.json'), 'utf-8'); + expect(contractRaw).toBe(`${canonicalizeJson(metadata.toContract)}\n`); + }); + + it('produces byte-identical output across two writes of the same package to different dirs', async () => { + const ops = createTestOps(); + const metadata = createTestMetadata({}, ops); + const pkg = { dirName: 'baseline', metadata, ops }; + + const dirA = join(tmpDir, 'a'); + const dirB = join(tmpDir, 'b'); + await materialiseMigrationPackage(dirA, pkg); + await materialiseMigrationPackage(dirB, pkg); + + const aManifest = await readFile(join(dirA, pkg.dirName, 'migration.json'), 'utf-8'); + const bManifest = await readFile(join(dirB, pkg.dirName, 'migration.json'), 'utf-8'); + expect(aManifest).toBe(bManifest); + + const aOps = await readFile(join(dirA, pkg.dirName, 'ops.json'), 'utf-8'); + const bOps = await readFile(join(dirB, pkg.dirName, 'ops.json'), 'utf-8'); + expect(aOps).toBe(bOps); + + const aContract = await readFile(join(dirA, pkg.dirName, 'contract.json'), 'utf-8'); + const bContract = await readFile(join(dirB, pkg.dirName, 'contract.json'), 'utf-8'); + expect(aContract).toBe(bContract); + }); + + it('overwrites the per-package directory idempotently and removes stale 
files', async () => { + const ops = createTestOps(); + const metadata = createTestMetadata({}, ops); + const pkg = { dirName: 'baseline', metadata, ops }; + const dir = join(tmpDir, pkg.dirName); + + await materialiseMigrationPackage(tmpDir, pkg); + + const firstManifest = await readFile(join(dir, 'migration.json'), 'utf-8'); + const firstOps = await readFile(join(dir, 'ops.json'), 'utf-8'); + const firstContract = await readFile(join(dir, 'contract.json'), 'utf-8'); + + await writeFile(join(dir, 'stale.json'), '{"stale":true}\n'); + expect((await readdir(dir)).sort()).toEqual([ + 'contract.json', + 'migration.json', + 'ops.json', + 'stale.json', + ]); + + await materialiseMigrationPackage(tmpDir, pkg); + + expect(await readFile(join(dir, 'migration.json'), 'utf-8')).toBe(firstManifest); + expect(await readFile(join(dir, 'ops.json'), 'utf-8')).toBe(firstOps); + expect(await readFile(join(dir, 'contract.json'), 'utf-8')).toBe(firstContract); + expect((await readdir(dir)).sort()).toEqual(['contract.json', 'migration.json', 'ops.json']); + }); + + it('creates the target directory if it does not yet exist', async () => { + const nested = join(tmpDir, 'cipherstash'); + const pkg = { + dirName: 'baseline', + metadata: createTestMetadata({}, []), + ops: [], + }; + + await materialiseMigrationPackage(nested, pkg); + + const dirStat = await stat(join(nested, 'baseline')); + expect(dirStat.isDirectory()).toBe(true); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/migration-graph.test.ts b/packages/1-framework/3-tooling/migration/test/migration-graph.test.ts index 943392bffc..6cf8a11188 100644 --- a/packages/1-framework/3-tooling/migration/test/migration-graph.test.ts +++ b/packages/1-framework/3-tooling/migration/test/migration-graph.test.ts @@ -13,7 +13,7 @@ import { findReachableLeaves, reconstructGraph, } from '../src/migration-graph'; -import type { MigrationPackage } from '../src/package'; +import type { OnDiskMigrationPackage } from 
'../src/package'; import { createTestMetadata, createTestOps } from './fixtures'; let migrationCounter = 0; @@ -24,7 +24,7 @@ function pkg( dirName: string, createdAt = '2026-02-25T14:00:00.000Z', labels: readonly string[] = [], -): MigrationPackage { +): OnDiskMigrationPackage { // Bake a per-pkg counter into createdAt so distinct packages get distinct // hashes — and use the same metadata for both hashing and the returned // package, so each fixture is internally consistent (round-trips through @@ -41,7 +41,7 @@ function pkg( }; } -function chain(...specs: Array<[string, string, string]>): MigrationPackage[] { +function chain(...specs: Array<[string, string, string]>): OnDiskMigrationPackage[] { return specs.map(([from, to, dirName]) => pkg(from!, to!, dirName!)); } @@ -56,7 +56,7 @@ function pkgWithInvariants( to: string, dirName: string, opts: PkgWithInvariantsOpts = {}, -): MigrationPackage { +): OnDiskMigrationPackage { const uniqueCreatedAt = opts.createdAt ?? `2026-02-25T14:00:00.000Z-${migrationCounter++}`; const metadata = createTestMetadata({ from, @@ -79,7 +79,7 @@ function pkgSelfEdge( hash: string, dirName: string, opts: { readonly invariants?: readonly string[] } = {}, -): MigrationPackage { +): OnDiskMigrationPackage { const uniqueCreatedAt = `2026-02-25T14:00:00.000Z-${migrationCounter++}`; const ops = [ { @@ -144,7 +144,7 @@ describe('reconstructGraph', () => { ops, ); const migrationHash = computeMigrationHash(metadata, ops); - const packages: MigrationPackage[] = [ + const packages: OnDiskMigrationPackage[] = [ { dirName: 'm1', dirPath: '/migrations/m1', @@ -384,7 +384,7 @@ describe('detectCycles', () => { }); it('detects cycle in node graph', () => { - const packages: MigrationPackage[] = [ + const packages: OnDiskMigrationPackage[] = [ pkg('A', 'B', 'm1'), pkg('B', 'C', 'm2'), pkg('C', 'A', 'm3'), diff --git a/packages/1-framework/3-tooling/migration/test/plan-all-spaces.test.ts 
b/packages/1-framework/3-tooling/migration/test/plan-all-spaces.test.ts new file mode 100644 index 0000000000..6bb89c1d4f --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/plan-all-spaces.test.ts @@ -0,0 +1,141 @@ +import { describe, expect, it, vi } from 'vitest'; +import { MigrationToolsError } from '../src/errors'; +import { planAllSpaces, type SpacePlanInput } from '../src/plan-all-spaces'; + +interface FakeContract { + readonly hash: string; +} +type FakePackage = { readonly id: string }; + +const makeInput = ( + spaceId: string, + newHash: string, + priorHash: string | null = null, +): SpacePlanInput => ({ + spaceId, + newContract: { hash: newHash }, + priorContract: priorHash !== null ? { hash: priorHash } : null, +}); + +const planSpace = (input: SpacePlanInput): readonly FakePackage[] => [ + { id: `${input.spaceId}->${input.newContract.hash}` }, +]; + +describe('planAllSpaces', () => { + it('returns one output per input, paired with the same spaceId', () => { + const result = planAllSpaces( + [makeInput('app', 'h-app'), makeInput('cipherstash', 'h-cipher')], + planSpace, + ); + + expect(result).toHaveLength(2); + expect(result.map((r) => r.spaceId)).toEqual(['app', 'cipherstash']); + }); + + it('sorts outputs alphabetically by spaceId regardless of input order (AM3)', () => { + const order1 = planAllSpaces( + [makeInput('cipherstash', 'h1'), makeInput('app', 'h2'), makeInput('pgvector', 'h3')], + planSpace, + ); + const order2 = planAllSpaces( + [makeInput('app', 'h2'), makeInput('pgvector', 'h3'), makeInput('cipherstash', 'h1')], + planSpace, + ); + const order3 = planAllSpaces( + [makeInput('pgvector', 'h3'), makeInput('cipherstash', 'h1'), makeInput('app', 'h2')], + planSpace, + ); + + expect(order1.map((r) => r.spaceId)).toEqual(['app', 'cipherstash', 'pgvector']); + expect(order2).toEqual(order1); + expect(order3).toEqual(order1); + }); + + it('passes the prior + new contract through to planSpace unchanged', () => { + const calls: 
SpacePlanInput[] = []; + const captured = (input: SpacePlanInput): readonly FakePackage[] => { + calls.push(input); + return []; + }; + + planAllSpaces( + [ + makeInput('app', 'h-app-new', 'h-app-prior'), + makeInput('cipherstash', 'h-cipher-new', null), + ], + captured, + ); + + expect(calls).toEqual([ + { + spaceId: 'app', + priorContract: { hash: 'h-app-prior' }, + newContract: { hash: 'h-app-new' }, + }, + { + spaceId: 'cipherstash', + priorContract: null, + newContract: { hash: 'h-cipher-new' }, + }, + ]); + }); + + it('returns an empty array unchanged (no calls to planSpace)', () => { + const planSpaceSpy = vi.fn(planSpace); + const result = planAllSpaces([], planSpaceSpy); + expect(result).toEqual([]); + expect(planSpaceSpy).not.toHaveBeenCalled(); + }); + + it("preserves today's single-app behaviour when only the app space is supplied", () => { + const result = planAllSpaces([makeInput('app', 'h-app')], planSpace); + expect(result).toEqual([{ spaceId: 'app', migrationPackages: [{ id: 'app->h-app' }] }]); + }); + + it('attaches whatever migrationPackages planSpace returns (zero, one, or many)', () => { + const variable = (input: SpacePlanInput): readonly FakePackage[] => { + if (input.spaceId === 'app') return []; + if (input.spaceId === 'cipherstash') return [{ id: 'p1' }, { id: 'p2' }, { id: 'p3' }]; + return [{ id: 'one' }]; + }; + + const result = planAllSpaces( + [makeInput('app', 'h'), makeInput('cipherstash', 'h'), makeInput('pgvector', 'h')], + variable, + ); + + expect(result.find((r) => r.spaceId === 'app')?.migrationPackages).toEqual([]); + expect(result.find((r) => r.spaceId === 'cipherstash')?.migrationPackages).toHaveLength(3); + expect(result.find((r) => r.spaceId === 'pgvector')?.migrationPackages).toHaveLength(1); + }); + + it('rejects duplicate spaceIds with MIGRATION.DUPLICATE_SPACE_ID before any planSpace call runs', () => { + const planSpaceSpy = vi.fn(planSpace); + let captured: unknown; + try { + planAllSpaces( + [makeInput('app', 
'h1'), makeInput('cipherstash', 'h2'), makeInput('app', 'h3')], + planSpaceSpy, + ); + } catch (error) { + captured = error; + } + + expect(MigrationToolsError.is(captured)).toBe(true); + const err = captured as MigrationToolsError; + expect(err.code).toBe('MIGRATION.DUPLICATE_SPACE_ID'); + expect(err.why).toContain('"app"'); + expect(planSpaceSpy).not.toHaveBeenCalled(); + }); + + it('does not mutate the input array', () => { + const inputs = [ + makeInput('cipherstash', 'h1'), + makeInput('app', 'h2'), + makeInput('pgvector', 'h3'), + ]; + const snapshot = inputs.map((i) => i.spaceId); + planAllSpaces(inputs, planSpace); + expect(inputs.map((i) => i.spaceId)).toEqual(snapshot); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/read-pinned-contract-hash.test.ts b/packages/1-framework/3-tooling/migration/test/read-pinned-contract-hash.test.ts new file mode 100644 index 0000000000..18274e83bd --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/read-pinned-contract-hash.test.ts @@ -0,0 +1,101 @@ +import { mkdir, mkdtemp, rm, writeFile } from 'node:fs/promises'; +import { tmpdir } from 'node:os'; +import { join } from 'pathe'; +import { afterEach, beforeEach, describe, expect, it } from 'vitest'; +import { canonicalizeJson } from '../src/canonicalize-json'; +import { emitPinnedSpaceArtefacts } from '../src/emit-pinned-space-artefacts'; +import { MigrationToolsError } from '../src/errors'; +import { readPinnedContractHash } from '../src/read-pinned-contract-hash'; + +describe('readPinnedContractHash', () => { + let migrationsDir: string; + + beforeEach(async () => { + migrationsDir = await mkdtemp(join(tmpdir(), 'read-pinned-hash-')); + }); + + afterEach(async () => { + await rm(migrationsDir, { recursive: true, force: true }); + }); + + it("returns null when the space's pinned refs/head.json does not exist", async () => { + expect(await readPinnedContractHash(migrationsDir, 'cipherstash')).toBeNull(); + }); + + it('returns null when 
the migrations directory itself does not exist', async () => { + const missing = join(migrationsDir, 'nope', 'migrations'); + expect(await readPinnedContractHash(missing, 'cipherstash')).toBeNull(); + }); + + it('returns the pinned hash written by emitPinnedSpaceArtefacts', async () => { + const hash = 'sha256:0123456789012345678901234567890123456789012345678901234567890123'; + await emitPinnedSpaceArtefacts(migrationsDir, 'cipherstash', { + contract: { foo: 1 }, + contractDts: '\n', + headRef: { hash, invariants: ['inv-1'] }, + }); + + expect(await readPinnedContractHash(migrationsDir, 'cipherstash')).toBe(hash); + }); + + it('reads the hash field from refs/head.json verbatim (no normalisation)', async () => { + const dir = join(migrationsDir, 'cipherstash', 'refs'); + await mkdir(dir, { recursive: true }); + const hash = 'sha256:abc'; + await writeFile(join(dir, 'head.json'), `${canonicalizeJson({ hash, invariants: [] })}\n`); + + expect(await readPinnedContractHash(migrationsDir, 'cipherstash')).toBe(hash); + }); + + it('throws when refs/head.json is malformed JSON', async () => { + const dir = join(migrationsDir, 'cipherstash', 'refs'); + await mkdir(dir, { recursive: true }); + await writeFile(join(dir, 'head.json'), 'not json {'); + + let captured: unknown; + try { + await readPinnedContractHash(migrationsDir, 'cipherstash'); + } catch (err) { + captured = err; + } + expect(MigrationToolsError.is(captured)).toBe(true); + expect((captured as MigrationToolsError).code).toBe('MIGRATION.INVALID_JSON'); + }); + + it("throws when refs/head.json's hash field is missing or wrong-shaped", async () => { + const dir = join(migrationsDir, 'cipherstash', 'refs'); + await mkdir(dir, { recursive: true }); + await writeFile(join(dir, 'head.json'), JSON.stringify({ invariants: [] })); + + let captured: unknown; + try { + await readPinnedContractHash(migrationsDir, 'cipherstash'); + } catch (err) { + captured = err; + } + expect(MigrationToolsError.is(captured)).toBe(true); + 
expect((captured as MigrationToolsError).code).toBe('MIGRATION.INVALID_REF_FILE'); + }); + + it('rejects an invalid space id (filesystem safety)', async () => { + let captured: unknown; + try { + await readPinnedContractHash(migrationsDir, 'INVALID'); + } catch (err) { + captured = err; + } + expect(MigrationToolsError.is(captured)).toBe(true); + expect((captured as MigrationToolsError).code).toBe('MIGRATION.INVALID_SPACE_ID'); + }); + + it('rejects the app space (pinned head ref is an extension-space concept)', async () => { + let captured: unknown; + try { + await readPinnedContractHash(migrationsDir, 'app'); + } catch (err) { + captured = err; + } + expect(MigrationToolsError.is(captured)).toBe(true); + expect((captured as MigrationToolsError).code).toBe('MIGRATION.PINNED_ARTEFACTS_APP_SPACE'); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/space-layout.test.ts b/packages/1-framework/3-tooling/migration/test/space-layout.test.ts new file mode 100644 index 0000000000..7e7bd6edc2 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/space-layout.test.ts @@ -0,0 +1,85 @@ +import { describe, expect, it } from 'vitest'; +import { MigrationToolsError } from '../src/errors'; +import { + APP_SPACE_ID, + assertValidSpaceId, + isValidSpaceId, + spaceMigrationDirectory, +} from '../src/space-layout'; + +describe('APP_SPACE_ID', () => { + it('equals "app"', () => { + expect(APP_SPACE_ID).toBe('app'); + }); +}); + +describe('isValidSpaceId', () => { + it.each([ + ['app', true], + ['cipherstash', true], + ['pgvector', true], + ['my-extension', true], + ['my_extension', true], + ['x', true], + ['x123', true], + ])('accepts %s', (id, expected) => { + expect(isValidSpaceId(id)).toBe(expected); + }); + + it.each([ + ['', 'empty'], + ['Cipherstash', 'starts with uppercase'], + ['1pgvector', 'starts with digit'], + ['_leading-underscore', 'starts with underscore'], + ['-leading-dash', 'starts with dash'], + ['has space', 'contains a space'], + 
['has.dot', 'contains a dot'], + ['has/slash', 'contains a slash'], + ['HAS_UPPER', 'uppercase letters'], + [`a${'b'.repeat(64)}`, 'longer than 64 chars'], + ])('rejects %s (%s)', (id, _why) => { + expect(isValidSpaceId(id)).toBe(false); + }); +}); + +describe('assertValidSpaceId', () => { + it('returns void on a valid id', () => { + expect(() => assertValidSpaceId('cipherstash')).not.toThrow(); + }); + + it('throws a MigrationToolsError with code MIGRATION.INVALID_SPACE_ID on an invalid id', () => { + let captured: unknown; + try { + assertValidSpaceId('Bad Space'); + } catch (error) { + captured = error; + } + expect(MigrationToolsError.is(captured)).toBe(true); + const err = captured as MigrationToolsError; + expect(err.code).toBe('MIGRATION.INVALID_SPACE_ID'); + expect(err.category).toBe('MIGRATION'); + expect(err.why).toContain('Bad Space'); + }); +}); + +describe('spaceMigrationDirectory', () => { + it("returns the project's migrations dir unchanged for the app space", () => { + expect(spaceMigrationDirectory('/p/migrations', APP_SPACE_ID)).toBe('/p/migrations'); + }); + + it('appends the space id as a subdirectory for an extension space', () => { + expect(spaceMigrationDirectory('/p/migrations', 'cipherstash')).toBe( + '/p/migrations/cipherstash', + ); + }); + + it('throws on an invalid space id', () => { + expect(() => spaceMigrationDirectory('/p/migrations', 'Bad Space')).toThrow( + MigrationToolsError, + ); + }); + + it('does not validate the app space id (always allowed)', () => { + expect(() => spaceMigrationDirectory('/p/migrations', APP_SPACE_ID)).not.toThrow(); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/test/verify-contract-spaces.test.ts b/packages/1-framework/3-tooling/migration/test/verify-contract-spaces.test.ts new file mode 100644 index 0000000000..c1e6d347fa --- /dev/null +++ b/packages/1-framework/3-tooling/migration/test/verify-contract-spaces.test.ts @@ -0,0 +1,315 @@ +import { mkdir, mkdtemp, rm, writeFile } from 
'node:fs/promises'; +import { tmpdir } from 'node:os'; +import { join } from 'pathe'; +import { afterEach, beforeEach, describe, expect, it } from 'vitest'; +import { + listPinnedSpaceDirectories, + type SpaceMarkerRecord, + type SpacePinnedHashRecord, + verifyContractSpaces, +} from '../src/verify-contract-spaces'; + +describe('listPinnedSpaceDirectories', () => { + let projectMigrationsDir: string; + + async function makeMigrationDir(name: string): Promise { + await mkdir(join(projectMigrationsDir, name), { recursive: true }); + await writeFile(join(projectMigrationsDir, name, 'migration.json'), '{}'); + } + + async function makePinnedSpaceDir(name: string): Promise { + await mkdir(join(projectMigrationsDir, name), { recursive: true }); + } + + beforeEach(async () => { + projectMigrationsDir = await mkdtemp(join(tmpdir(), 'list-pinned-')); + }); + + afterEach(async () => { + await rm(projectMigrationsDir, { recursive: true, force: true }); + }); + + it('returns an empty list when the migrations directory does not exist', async () => { + const missing = join(projectMigrationsDir, 'does-not-exist'); + expect(await listPinnedSpaceDirectories(missing)).toEqual([]); + }); + + it('excludes timestamp-shaped migration directories that contain migration.json', async () => { + await makeMigrationDir('20260101T0000_baseline'); + await makeMigrationDir('20260507T1100_add_users'); + + expect(await listPinnedSpaceDirectories(projectMigrationsDir)).toEqual([]); + }); + + it('excludes a space-id-shaped directory when it contains migration.json', async () => { + // The directory name happens to look like a space id, but the + // presence of `migration.json` is the structural marker — users may + // freely name their migration directories. 
+ await makeMigrationDir('cipherstash'); + + expect(await listPinnedSpaceDirectories(projectMigrationsDir)).toEqual([]); + }); + + it('includes a timestamp-shaped directory with no migration.json (verifier no longer trusts the name)', async () => { + await makePinnedSpaceDir('20260101T0000_baseline'); + await makePinnedSpaceDir('cipherstash'); + + expect(await listPinnedSpaceDirectories(projectMigrationsDir)).toEqual([ + '20260101T0000_baseline', + 'cipherstash', + ]); + }); + + it('returns extension-space subdirectories sorted alphabetically', async () => { + await makePinnedSpaceDir('pgvector'); + await makePinnedSpaceDir('cipherstash'); + await makePinnedSpaceDir('audit'); + + expect(await listPinnedSpaceDirectories(projectMigrationsDir)).toEqual([ + 'audit', + 'cipherstash', + 'pgvector', + ]); + }); + + it('returns pinned-space dirs alongside skipping migration dirs', async () => { + await makeMigrationDir('20260101T0000_baseline'); + await makePinnedSpaceDir('cipherstash'); + await makeMigrationDir('20260507T1100_add_users'); + await makePinnedSpaceDir('pgvector'); + + expect(await listPinnedSpaceDirectories(projectMigrationsDir)).toEqual([ + 'cipherstash', + 'pgvector', + ]); + }); + + it('skips files (only directory entries are reported)', async () => { + await writeFile(join(projectMigrationsDir, 'cipherstash'), 'i am a file'); + await makePinnedSpaceDir('pgvector'); + + expect(await listPinnedSpaceDirectories(projectMigrationsDir)).toEqual(['pgvector']); + }); + + it('skips dot-prefixed directories', async () => { + await mkdir(join(projectMigrationsDir, '.git')); + await mkdir(join(projectMigrationsDir, '.tmp')); + await makePinnedSpaceDir('cipherstash'); + + expect(await listPinnedSpaceDirectories(projectMigrationsDir)).toEqual(['cipherstash']); + }); +}); + +describe('verifyContractSpaces', () => { + const cipherstashPinned: SpacePinnedHashRecord = { + hash: 'sha256:0000000000000000000000000000000000000000000000000000000000000001', + invariants: 
['cipherstash:install-v1'], + }; + const pgvectorPinned: SpacePinnedHashRecord = { + hash: 'sha256:0000000000000000000000000000000000000000000000000000000000000002', + invariants: ['pgvector:install-v1'], + }; + + const markerOf = (pinned: SpacePinnedHashRecord): SpaceMarkerRecord => ({ + hash: pinned.hash, + invariants: [...pinned.invariants], + }); + + it("returns ok for today's single-app project (no extensions, no extra dirs, no extra markers)", () => { + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app']), + pinnedDirsOnDisk: [], + pinnedHashesBySpace: new Map(), + markerRowsBySpace: new Map(), + }); + expect(result.ok).toBe(true); + }); + + it('returns ok when loadedSpaces match pinned dirs and marker rows exactly', () => { + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', 'cipherstash']), + pinnedDirsOnDisk: ['cipherstash'], + pinnedHashesBySpace: new Map([['cipherstash', cipherstashPinned]]), + markerRowsBySpace: new Map([['cipherstash', markerOf(cipherstashPinned)]]), + }); + expect(result.ok).toBe(true); + }); + + it('rejects when extensionPacks declares a space without a pinned dir on disk (declaredButUnmigrated)', () => { + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', 'cipherstash']), + pinnedDirsOnDisk: [], + pinnedHashesBySpace: new Map(), + markerRowsBySpace: new Map(), + }); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.violations).toHaveLength(1); + expect(result.violations[0]).toMatchObject({ + kind: 'declaredButUnmigrated', + spaceId: 'cipherstash', + }); + }); + + it('rejects when a pinned dir on disk is not in extensionPacks (orphanPinnedDir)', () => { + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app']), + pinnedDirsOnDisk: ['cipherstash'], + pinnedHashesBySpace: new Map([['cipherstash', cipherstashPinned]]), + markerRowsBySpace: new Map(), + }); + expect(result.ok).toBe(false); + if (result.ok) return; + 
expect(result.violations).toHaveLength(1); + expect(result.violations[0]).toMatchObject({ + kind: 'orphanPinnedDir', + spaceId: 'cipherstash', + }); + }); + + it('rejects when a marker row exists for a space not in extensionPacks (orphanMarker)', () => { + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app']), + pinnedDirsOnDisk: [], + pinnedHashesBySpace: new Map(), + markerRowsBySpace: new Map([['cipherstash', markerOf(cipherstashPinned)]]), + }); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.violations).toHaveLength(1); + expect(result.violations[0]).toMatchObject({ + kind: 'orphanMarker', + spaceId: 'cipherstash', + }); + }); + + it('rejects when marker hash does not match pinned hash for a loaded space (hashMismatch)', () => { + const driftedMarker: SpaceMarkerRecord = { + hash: 'sha256:00000000000000000000000000000000000000000000000000000000000000ff', + invariants: cipherstashPinned.invariants, + }; + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', 'cipherstash']), + pinnedDirsOnDisk: ['cipherstash'], + pinnedHashesBySpace: new Map([['cipherstash', cipherstashPinned]]), + markerRowsBySpace: new Map([['cipherstash', driftedMarker]]), + }); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.violations).toContainEqual( + expect.objectContaining({ + kind: 'hashMismatch', + spaceId: 'cipherstash', + pinnedHash: cipherstashPinned.hash, + markerHash: driftedMarker.hash, + }), + ); + }); + + it("rejects when marker invariants don't cover pinned invariants (invariantsMismatch)", () => { + const partialMarker: SpaceMarkerRecord = { + hash: cipherstashPinned.hash, + invariants: [], + }; + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', 'cipherstash']), + pinnedDirsOnDisk: ['cipherstash'], + pinnedHashesBySpace: new Map([['cipherstash', cipherstashPinned]]), + markerRowsBySpace: new Map([['cipherstash', partialMarker]]), + }); + expect(result.ok).toBe(false); + 
if (result.ok) return; + expect(result.violations).toContainEqual( + expect.objectContaining({ + kind: 'invariantsMismatch', + spaceId: 'cipherstash', + }), + ); + }); + + it('aggregates multiple violations across spaces deterministically (alphabetical by spaceId)', () => { + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', 'cipherstash']), + pinnedDirsOnDisk: ['orphan-z', 'orphan-a'], + pinnedHashesBySpace: new Map([ + ['orphan-a', cipherstashPinned], + ['orphan-z', pgvectorPinned], + ]), + markerRowsBySpace: new Map([ + ['orphan-marker-1', markerOf(cipherstashPinned)], + ['orphan-marker-2', markerOf(pgvectorPinned)], + ]), + }); + + expect(result.ok).toBe(false); + if (result.ok) return; + const kindsAndIds = result.violations.map((v) => `${v.kind}:${v.spaceId}`); + expect(kindsAndIds).toEqual([ + 'declaredButUnmigrated:cipherstash', + 'orphanMarker:orphan-marker-1', + 'orphanMarker:orphan-marker-2', + 'orphanPinnedDir:orphan-a', + 'orphanPinnedDir:orphan-z', + ]); + }); + + it('every violation includes a remediation hint', () => { + const driftedMarker: SpaceMarkerRecord = { + hash: 'sha256:00000000000000000000000000000000000000000000000000000000000000ff', + invariants: [], + }; + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', 'pgvector']), + pinnedDirsOnDisk: ['orphan'], + pinnedHashesBySpace: new Map([['orphan', cipherstashPinned]]), + markerRowsBySpace: new Map([ + ['ghost', markerOf(cipherstashPinned)], + ['pgvector', driftedMarker], + ]), + }); + + expect(result.ok).toBe(false); + if (result.ok) return; + for (const v of result.violations) { + expect(typeof v.remediation).toBe('string'); + expect(v.remediation.length).toBeGreaterThan(0); + } + }); + + it("treats 'app' marker rows as expected (app is always loaded)", () => { + const appMarker: SpaceMarkerRecord = { + hash: 'sha256:dead', + invariants: [], + }; + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app']), + pinnedDirsOnDisk: [], + 
pinnedHashesBySpace: new Map(), + markerRowsBySpace: new Map([['app', appMarker]]), + }); + expect(result.ok).toBe(true); + }); + + it('does not flag a missing app-space pinned dir (app pinning lives at the project root, not under migrations/)', () => { + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app']), + pinnedDirsOnDisk: [], + pinnedHashesBySpace: new Map(), + markerRowsBySpace: new Map(), + }); + expect(result.ok).toBe(true); + }); + + it('does not import any extension descriptor (verifier reads only its inputs)', () => { + // Smoke check: the function must work with a brand-new Map / Set + // and return a plain Result. No descriptor module required by the + // call itself — the inputs are pre-resolved by the caller. + const result = verifyContractSpaces({ + loadedSpaces: new Set(['app', 'cipherstash']), + pinnedDirsOnDisk: ['cipherstash'], + pinnedHashesBySpace: new Map([['cipherstash', cipherstashPinned]]), + markerRowsBySpace: new Map([['cipherstash', markerOf(cipherstashPinned)]]), + }); + expect(result.ok).toBe(true); + }); +}); diff --git a/packages/1-framework/3-tooling/migration/tsdown.config.ts b/packages/1-framework/3-tooling/migration/tsdown.config.ts index 4cc5b41316..ff1e46f0c3 100644 --- a/packages/1-framework/3-tooling/migration/tsdown.config.ts +++ b/packages/1-framework/3-tooling/migration/tsdown.config.ts @@ -14,6 +14,7 @@ export default defineConfig({ 'exports/constants': 'src/exports/constants.ts', 'exports/migration-ts': 'src/exports/migration-ts.ts', 'exports/migration': 'src/exports/migration.ts', + 'exports/spaces': 'src/exports/spaces.ts', }, exports: { enabled: false }, }); diff --git a/packages/2-mongo-family/9-family/src/core/control-instance.ts b/packages/2-mongo-family/9-family/src/core/control-instance.ts index d4100bef35..a16e34aa92 100644 --- a/packages/2-mongo-family/9-family/src/core/control-instance.ts +++ b/packages/2-mongo-family/9-family/src/core/control-instance.ts @@ -15,6 +15,7 @@ import type { 
VerifyDatabaseSchemaResult, } from '@prisma-next/framework-components/control'; import { + APP_SPACE_ID, VERIFY_CODE_HASH_MISMATCH, VERIFY_CODE_MARKER_MISSING, VERIFY_CODE_TARGET_MISMATCH, @@ -255,7 +256,15 @@ class MongoFamilyInstance implements MongoControlFamilyInstance { async readMarker(options: { readonly driver: ControlDriverInstance<'mongo', string>; + readonly space: string; }): Promise { + if (options.space !== APP_SPACE_ID) { + throw new Error( + 'Mongo target does not yet support per-space contract markers. ' + + `readMarker was called with space="${options.space}", but only "${APP_SPACE_ID}" is supported. ` + + 'Per-space marker support is tracked separately for Mongo and is not part of the SQL-family contract-spaces work.', + ); + } const db = extractDb(options.driver); return readMarker(db); } diff --git a/packages/2-sql/5-runtime/src/exports/index.ts b/packages/2-sql/5-runtime/src/exports/index.ts index 234c66967a..f641713b31 100644 --- a/packages/2-sql/5-runtime/src/exports/index.ts +++ b/packages/2-sql/5-runtime/src/exports/index.ts @@ -44,6 +44,7 @@ export { } from '../sql-context'; export type { SqlStatement } from '../sql-marker'; export { + APP_SPACE_ID, ensureSchemaStatement, ensureTableStatement, readContractMarker, diff --git a/packages/2-sql/5-runtime/src/sql-marker.ts b/packages/2-sql/5-runtime/src/sql-marker.ts index 575af80760..25796951ad 100644 --- a/packages/2-sql/5-runtime/src/sql-marker.ts +++ b/packages/2-sql/5-runtime/src/sql-marker.ts @@ -1,11 +1,23 @@ +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { MarkerStatement } from '@prisma-next/sql-relational-core/ast'; +export { APP_SPACE_ID }; + export interface SqlStatement { readonly sql: string; readonly params: readonly unknown[]; } export interface WriteMarkerInput { + /** + * Logical space identifier for this marker row. 
Required at every + * call site so the type system surfaces every place that needs to + * thread the value (rather than letting an `?? APP_SPACE_ID` + * fall-through silently collapse multi-space markers onto the + * `'app'` row). App-plan callers pass {@link APP_SPACE_ID} + * (`'app'`); per-extension callers pass the extension's space id. + */ + readonly space: string; readonly storageHash: string; readonly profileHash: string; readonly contractJson?: unknown; @@ -28,9 +40,20 @@ export const ensureSchemaStatement: SqlStatement = { params: [], }; +/** + * Schema for `prisma_contract.marker`. The `space text` primary key + * supports one row per loaded contract space (`'app'`, + * `''`, …); brand-new databases create this shape + * directly. Pre-1.0 single-row markers (no `space` column) are not + * auto-migrated — the target-specific migration runner detects the + * legacy shape at boot and surfaces a structured `LEGACY_MARKER_SHAPE` + * failure pointing the operator at re-running `dbInit`. + * + * @see specs/framework-mechanism.spec.md § 2. + */ export const ensureTableStatement: SqlStatement = { sql: `create table if not exists prisma_contract.marker ( - id smallint primary key default 1, + space text not null primary key default '${APP_SPACE_ID}', core_hash text not null, profile_hash text not null, contract_json jsonb, @@ -43,7 +66,7 @@ export const ensureTableStatement: SqlStatement = { params: [], }; -export function readContractMarker(): MarkerStatement { +export function readContractMarker(space: string): MarkerStatement { return { sql: `select core_hash, @@ -55,8 +78,8 @@ export function readContractMarker(): MarkerStatement { meta, invariants from prisma_contract.marker - where id = $1`, - params: [1], + where space = $1`, + params: [space], }; } @@ -67,7 +90,7 @@ export interface WriteContractMarkerStatements { /** * Variable columns that participate in INSERT/UPDATE alongside the - * always-on `id = $1` and `updated_at = now()`. 
Each column declares + * always-on `space = $1` and `updated_at = now()`. Each column declares * its name, optional cast type, and parameter value; the placeholder * (`$N`) is computed positionally below — adding or reordering a * column doesn't desync indices. `invariants` only appears when the @@ -91,17 +114,17 @@ function markerColumns( export function writeContractMarker(input: WriteMarkerInput): WriteContractMarkerStatements { const cols = markerColumns(input); - // $1 is reserved for `id`; subsequent positions follow the order of cols. + // $1 is reserved for `space`; subsequent positions follow the order of cols. const placed = cols.map((c, i) => ({ name: c.name, expr: c.type ? `$${i + 2}::${c.type}` : `$${i + 2}`, param: c.param, })); - const params: readonly unknown[] = [1, ...placed.map((c) => c.param)]; + const params: readonly unknown[] = [input.space, ...placed.map((c) => c.param)]; // `updated_at = now()` is a SQL literal with no parameter slot, so it // sits outside `placed` and is appended directly to each statement. 
- const insertColumns = ['id', ...placed.map((c) => c.name), 'updated_at'].join(', '); + const insertColumns = ['space', ...placed.map((c) => c.name), 'updated_at'].join(', '); const insertValues = ['$1', ...placed.map((c) => c.expr), 'now()'].join(', '); const setClauses = [...placed.map((c) => `${c.name} = ${c.expr}`), 'updated_at = now()'].join( ', ', @@ -113,7 +136,7 @@ export function writeContractMarker(input: WriteMarkerInput): WriteContractMarke params, }, update: { - sql: `update prisma_contract.marker set ${setClauses} where id = $1`, + sql: `update prisma_contract.marker set ${setClauses} where space = $1`, params, }, }; diff --git a/packages/2-sql/5-runtime/test/intercept-decoding.test.ts b/packages/2-sql/5-runtime/test/intercept-decoding.test.ts index f73f95ce03..7d4750f90c 100644 --- a/packages/2-sql/5-runtime/test/intercept-decoding.test.ts +++ b/packages/2-sql/5-runtime/test/intercept-decoding.test.ts @@ -80,8 +80,8 @@ function createStubAdapter(codecs: ReadonlyArray>) { }, readMarkerStatement() { return { - sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta from prisma_contract.marker where id = $1', - params: [1], + sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta from prisma_contract.marker where space = $1', + params: ['app'], }; }, parseMarkerRow: parseContractMarkerRow, diff --git a/packages/2-sql/5-runtime/test/marker-vs-intercept-ordering.test.ts b/packages/2-sql/5-runtime/test/marker-vs-intercept-ordering.test.ts index c00635d2dd..575644c408 100644 --- a/packages/2-sql/5-runtime/test/marker-vs-intercept-ordering.test.ts +++ b/packages/2-sql/5-runtime/test/marker-vs-intercept-ordering.test.ts @@ -65,8 +65,8 @@ function createStubAdapter(codecs: ReadonlyArray>) { }, readMarkerStatement() { return { - sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where id = 
$1', - params: [1], + sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where space = $1', + params: ['app'], }; }, parseMarkerRow: parseContractMarkerRow, diff --git a/packages/2-sql/5-runtime/test/sql-family-adapter.test.ts b/packages/2-sql/5-runtime/test/sql-family-adapter.test.ts index 8e5d2541c5..3eeec6169c 100644 --- a/packages/2-sql/5-runtime/test/sql-family-adapter.test.ts +++ b/packages/2-sql/5-runtime/test/sql-family-adapter.test.ts @@ -24,8 +24,8 @@ const testProfile: AdapterProfile = { target: 'postgres', capabilities: {}, readMarkerStatement: () => ({ - sql: 'SELECT core_hash, profile_hash FROM prisma_contract.marker WHERE id = $1', - params: [1], + sql: 'SELECT core_hash, profile_hash FROM prisma_contract.marker WHERE space = $1', + params: ['app'], }), parseMarkerRow: () => { throw new Error('not needed in test'); @@ -46,7 +46,7 @@ describe('SqlFamilyAdapter', () => { const stmt = adapter.markerReader.readMarkerStatement(); expect(stmt.sql).toContain('prisma_contract.marker'); - expect(stmt.params).toEqual([1]); + expect(stmt.params).toEqual(['app']); }); it('validates plan with matching target and hash', () => { diff --git a/packages/2-sql/5-runtime/test/sql-marker.test.ts b/packages/2-sql/5-runtime/test/sql-marker.test.ts index 869bab2f88..efce2cb452 100644 --- a/packages/2-sql/5-runtime/test/sql-marker.test.ts +++ b/packages/2-sql/5-runtime/test/sql-marker.test.ts @@ -1,9 +1,10 @@ import { describe, expect, it } from 'vitest'; -import { writeContractMarker } from '../src/sql-marker'; +import { APP_SPACE_ID, readContractMarker, writeContractMarker } from '../src/sql-marker'; describe('writeContractMarker', () => { describe('without invariants (sign-side)', () => { const sample = writeContractMarker({ + space: APP_SPACE_ID, storageHash: 'sha256:hash', profileHash: 'sha256:profile', }); @@ -20,10 +21,18 @@ describe('writeContractMarker', () => { 
expect(sample.insert.params).toHaveLength(7); expect(sample.update.params).toHaveLength(7); }); + + it('binds the caller-supplied space as the first param of the upsert', () => { + expect(sample.insert.sql).toMatch(/\(\s*space\b/); + expect(sample.update.sql).toMatch(/where space = \$1/i); + expect(sample.insert.params[0]).toBe(APP_SPACE_ID); + expect(sample.update.params[0]).toBe(APP_SPACE_ID); + }); }); describe('with explicit invariants (sign-side / explicit overwrite)', () => { const sample = writeContractMarker({ + space: APP_SPACE_ID, storageHash: 'sha256:hash', profileHash: 'sha256:profile', invariants: ['alpha', 'beta'], @@ -45,6 +54,7 @@ describe('writeContractMarker', () => { describe('with invariants: [] (explicit empty — clobber, not preserve)', () => { const sample = writeContractMarker({ + space: APP_SPACE_ID, storageHash: 'sha256:hash', profileHash: 'sha256:profile', invariants: [], @@ -55,4 +65,36 @@ describe('writeContractMarker', () => { expect(sample.update.params).toContainEqual([]); }); }); + + describe('with an extension space id (per-space callers)', () => { + const sample = writeContractMarker({ + space: 'cipherstash', + storageHash: 'sha256:hash', + profileHash: 'sha256:profile', + invariants: ['cipherstash:install-eql-v1'], + }); + + it('binds the caller-supplied space as the first param', () => { + expect(sample.insert.params[0]).toBe('cipherstash'); + expect(sample.update.params[0]).toBe('cipherstash'); + }); + + it('keys both INSERT and UPDATE by space, never by id', () => { + expect(sample.insert.sql).not.toMatch(/\bid\b/); + expect(sample.update.sql).toMatch(/where space = \$1/i); + }); + }); +}); + +describe('readContractMarker', () => { + it('binds the caller-supplied space id as the parameter', () => { + const stmt = readContractMarker('cipherstash'); + expect(stmt.sql).toMatch(/where space = \$1/i); + expect(stmt.params).toEqual(['cipherstash']); + }); + + it('binds APP_SPACE_ID when callers ask for the app marker explicitly', () 
=> { + const stmt = readContractMarker(APP_SPACE_ID); + expect(stmt.params).toEqual([APP_SPACE_ID]); + }); }); diff --git a/packages/2-sql/5-runtime/test/sql-runtime.test.ts b/packages/2-sql/5-runtime/test/sql-runtime.test.ts index 5f8d6b4a75..176172aeea 100644 --- a/packages/2-sql/5-runtime/test/sql-runtime.test.ts +++ b/packages/2-sql/5-runtime/test/sql-runtime.test.ts @@ -86,8 +86,8 @@ function createStubAdapter(extraCodecs: readonly Codec[] = []) { capabilities: {}, readMarkerStatement() { return { - sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where id = $1', - params: [1], + sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where space = $1', + params: ['app'], }; }, parseMarkerRow: parseContractMarkerRow, diff --git a/packages/2-sql/5-runtime/test/utils.ts b/packages/2-sql/5-runtime/test/utils.ts index 2c7f3ae581..f594126d3a 100644 --- a/packages/2-sql/5-runtime/test/utils.ts +++ b/packages/2-sql/5-runtime/test/utils.ts @@ -26,6 +26,7 @@ import { collectAsync, drainAsyncIterable } from '@prisma-next/test-utils'; import type { Client } from 'pg'; import type { SqlStatement } from '../src/exports'; import { + APP_SPACE_ID, createExecutionContext, type createRuntime, createSqlExecutionStack, @@ -100,6 +101,7 @@ export async function setupTestDatabase( await executeStatement(client, ensureSchemaStatement); await executeStatement(client, ensureTableStatement); const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contract.storage.storageHash, profileHash: contract.profileHash, contractJson: contract, @@ -116,6 +118,7 @@ export async function writeTestContractMarker( contract: Contract, ): Promise { const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contract.storage.storageHash, profileHash: contract.profileHash, contractJson: contract, @@ -295,8 
+298,8 @@ export function createStubAdapter(): StubAdapter { capabilities: {}, readMarkerStatement() { return { - sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where id = $1', - params: [1], + sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where space = $1', + params: ['app'], }; }, parseMarkerRow: parseContractMarkerRow, diff --git a/packages/2-sql/9-family/src/core/control-adapter.ts b/packages/2-sql/9-family/src/core/control-adapter.ts index 5b724aa59f..34b4c729b6 100644 --- a/packages/2-sql/9-family/src/core/control-adapter.ts +++ b/packages/2-sql/9-family/src/core/control-adapter.ts @@ -21,15 +21,24 @@ import type { DefaultNormalizer, NativeTypeNormalizer } from './schema-verify/ve export interface SqlControlAdapter extends ControlAdapterInstance<'sql', TTarget> { /** - * Reads the contract marker from the database, returning `null` if the marker - * table or its row is missing. Implementations are responsible for the - * dialect-specific existence probe (e.g. Postgres `information_schema.tables` - * vs SQLite `sqlite_master`) and parameter placeholders. + * Reads the contract marker for `space` from the database, returning + * `null` if no marker row exists for that space (or if the marker + * table itself is missing). Implementations are responsible for the + * dialect-specific existence probe (e.g. Postgres + * `information_schema.tables` vs SQLite `sqlite_master`) and parameter + * placeholders. + * + * `space` is required so callers cannot accidentally fall through to + * the app's marker row when reading per-extension markers. * * @param driver - ControlDriverInstance for executing queries (target-specific) + * @param space - Contract space id whose marker row to read (e.g. `'app'`) * @returns Resolved marker record, or `null` if not yet stamped. 
*/ - readMarker(driver: ControlDriverInstance<'sql', TTarget>): Promise; + readMarker( + driver: ControlDriverInstance<'sql', TTarget>, + space: string, + ): Promise; /** * Introspects a database schema and returns a raw SqlSchemaIR. diff --git a/packages/2-sql/9-family/src/core/control-instance.ts b/packages/2-sql/9-family/src/core/control-instance.ts index e682af2b86..96bac3cba4 100644 --- a/packages/2-sql/9-family/src/core/control-instance.ts +++ b/packages/2-sql/9-family/src/core/control-instance.ts @@ -20,6 +20,7 @@ import type { VerifyDatabaseSchemaResult, } from '@prisma-next/framework-components/control'; import { + APP_SPACE_ID, SchemaTreeNode, VERIFY_CODE_HASH_MISMATCH, VERIFY_CODE_MARKER_MISSING, @@ -355,7 +356,7 @@ export function createSqlFamilyInstance( const contractProfileHash = contract.profileHash; const contractTarget = contract.target; - const marker = await getControlAdapter().readMarker(driver); + const marker = await getControlAdapter().readMarker(driver, APP_SPACE_ID); let missingCodecs: readonly string[] | undefined; let codecCoverageSkipped = false; @@ -500,7 +501,7 @@ export function createSqlFamilyInstance( await driver.query(ensureSchemaStatement.sql, ensureSchemaStatement.params); await driver.query(ensureTableStatement.sql, ensureTableStatement.params); - const existingMarker = await getControlAdapter().readMarker(driver); + const existingMarker = await getControlAdapter().readMarker(driver, APP_SPACE_ID); let markerCreated = false; let markerUpdated = false; @@ -508,6 +509,7 @@ export function createSqlFamilyInstance( if (!existingMarker) { const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contractStorageHash, profileHash: contractProfileHash, contractJson: contractInput, @@ -528,6 +530,7 @@ export function createSqlFamilyInstance( profileHash: existingProfileHash, }; const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contractStorageHash, profileHash: contractProfileHash, contractJson: 
contractInput, @@ -576,8 +579,9 @@ export function createSqlFamilyInstance( }, async readMarker(options: { readonly driver: ControlDriverInstance<'sql', string>; + readonly space: string; }): Promise { - return getControlAdapter().readMarker(options.driver); + return getControlAdapter().readMarker(options.driver, options.space); }, async introspect(options: { readonly driver: ControlDriverInstance<'sql', string>; diff --git a/packages/2-sql/9-family/src/core/migrations/plan-helpers.ts b/packages/2-sql/9-family/src/core/migrations/plan-helpers.ts index 8daf8c8067..917021672d 100644 --- a/packages/2-sql/9-family/src/core/migrations/plan-helpers.ts +++ b/packages/2-sql/9-family/src/core/migrations/plan-helpers.ts @@ -98,6 +98,7 @@ export function createMigrationPlan( ): SqlMigrationPlan { return Object.freeze({ targetId: options.targetId, + spaceId: options.spaceId, ...(options.origin !== undefined ? { origin: options.origin ? Object.freeze({ ...options.origin }) : null } : {}), diff --git a/packages/2-sql/9-family/src/core/migrations/types.ts b/packages/2-sql/9-family/src/core/migrations/types.ts index 37589d35dd..69f216fa79 100644 --- a/packages/2-sql/9-family/src/core/migrations/types.ts +++ b/packages/2-sql/9-family/src/core/migrations/types.ts @@ -1,6 +1,7 @@ import type { Contract } from '@prisma-next/contract/types'; import type { TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; import type { + ContractSpace, ControlAdapterDescriptor, ControlDriverInstance, ControlExtensionDescriptor, @@ -128,6 +129,20 @@ export interface SqlControlExtensionDescriptor extends ControlExtensionDescriptor<'sql', TTargetId> { readonly databaseDependencies?: ComponentDatabaseDependencies; readonly queryOperations?: () => ReadonlyArray; + /** + * Schema-contributing extensions opt into the per-space planner / runner / + * verifier by setting this field. Extensions without it are codec-only or + * query-ops-only — today's behaviour preserved. 
+ * + * The shape comes from `@prisma-next/framework-components/control` + * (`ContractSpace`) — contract-space identity is a framework concept, + * not a SQL-specific one. The SQL family specialises the generic to + * `Contract` so descriptor authors continue to see a + * typed contract value. + * + * @see specs/framework-mechanism.spec.md § 1. + */ + readonly contractSpace?: ContractSpace>; } export interface SqlControlAdapterDescriptor @@ -182,6 +197,21 @@ export interface SqlMigrationPlanContractInfo { } export interface SqlMigrationPlan extends MigrationPlan { + /** + * Contract space this plan applies to. The runner uses this to key the + * `prisma_contract.marker` row it writes/reads (`space = `), + * so per-extension plans hit per-extension marker rows instead of all + * collapsing onto the app's row. + * + * App-plan callers pass `APP_SPACE_ID` (`'app'`); per-extension plans + * pass the extension's space id. Required at every call site so the + * type system surfaces every place that needs to thread the value + * (rather than letting an `?? APP_SPACE_ID` fall-through silently + * collapse multi-space markers onto the `'app'` row). + * + * @see specs/framework-mechanism.spec.md § 2. + */ + readonly spaceId: string; /** * Origin contract identity that the plan expects the database to currently be at. * If omitted or null, the runner skips origin validation entirely. @@ -246,6 +276,14 @@ export interface SqlMigrationPlannerPlanOptions { readonly schema: SqlSchemaIR; readonly policy: MigrationOperationPolicy; readonly schemaName?: string; + /** + * Contract space the plan applies to. The planner stamps this onto + * the produced {@link SqlMigrationPlan.spaceId} so the runner keys + * the marker row by the right space. App-plan callers pass + * `APP_SPACE_ID`; per-extension callers pass the extension's space + * id. 
+ */ + readonly spaceId: string; /** * The "from" contract (state the planner assumes the database starts at), * or `null` for reconciliation flows that have no prior contract. @@ -311,6 +349,7 @@ export interface SqlMigrationRunnerExecuteOptions { export type SqlMigrationRunnerErrorCode = | 'DESTINATION_CONTRACT_MISMATCH' + | 'LEGACY_MARKER_SHAPE' | 'MARKER_ORIGIN_MISMATCH' | 'POLICY_VIOLATION' | 'PRECHECK_FAILED' @@ -346,6 +385,10 @@ export interface SqlControlTargetDescriptor { readonly targetId: string; + /** + * Contract space this plan applies to. Mirrors {@link SqlMigrationPlan.spaceId}. + */ + readonly spaceId: string; readonly origin?: SqlMigrationPlanContractInfo | null; readonly destination: SqlMigrationPlanContractInfo; readonly operations: readonly SqlMigrationPlanOperation[]; diff --git a/packages/2-sql/9-family/test/migrations.types.test-d.ts b/packages/2-sql/9-family/test/migrations.types.test-d.ts index 719cadd342..b46cd4fd99 100644 --- a/packages/2-sql/9-family/test/migrations.types.test-d.ts +++ b/packages/2-sql/9-family/test/migrations.types.test-d.ts @@ -6,15 +6,22 @@ * to use core types while SQL-specific code uses the extended types. */ +import type { Contract } from '@prisma-next/contract/types'; import type { + ContractSpace, + ContractSpaceHeadRef, + MigrationMetadata, + MigrationPackage, MigrationPlan, MigrationPlannerConflict, MigrationPlanOperation, MigrationRunnerFailure, MigrationRunnerSuccessValue, } from '@prisma-next/framework-components/control'; +import type { MigrationOps } from '@prisma-next/migration-tools/package'; import { expectTypeOf } from 'vitest'; import type { + SqlControlExtensionDescriptor, SqlMigrationPlan, SqlMigrationPlanOperation, SqlMigrationRunnerFailure, @@ -51,3 +58,37 @@ expectTypeOf().toExtend< // Test that SqlMigrationRunnerFailure has the required core fields expectTypeOf().toExtend(); expectTypeOf().toExtend(); + +// Contract-space descriptor surface (project: extension contract spaces). 
+// +// `contractSpace` is the in-memory view a schema-contributing extension +// publishes via its descriptor module. The framework consumes it only at +// authoring time (`migrate`) — apply / verify paths read the user's repo. +// The shape locks down here so downstream emission, planning, and runner +// code can rely on it. +// +// The contract-space identity types live in +// `@prisma-next/framework-components/control`; the SQL family specialises +// `ContractSpace` to a SQL contract while the framework-level type stays +// family-agnostic. +expectTypeOf().toEqualTypeOf<{ + readonly hash: string; + readonly invariants: readonly string[]; +}>(); + +expectTypeOf().toEqualTypeOf(); +expectTypeOf().toEqualTypeOf(); +expectTypeOf().toEqualTypeOf(); + +expectTypeOf().toExtend<{ + readonly contractJson: Contract; + readonly migrations: readonly MigrationPackage[]; + readonly headRef: ContractSpaceHeadRef; +}>(); + +// `contractSpace` is optional on the descriptor (additive change — existing +// extensions without a contract space continue to typecheck unchanged). +// SQL family specialises the framework type to `Contract`. 
+expectTypeOf['contractSpace']>().toEqualTypeOf< + ContractSpace> | undefined +>(); diff --git a/packages/2-sql/9-family/test/migrations.types.test.ts b/packages/2-sql/9-family/test/migrations.types.test.ts index f50e191188..3d91d880c5 100644 --- a/packages/2-sql/9-family/test/migrations.types.test.ts +++ b/packages/2-sql/9-family/test/migrations.types.test.ts @@ -1,3 +1,4 @@ +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import { describe, expect, expectTypeOf, it } from 'vitest'; import { createMigrationPlan, @@ -30,6 +31,7 @@ describe('createMigrationPlan', () => { const plan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: { storageHash: 'originCore', profileHash: 'originProfile' }, destination: { storageHash: 'core', profileHash: 'profile' }, operations: sourceOperations as readonly SqlMigrationPlanOperation[], @@ -63,6 +65,7 @@ describe('createMigrationPlan', () => { const mutableDetails = { schema: 'public', objectType: 'table' as const, name: 'user' }; const plan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, destination: { storageHash: 'abc' }, operations: [ { @@ -96,6 +99,7 @@ describe('createMigrationPlan', () => { it('preserves primitive details without cloning', () => { const plan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, destination: { storageHash: 'abc' }, operations: [ { @@ -120,6 +124,7 @@ describe('createMigrationPlan', () => { const mutableArray = ['item1', 'item2']; const plan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, destination: { storageHash: 'abc' }, operations: [ { @@ -149,6 +154,7 @@ describe('planner helpers', () => { it('produce immutable envelopes that clone conflict metadata', () => { const plan: SqlMigrationPlan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, destination: { storageHash: 'abc', profileHash: 'def' }, operations: [], providedInvariants: [], diff --git 
a/packages/3-extensions/sql-orm-client/test/integration/runtime-helpers.ts b/packages/3-extensions/sql-orm-client/test/integration/runtime-helpers.ts index 02e96a0c22..87715afe37 100644 --- a/packages/3-extensions/sql-orm-client/test/integration/runtime-helpers.ts +++ b/packages/3-extensions/sql-orm-client/test/integration/runtime-helpers.ts @@ -155,7 +155,7 @@ export async function createPgIntegrationRuntime( export async function setupTestSchema(runtime: PgIntegrationRuntime): Promise { await runtime.query('create schema if not exists prisma_contract'); await runtime.query(`create table if not exists prisma_contract.marker ( - id smallint primary key default 1, + space text not null primary key default 'app', core_hash text not null, profile_hash text not null, contract_json jsonb, diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/planner-produced-postgres-migration.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/planner-produced-postgres-migration.ts index dc0fe1f4d1..996d607ddf 100644 --- a/packages/3-targets/3-targets/postgres/src/core/migrations/planner-produced-postgres-migration.ts +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/planner-produced-postgres-migration.ts @@ -41,11 +41,13 @@ export class TypeScriptRenderablePostgresMigration { readonly #calls: readonly PostgresOpFactoryCall[]; readonly #meta: MigrationMeta; + readonly #spaceId: string; - constructor(calls: readonly PostgresOpFactoryCall[], meta: MigrationMeta) { + constructor(calls: readonly PostgresOpFactoryCall[], meta: MigrationMeta, spaceId: string) { super(); this.#calls = calls; this.#meta = meta; + this.#spaceId = spaceId; } override get operations(): readonly Op[] { @@ -56,6 +58,15 @@ export class TypeScriptRenderablePostgresMigration return this.#meta; } + /** + * Contract space this planner-produced plan applies to. Threaded + * from the planner options so the runner keys the marker row by + * the right space when executing the plan. 
+ */ + get spaceId(): string { + return this.#spaceId; + } + renderTypeScript(): string { return renderCallsToTypeScript(this.#calls, { from: this.#meta.from, diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/planner.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/planner.ts index cc79b2a254..5932eddc6c 100644 --- a/packages/3-targets/3-targets/postgres/src/core/migrations/planner.ts +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/planner.ts @@ -101,15 +101,28 @@ export class PostgresMigrationPlanner implements MigrationPlanner<'sql', 'postgr readonly fromContract: Contract | null; readonly schemaName?: string; readonly frameworkComponents: ReadonlyArray>; + /** + * Contract space this plan applies to. Stamped onto the produced + * {@link TypeScriptRenderablePostgresMigration.spaceId} so the runner keys + * the marker row by the right space. + */ + readonly spaceId: string; }): PostgresPlanResult { return this.planSql(options as SqlMigrationPlannerPlanOptions); } - emptyMigration(context: MigrationScaffoldContext): MigrationPlanWithAuthoringSurface { - return new TypeScriptRenderablePostgresMigration([], { - from: context.fromHash, - to: context.toHash, - }); + emptyMigration( + context: MigrationScaffoldContext, + spaceId: string, + ): MigrationPlanWithAuthoringSurface { + return new TypeScriptRenderablePostgresMigration( + [], + { + from: context.fromHash, + to: context.toHash, + }, + spaceId, + ); } private planSql(options: SqlMigrationPlannerPlanOptions): PostgresPlanResult { @@ -147,10 +160,14 @@ export class PostgresMigrationPlanner implements MigrationPlanner<'sql', 'postgr return Object.freeze({ kind: 'success' as const, - plan: new TypeScriptRenderablePostgresMigration(result.value.calls, { - from: options.fromContract?.storage.storageHash ?? 
null, - to: options.contract.storage.storageHash, - }), + plan: new TypeScriptRenderablePostgresMigration( + result.value.calls, + { + from: options.fromContract?.storage.storageHash ?? null, + to: options.contract.storage.storageHash, + }, + options.spaceId, + ), }); } diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts index e8949ab8af..1bc0703a5d 100644 --- a/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts @@ -105,8 +105,14 @@ class PostgresMigrationRunner implements SqlMigrationRunner['driver'], - ): Promise { + ): Promise> { await this.executeStatement(driver, ensurePrismaContractSchemaStatement); + // Pre-1.0 zero-range guardrail: detect a pre-cleanup single-row + // marker table (no `space` column) and surface a structured failure + // rather than silently auto-migrating it to the per-space shape. + // See `specs/framework-mechanism.spec.md § 2`. + const legacyDetection = await this.detectLegacyMarkerShape(driver); + if (!legacyDetection.ok) { + return legacyDetection; + } await this.executeStatement(driver, ensureMarkerTableStatement); await this.executeStatement(driver, ensureLedgerTableStatement); + return okVoid(); + } + + private async detectLegacyMarkerShape( + driver: SqlMigrationRunnerExecuteOptions['driver'], + ): Promise> { + const result = await driver.query<{ column_name: string }>( + `select column_name + from information_schema.columns + where table_schema = 'prisma_contract' + and table_name = 'marker'`, + ); + if (result.rows.length === 0) { + return okVoid(); + } + const columns = new Set(result.rows.map((row) => row.column_name)); + if (columns.has('space')) { + return okVoid(); + } + return runnerFailure( + 'LEGACY_MARKER_SHAPE', + 'Legacy marker-table shape detected on prisma_contract.marker (no `space` column). 
' + + 'Prisma Next is in pre-1.0; the previous transitional auto-migration to the per-space-row schema has been removed. ' + + 'Drop `prisma_contract.marker` and re-run `dbInit` to reinitialise from a clean baseline.', + { + meta: { + table: 'prisma_contract.marker', + columns: [...columns].sort(), + }, + }, + ); } private async runExpectationSteps( @@ -548,6 +593,7 @@ class PostgresMigrationRunner implements SqlMigrationRunner { const incomingInvariants = options.plan.providedInvariants ?? []; const writeStatements = buildMergeMarkerStatements({ + space: options.plan.spaceId, storageHash: options.plan.destination.storageHash, profileHash: options.plan.destination.profileHash ?? diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/statement-builders.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/statement-builders.ts index 4e54da786e..3877d8807f 100644 --- a/packages/3-targets/3-targets/postgres/src/core/migrations/statement-builders.ts +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/statement-builders.ts @@ -1,3 +1,7 @@ +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; + +export { APP_SPACE_ID }; + export interface SqlStatement { readonly sql: string; readonly params: readonly unknown[]; @@ -8,9 +12,18 @@ export const ensurePrismaContractSchemaStatement: SqlStatement = { params: [], }; +/** + * Schema for `prisma_contract.marker`. The `space text` primary key + * supports one row per loaded contract space (`'app'`, + * `''`, …); on a brand-new database `CREATE TABLE IF NOT + * EXISTS` produces this shape directly. The migration runner detects + * pre-1.0 single-row markers (no `space` column) at boot and fails with + * a structured `LEGACY_MARKER_SHAPE` error rather than auto-migrating — + * see `specs/framework-mechanism.spec.md § 2`. 
+ */ export const ensureMarkerTableStatement: SqlStatement = { sql: `create table if not exists prisma_contract.marker ( - id smallint primary key default 1, + space text not null primary key default '${APP_SPACE_ID}', core_hash text not null, profile_hash text not null, contract_json jsonb, @@ -39,6 +52,16 @@ export const ensureLedgerTableStatement: SqlStatement = { }; export interface MergeMarkerInput { + /** + * Logical space identifier for this marker row. Required at every + * call site so the type system surfaces every place that needs to + * thread the value (rather than letting an `?? APP_SPACE_ID` + * fall-through silently collapse multi-space markers onto the + * `'app'` row). App-plan callers pass {@link APP_SPACE_ID} + * (`'app'`); per-extension callers (planner / runner / verifier + * extensions over contract spaces) pass the extension's space id. + */ + readonly space: string; readonly storageHash: string; readonly profileHash: string; readonly contractJson?: unknown; @@ -59,7 +82,7 @@ export function buildMergeMarkerStatements(input: MergeMarkerInput): { readonly update: SqlStatement; } { const params: readonly unknown[] = [ - 1, + input.space, input.storageHash, input.profileHash, jsonParam(input.contractJson), @@ -72,7 +95,7 @@ export function buildMergeMarkerStatements(input: MergeMarkerInput): { return { insert: { sql: `insert into prisma_contract.marker ( - id, + space, core_hash, profile_hash, contract_json, @@ -108,7 +131,7 @@ export function buildMergeMarkerStatements(input: MergeMarkerInput): { app_tag = $6, meta = $7::jsonb, invariants = array(select distinct unnest(invariants || $8::text[]) order by 1) - where id = $1`, + where space = $1`, params, }, }; diff --git a/packages/3-targets/3-targets/postgres/src/exports/statement-builders.ts b/packages/3-targets/3-targets/postgres/src/exports/statement-builders.ts index 1d681730c4..bb18dddc31 100644 --- a/packages/3-targets/3-targets/postgres/src/exports/statement-builders.ts +++ 
b/packages/3-targets/3-targets/postgres/src/exports/statement-builders.ts @@ -1,5 +1,6 @@ export type { SqlStatement } from '../core/migrations/statement-builders'; export { + APP_SPACE_ID, buildMergeMarkerStatements, ensureLedgerTableStatement, ensureMarkerTableStatement, diff --git a/packages/3-targets/3-targets/postgres/test/migrations/statement-builders.test.ts b/packages/3-targets/3-targets/postgres/test/migrations/statement-builders.test.ts index 0a985f8225..a27aaac0f4 100644 --- a/packages/3-targets/3-targets/postgres/test/migrations/statement-builders.test.ts +++ b/packages/3-targets/3-targets/postgres/test/migrations/statement-builders.test.ts @@ -1,8 +1,53 @@ +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import { describe, expect, test } from 'vitest'; -import { ensureMarkerTableStatement } from '../../src/core/migrations/statement-builders'; +import { + buildMergeMarkerStatements, + ensureMarkerTableStatement, +} from '../../src/core/migrations/statement-builders'; describe('ensureMarkerTableStatement', () => { test('declares the invariants column as text[] not null default empty array', () => { expect(ensureMarkerTableStatement.sql).toContain("invariants text[] not null default '{}'"); }); + + test('keys the marker by `space text` (PRIMARY KEY) instead of the legacy single-row `id`', () => { + expect(ensureMarkerTableStatement.sql).toMatch(/space\s+text\s+not null/i); + // PK can be either inline (`space text ... primary key`) or a + // table-level constraint (`primary key (space)`); both forms are + // valid as long as `space` is the only key column. 
+ expect(ensureMarkerTableStatement.sql).toMatch( + /space\s+text\s+not null\s+primary key|primary key\s*\(\s*space\s*\)/i, + ); + }); + + test('does not declare a legacy `id smallint` primary-key column', () => { + expect(ensureMarkerTableStatement.sql).not.toMatch(/id\s+smallint/i); + }); +}); + +describe('buildMergeMarkerStatements', () => { + test('keys the upsert by `space` and binds the caller-supplied app space', () => { + const stmts = buildMergeMarkerStatements({ + space: APP_SPACE_ID, + storageHash: 'sha256:dest', + profileHash: 'sha256:profile', + invariants: [], + }); + expect(stmts.insert.sql).toMatch(/\(\s*space\b/); + expect(stmts.insert.sql).not.toMatch(/\bid\b/); + expect(stmts.update.sql).toMatch(/where space = \$1/i); + expect(stmts.insert.params[0]).toBe(APP_SPACE_ID); + expect(stmts.update.params[0]).toBe(APP_SPACE_ID); + }); + + test('honours a caller-supplied `space` value', () => { + const stmts = buildMergeMarkerStatements({ + space: 'cipherstash', + storageHash: 'sha256:dest', + profileHash: 'sha256:profile', + invariants: [], + }); + expect(stmts.insert.params[0]).toBe('cipherstash'); + expect(stmts.update.params[0]).toBe('cipherstash'); + }); }); diff --git a/packages/3-targets/3-targets/sqlite/src/core/migrations/planner-produced-sqlite-migration.ts b/packages/3-targets/3-targets/sqlite/src/core/migrations/planner-produced-sqlite-migration.ts index 2cf1f03820..5edefe245f 100644 --- a/packages/3-targets/3-targets/sqlite/src/core/migrations/planner-produced-sqlite-migration.ts +++ b/packages/3-targets/3-targets/sqlite/src/core/migrations/planner-produced-sqlite-migration.ts @@ -22,15 +22,18 @@ export class TypeScriptRenderableSqliteMigration readonly #calls: readonly SqliteOpFactoryCall[]; readonly #meta: MigrationMeta; readonly #destination: SqliteMigrationDestinationInfo; + readonly #spaceId: string; constructor( calls: readonly SqliteOpFactoryCall[], meta: MigrationMeta, + spaceId: string, destination?: SqliteMigrationDestinationInfo, 
) { super(); this.#calls = calls; this.#meta = meta; + this.#spaceId = spaceId; this.#destination = destination ?? { storageHash: meta.to }; } @@ -46,6 +49,15 @@ export class TypeScriptRenderableSqliteMigration return this.#destination; } + /** + * Contract space this planner-produced plan applies to. Threaded + * from {@link SqlMigrationPlannerPlanOptions.spaceId} so the runner + * keys the marker row by the right space when executing the plan. + */ + get spaceId(): string { + return this.#spaceId; + } + renderTypeScript(): string { return renderCallsToTypeScript(this.#calls, { from: this.#meta.from, diff --git a/packages/3-targets/3-targets/sqlite/src/core/migrations/planner.ts b/packages/3-targets/3-targets/sqlite/src/core/migrations/planner.ts index 9ff37bf8f3..95ac4d32b2 100644 --- a/packages/3-targets/3-targets/sqlite/src/core/migrations/planner.ts +++ b/packages/3-targets/3-targets/sqlite/src/core/migrations/planner.ts @@ -64,15 +64,28 @@ export class SqliteMigrationPlanner */ readonly fromContract: Contract | null; readonly frameworkComponents: ReadonlyArray>; + /** + * Contract space this plan applies to. Stamped onto the produced + * {@link TypeScriptRenderableSqliteMigration.spaceId} so the runner keys + * the marker row by the right space. 
+ */ + readonly spaceId: string; }): SqlitePlanResult { return this.planSql(options as SqlMigrationPlannerPlanOptions); } - emptyMigration(context: MigrationScaffoldContext): TypeScriptRenderableSqliteMigration { - return new TypeScriptRenderableSqliteMigration([], { - from: context.fromHash, - to: context.toHash, - }); + emptyMigration( + context: MigrationScaffoldContext, + spaceId: string, + ): TypeScriptRenderableSqliteMigration { + return new TypeScriptRenderableSqliteMigration( + [], + { + from: context.fromHash, + to: context.toHash, + }, + spaceId, + ); } private planSql(options: SqlMigrationPlannerPlanOptions): SqlitePlanResult { @@ -114,6 +127,7 @@ export class SqliteMigrationPlanner from: options.fromContract?.storage.storageHash ?? null, to: options.contract.storage.storageHash, }, + options.spaceId, destination, ), }; diff --git a/packages/3-targets/3-targets/sqlite/src/core/migrations/runner.ts b/packages/3-targets/3-targets/sqlite/src/core/migrations/runner.ts index 789197010b..fc51fc97f6 100644 --- a/packages/3-targets/3-targets/sqlite/src/core/migrations/runner.ts +++ b/packages/3-targets/3-targets/sqlite/src/core/migrations/runner.ts @@ -24,6 +24,7 @@ import { buildWriteMarkerStatements, ensureLedgerTableStatement, ensureMarkerTableStatement, + MARKER_TABLE_NAME, readMarkerStatement, type SqlStatement, } from './statement-builders'; @@ -69,8 +70,11 @@ class SqliteMigrationRunner implements SqlMigrationRunner['driver'], - ): Promise { + ): Promise> { + // Pre-1.0 zero-range guardrail: detect a pre-cleanup single-row + // marker table (no `space` column) and surface a structured failure + // rather than silently rebuilding the table into the per-space + // shape. See `specs/framework-mechanism.spec.md § 2`. 
+ const legacyDetection = await this.detectLegacyMarkerShape(driver); + if (!legacyDetection.ok) { + return legacyDetection; + } await this.executeStatement(driver, ensureMarkerTableStatement); await this.executeStatement(driver, ensureLedgerTableStatement); + return okVoid(); + } + + private async detectLegacyMarkerShape( + driver: SqlMigrationRunnerExecuteOptions['driver'], + ): Promise> { + const tableInfo = await driver.query<{ name: string }>( + `PRAGMA table_info("${MARKER_TABLE_NAME}")`, + ); + if (tableInfo.rows.length === 0) { + return okVoid(); + } + const columns = new Set(tableInfo.rows.map((row) => row.name)); + if (columns.has('space')) { + return okVoid(); + } + return runnerFailure( + 'LEGACY_MARKER_SHAPE', + `Legacy marker-table shape detected on ${MARKER_TABLE_NAME} (no \`space\` column). ` + + 'Prisma Next is in pre-1.0; the previous transitional auto-migration to the per-space-row schema has been removed. ' + + `Drop \`${MARKER_TABLE_NAME}\` and re-run \`dbInit\` to reinitialise from a clean baseline.`, + { + meta: { + table: MARKER_TABLE_NAME, + columns: [...columns].sort(), + }, + }, + ); } private async readMarker( driver: SqlMigrationRunnerExecuteOptions['driver'], + space: string, ): Promise { - const stmt = readMarkerStatement(); + const stmt = readMarkerStatement(space); try { const result = await driver.query(stmt.sql, stmt.params); const row = result.rows[0]; @@ -527,6 +568,7 @@ class SqliteMigrationRunner implements SqlMigrationRunner = new Set([ LEDGER_TABLE_NAME, ]); +/** + * Schema for `_prisma_marker`. The `space TEXT PRIMARY KEY` shape + * supports one row per loaded contract space (`'app'`, + * `''`, …); brand-new databases create this shape + * directly. The migration runner detects pre-1.0 single-row markers + * (no `space` column) at boot and fails with a structured + * `LEGACY_MARKER_SHAPE` error rather than auto-rebuilding the table — + * see `specs/framework-mechanism.spec.md § 2`. 
+ */ export const ensureMarkerTableStatement: SqlStatement = { sql: `CREATE TABLE IF NOT EXISTS _prisma_marker ( - id INTEGER PRIMARY KEY CHECK (id = 1), + space TEXT NOT NULL PRIMARY KEY DEFAULT '${APP_SPACE_ID}', core_hash TEXT NOT NULL, profile_hash TEXT NOT NULL, contract_json TEXT, @@ -45,7 +58,7 @@ export const ensureLedgerTableStatement: SqlStatement = { params: [], }; -export function readMarkerStatement(): SqlStatement { +export function readMarkerStatement(space: string): SqlStatement { return { sql: `SELECT core_hash, @@ -57,12 +70,21 @@ export function readMarkerStatement(): SqlStatement { meta, invariants FROM _prisma_marker - WHERE id = ?`, - params: [1], + WHERE space = ?`, + params: [space], }; } export interface WriteMarkerInput { + /** + * Logical space identifier for this marker row. Required at every + * call site so the type system surfaces every place that needs to + * thread the value (rather than letting an `?? APP_SPACE_ID` + * fall-through silently collapse multi-space markers onto the + * `'app'` row). App-plan callers pass {@link APP_SPACE_ID} + * (`'app'`); per-extension callers pass the extension's space id. + */ + readonly space: string; readonly storageHash: string; readonly profileHash: string; readonly contractJson?: unknown; @@ -84,7 +106,7 @@ export function buildWriteMarkerStatements(input: WriteMarkerInput): { readonly update: SqlStatement; } { const params: readonly unknown[] = [ - 1, + input.space, input.storageHash, input.profileHash, jsonParam(input.contractJson), @@ -97,7 +119,7 @@ export function buildWriteMarkerStatements(input: WriteMarkerInput): { return { insert: { sql: `INSERT INTO _prisma_marker ( - id, + space, core_hash, profile_hash, contract_json, @@ -129,7 +151,7 @@ export function buildWriteMarkerStatements(input: WriteMarkerInput): { app_tag = ?, meta = ?, invariants = ? 
- WHERE id = ?`, + WHERE space = ?`, params: [ input.storageHash, input.profileHash, @@ -138,7 +160,7 @@ export function buildWriteMarkerStatements(input: WriteMarkerInput): { input.appTag ?? null, jsonParam(input.meta ?? {}), jsonParam(input.invariants), - 1, + input.space, ], }, }; diff --git a/packages/3-targets/3-targets/sqlite/src/exports/statement-builders.ts b/packages/3-targets/3-targets/sqlite/src/exports/statement-builders.ts index 8d5b994707..ded6d9ac77 100644 --- a/packages/3-targets/3-targets/sqlite/src/exports/statement-builders.ts +++ b/packages/3-targets/3-targets/sqlite/src/exports/statement-builders.ts @@ -1,4 +1,5 @@ export { + APP_SPACE_ID, buildLedgerInsertStatement, buildWriteMarkerStatements, CONTROL_TABLE_NAMES, diff --git a/packages/3-targets/3-targets/sqlite/test/migrations/nullability-backfill.test.ts b/packages/3-targets/3-targets/sqlite/test/migrations/nullability-backfill.test.ts index 70e2a53baf..257485f109 100644 --- a/packages/3-targets/3-targets/sqlite/test/migrations/nullability-backfill.test.ts +++ b/packages/3-targets/3-targets/sqlite/test/migrations/nullability-backfill.test.ts @@ -1,4 +1,5 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageColumn, StorageTable } from '@prisma-next/sql-contract/types'; import type { SqlTableIR } from '@prisma-next/sql-schema-ir/types'; import { describe, expect, it } from 'vitest'; @@ -91,6 +92,7 @@ describe('nullability-tightening backfill', () => { policy: { allowedOperationClasses: ['additive', 'destructive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -109,6 +111,7 @@ describe('nullability-tightening backfill', () => { policy: { allowedOperationClasses: ['additive', 'destructive', 'data'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); 
expect(result.kind).toBe('success'); @@ -173,6 +176,7 @@ describe('nullability-tightening backfill', () => { policy: { allowedOperationClasses: ['additive', 'widening', 'data'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); diff --git a/packages/3-targets/3-targets/sqlite/test/migrations/planner.authoring-surface.test.ts b/packages/3-targets/3-targets/sqlite/test/migrations/planner.authoring-surface.test.ts index 4e145922c0..941a60be24 100644 --- a/packages/3-targets/3-targets/sqlite/test/migrations/planner.authoring-surface.test.ts +++ b/packages/3-targets/3-targets/sqlite/test/migrations/planner.authoring-surface.test.ts @@ -1,4 +1,5 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { describe, expect, it } from 'vitest'; @@ -54,6 +55,7 @@ describe('SqliteMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: fromContractWithHash('sha256:from'), frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -71,6 +73,7 @@ describe('SqliteMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') throw new Error('expected success'); @@ -87,6 +90,7 @@ describe('SqliteMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') throw new Error('expected success'); @@ -102,6 +106,7 @@ describe('SqliteMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['additive'] }, 
fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') throw new Error('expected success'); @@ -119,6 +124,7 @@ describe('SqliteMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') throw new Error('expected success'); @@ -136,6 +142,7 @@ describe('SqliteMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: fromContractWithHash('sha256:from'), frameworkComponents: [], + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') throw new Error('expected success'); @@ -152,11 +159,14 @@ describe('SqliteMigrationPlanner authoring surface', () => { describe('emptyMigration(context)', () => { it("identifies as the 'sqlite' target with no operations and the supplied destination hash", () => { const planner = createSqliteMigrationPlanner(); - const empty = planner.emptyMigration({ - packageDir: '/tmp/migration-pkg', - fromHash: null, - toHash: 'sha256:to', - }); + const empty = planner.emptyMigration( + { + packageDir: '/tmp/migration-pkg', + fromHash: null, + toHash: 'sha256:to', + }, + APP_SPACE_ID, + ); expect(empty.targetId).toBe('sqlite'); expect(empty.operations).toEqual([]); @@ -165,11 +175,14 @@ describe('SqliteMigrationPlanner authoring surface', () => { it('renders a stub whose describe() carries from/to and whose operations list is empty', () => { const planner = createSqliteMigrationPlanner(); - const empty = planner.emptyMigration({ - packageDir: '/tmp/migration-pkg', - fromHash: 'sha256:from', - toHash: 'sha256:to', - }); + const empty = planner.emptyMigration( + { + packageDir: '/tmp/migration-pkg', + fromHash: 'sha256:from', + toHash: 'sha256:to', + }, + APP_SPACE_ID, + ); const source = empty.renderTypeScript(); expect(source).toContain("from '@prisma-next/target-sqlite/migration'"); @@ -189,6 +202,7 @@ 
describe('SqliteMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['widening', 'destructive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('failure'); diff --git a/packages/3-targets/6-adapters/postgres/src/core/adapter.ts b/packages/3-targets/6-adapters/postgres/src/core/adapter.ts index 77931295dd..d1b302b323 100644 --- a/packages/3-targets/6-adapters/postgres/src/core/adapter.ts +++ b/packages/3-targets/6-adapters/postgres/src/core/adapter.ts @@ -1,4 +1,5 @@ import type { CodecLookup } from '@prisma-next/framework-components/codec'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { Adapter, AdapterProfile, @@ -42,8 +43,8 @@ class PostgresAdapterImpl target: 'postgres', capabilities: defaultCapabilities, readMarkerStatement: () => ({ - sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where id = $1', - params: [1], + sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from prisma_contract.marker where space = $1', + params: [APP_SPACE_ID], }), // Postgres' driver hydrates `text[]` columns as native JS arrays, so the row is already in the shape the shared parser expects. 
parseMarkerRow: (row: unknown) => parseContractMarkerRow(row), diff --git a/packages/3-targets/6-adapters/postgres/src/core/control-adapter.ts b/packages/3-targets/6-adapters/postgres/src/core/control-adapter.ts index daa3fb8dc0..bd02fb9f6f 100644 --- a/packages/3-targets/6-adapters/postgres/src/core/control-adapter.ts +++ b/packages/3-targets/6-adapters/postgres/src/core/control-adapter.ts @@ -83,6 +83,7 @@ export class PostgresControlAdapter implements SqlControlAdapter<'postgres'> { */ async readMarker( driver: ControlDriverInstance<'sql', 'postgres'>, + space: string, ): Promise { const exists = await driver.query( `select 1 @@ -114,8 +115,8 @@ export class PostgresControlAdapter implements SqlControlAdapter<'postgres'> { meta, invariants from prisma_contract.marker - where id = $1`, - [1], + where space = $1`, + [space], ); const row = result.rows[0]; diff --git a/packages/3-targets/6-adapters/postgres/test/adapter.test.ts b/packages/3-targets/6-adapters/postgres/test/adapter.test.ts index b0be7be6b5..91b0d623ef 100644 --- a/packages/3-targets/6-adapters/postgres/test/adapter.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/adapter.test.ts @@ -295,7 +295,8 @@ describe('Postgres adapter', () => { const marker = adapter.profile.readMarkerStatement(); expect(marker.sql).toContain('from prisma_contract.marker'); - expect(marker.params).toEqual([1]); + expect(marker.sql).toMatch(/where space = \$1/i); + expect(marker.params).toEqual(['app']); }); it('honours an overridden profile id from PostgresAdapterOptions', () => { diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/fixtures/runner-fixtures.ts b/packages/3-targets/6-adapters/postgres/test/migrations/fixtures/runner-fixtures.ts index 2923576c2c..72319c238c 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/fixtures/runner-fixtures.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/fixtures/runner-fixtures.ts @@ -4,7 +4,7 @@ import sqlFamilyDescriptor, { 
createMigrationPlan, type SqlMigrationRunnerFailure, } from '@prisma-next/family-sql/control'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { APP_SPACE_ID, createControlStack } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import postgresTargetDescriptor from '@prisma-next/target-postgres/control'; @@ -73,6 +73,7 @@ export async function resetDatabase(driver: PostgresControlDriver): Promise({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/op-factory-call.lowering.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/op-factory-call.lowering.test.ts index b510da9d28..3376e2d98a 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/op-factory-call.lowering.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/op-factory-call.lowering.test.ts @@ -14,6 +14,7 @@ * aggregator are covered in op-factory-call.rendering.test.ts. 
*/ +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import { AddColumnCall, AddEnumValuesCall, @@ -248,7 +249,7 @@ describe('renderOps', () => { describe('TypeScriptRenderablePostgresMigration', () => { it('identifies as postgres, derives destination from meta.to, and materializes operations via renderOps', () => { const calls = [new DropTableCall('public', 'stale')]; - const migration = new TypeScriptRenderablePostgresMigration(calls, META); + const migration = new TypeScriptRenderablePostgresMigration(calls, META, APP_SPACE_ID); expect(migration.targetId).toBe('postgres'); expect(migration.destination).toEqual({ storageHash: 'sha256:to' }); @@ -261,7 +262,7 @@ describe('TypeScriptRenderablePostgresMigration', () => { it('renders TypeScript source mirroring renderCallsToTypeScript output', () => { const calls = [new DropTableCall('public', 'stale')]; - const migration = new TypeScriptRenderablePostgresMigration(calls, META); + const migration = new TypeScriptRenderablePostgresMigration(calls, META, APP_SPACE_ID); const source = migration.renderTypeScript(); expect(source).toContain( diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.authoring-surface.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.authoring-surface.test.ts index f8ec055d36..1826105e46 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.authoring-surface.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.authoring-surface.test.ts @@ -4,9 +4,10 @@ import { extractCodecControlHooks, type NativeTypeExpander, } from '@prisma-next/family-sql/control'; -import type { - MigrationPlanner, - MigrationPlannerSuccessResult, +import { + APP_SPACE_ID, + type MigrationPlanner, + type MigrationPlannerSuccessResult, } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import postgresTargetDescriptor, { @@ -67,6 +68,7 @@ 
describe('PostgresMigrationPlanner authoring surface', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') { @@ -87,11 +89,14 @@ describe('PostgresMigrationPlanner authoring surface', () => { describe('emptyMigration(context)', () => { it("identifies as the 'postgres' target with no operations and the supplied destination hash", () => { const planner = makeFrameworkPlanner(); - const empty = planner.emptyMigration({ - packageDir: '/tmp/migration-pkg', - fromHash: null, - toHash: 'sha256:to', - }); + const empty = planner.emptyMigration( + { + packageDir: '/tmp/migration-pkg', + fromHash: null, + toHash: 'sha256:to', + }, + APP_SPACE_ID, + ); expect(empty.targetId).toBe('postgres'); expect(empty.operations).toEqual([]); @@ -100,11 +105,14 @@ describe('PostgresMigrationPlanner authoring surface', () => { it('renders a stub whose describe() carries from/to and whose operations list is empty', () => { const planner = makeFrameworkPlanner(); - const empty = planner.emptyMigration({ - packageDir: '/tmp/migration-pkg', - fromHash: 'sha256:from', - toHash: 'sha256:to', - }); + const empty = planner.emptyMigration( + { + packageDir: '/tmp/migration-pkg', + fromHash: 'sha256:from', + toHash: 'sha256:to', + }, + APP_SPACE_ID, + ); const source = empty.renderTypeScript(); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.behavior.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.behavior.test.ts index b92fbef308..5e5798610a 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.behavior.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.behavior.test.ts @@ -7,6 +7,7 @@ import { import pgvectorDescriptor from '@prisma-next/extension-pgvector/control'; import { type CodecControlHooks, INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; import type { 
TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageTable } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -42,6 +43,7 @@ describe('PostgresMigrationPlanner - subset/superset/conflict handling', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result).toMatchObject({ @@ -73,6 +75,7 @@ describe('PostgresMigrationPlanner - subset/superset/conflict handling', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -113,6 +116,7 @@ describe('PostgresMigrationPlanner - subset/superset/conflict handling', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result).toMatchObject({ @@ -671,6 +675,7 @@ function planUserTableOperations( policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: options?.frameworkComponents ?? 
[], + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') throw new Error('expected planner success'); return result.plan.operations; diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.case1.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.case1.test.ts index 9884bc9ef7..5560269a17 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.case1.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.case1.test.ts @@ -5,6 +5,7 @@ import type { SqlControlExtensionDescriptor, } from '@prisma-next/family-sql/control'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageColumn } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -99,6 +100,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -167,6 +169,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { }, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -209,6 +212,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -295,6 +299,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [postgresAdapterDescriptor], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -352,6 +357,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { 
policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [pgvectorDescriptor], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -379,6 +385,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -409,6 +416,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [extensionWithoutDeps], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -452,6 +460,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -514,6 +523,7 @@ describe('PostgresMigrationPlanner - when database is empty', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -568,6 +578,7 @@ describe('PostgresMigrationPlanner - composite unique constraint DDL', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -634,6 +645,7 @@ describe('PostgresMigrationPlanner - column defaults', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); if (result.kind !== 'success') throw new Error(`Expected success: ${JSON.stringify(result)}`); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.contract-to-schema-ir.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.contract-to-schema-ir.test.ts index 9400b96af0..f5cdd7eed5 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.contract-to-schema-ir.test.ts +++ 
b/packages/3-targets/6-adapters/postgres/test/migrations/planner.contract-to-schema-ir.test.ts @@ -12,6 +12,7 @@ import { extractCodecControlHooks, } from '@prisma-next/family-sql/control'; import type { TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageColumn, StorageTable } from '@prisma-next/sql-contract/types'; import { postgresRenderDefault } from '@prisma-next/target-postgres/control'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -87,6 +88,7 @@ function planFromStorages( policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); } @@ -122,6 +124,7 @@ describe('contractToSchemaIR → planner round-trip', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -160,6 +163,7 @@ describe('contractToSchemaIR → planner round-trip', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -224,6 +228,7 @@ describe('contractToSchemaIR → planner round-trip', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -278,6 +283,7 @@ describe('contractToSchemaIR → planner round-trip', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -848,6 +854,7 @@ describe('incremental migration with full contract surface (extensions, enums, F policy: { allowedOperationClasses: ['additive', 'widening', 'destructive'] }, fromContract: null, frameworkComponents, + spaceId: 
APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -877,6 +884,7 @@ describe('incremental migration with full contract surface (extensions, enums, F policy: { allowedOperationClasses: ['additive', 'widening', 'destructive'] }, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -901,6 +909,7 @@ describe('incremental migration with full contract surface (extensions, enums, F policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.fk-config.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.fk-config.test.ts index 6407f812cf..83d0beb7ab 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.fk-config.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.fk-config.test.ts @@ -1,5 +1,6 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -71,6 +72,7 @@ describe('PostgresMigrationPlanner - per-FK config combinations', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -89,6 +91,7 @@ describe('PostgresMigrationPlanner - per-FK config combinations', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -108,6 +111,7 @@ describe('PostgresMigrationPlanner - per-FK config 
combinations', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -126,6 +130,7 @@ describe('PostgresMigrationPlanner - per-FK config combinations', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -150,6 +155,7 @@ describe('PostgresMigrationPlanner - per-FK config combinations', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -174,6 +180,7 @@ describe('PostgresMigrationPlanner - per-FK config combinations', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.integration.test.ts index c1a8274f69..0d441d9745 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.integration.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; import { @@ -51,6 +52,7 @@ describe.sequential('PostgresMigrationPlanner - integration (existing schemas)', policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (initialPlan.kind !== 'success') { throw new Error('expected initial plan success'); @@ -76,6 +78,7 @@ describe.sequential('PostgresMigrationPlanner - integration (existing schemas)', policy: 
INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(supersetResult).toMatchObject({ kind: 'success', @@ -95,6 +98,7 @@ describe.sequential('PostgresMigrationPlanner - integration (existing schemas)', policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(subsetResult.kind).toBe('success'); @@ -127,6 +131,7 @@ describe.sequential('PostgresMigrationPlanner - integration (existing schemas)', policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(planResult.kind).toBe('success'); @@ -163,6 +168,7 @@ describe.sequential('PostgresMigrationPlanner - integration (existing schemas)', policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(conflictResult).toMatchObject({ diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.integration.test.ts index a8ea3493a1..a0ea208a37 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.integration.test.ts @@ -1,6 +1,9 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; -import type { MigrationOperationPolicy } from '@prisma-next/framework-components/control'; +import { + APP_SPACE_ID, + type MigrationOperationPolicy, +} from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageTable } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; @@ -60,6 +63,7 @@ async function applyBaseline( policy: 
INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') { throw new Error(`baseline planner failed: ${JSON.stringify(result)}`); @@ -92,6 +96,7 @@ async function planAndExecute( policy: RECONCILIATION_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (planResult.kind !== 'success') { throw new Error(`planner failed: ${JSON.stringify(planResult, null, 2)}`); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.test.ts index 2e3ed7cdc1..f7f3f1fda6 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.reconciliation.test.ts @@ -1,5 +1,8 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; -import type { MigrationOperationPolicy } from '@prisma-next/framework-components/control'; +import { + APP_SPACE_ID, + type MigrationOperationPolicy, +} from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -54,6 +57,7 @@ describe('PostgresMigrationPlanner - reconciliation planning', () => { policy: RECONCILIATION_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -107,6 +111,7 @@ describe('PostgresMigrationPlanner - reconciliation planning', () => { policy: WIDENING_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -159,6 +164,7 @@ describe('PostgresMigrationPlanner - reconciliation planning', () => { policy: WIDENING_POLICY, fromContract: null, 
frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result).toMatchObject({ diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.referential-actions.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.referential-actions.test.ts index 1caa79140a..7885a24a9b 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.referential-actions.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.referential-actions.test.ts @@ -1,5 +1,6 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { ForeignKey, ReferentialAction, SqlStorage } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -68,6 +69,7 @@ function planAndGetFkSql(onDelete?: ReferentialAction, onUpdate?: ReferentialAct policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.semantic-satisfaction.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.semantic-satisfaction.test.ts index 80aadeff17..9631c09492 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.semantic-satisfaction.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.semantic-satisfaction.test.ts @@ -8,6 +8,7 @@ */ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage } from 
'@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -60,6 +61,7 @@ describe('PostgresMigrationPlanner - semantic satisfaction', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -115,6 +117,7 @@ describe('PostgresMigrationPlanner - semantic satisfaction', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -168,6 +171,7 @@ describe('PostgresMigrationPlanner - semantic satisfaction', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -223,6 +227,7 @@ describe('PostgresMigrationPlanner - semantic satisfaction', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); // Should succeed (no conflicts) and emit no operations (semantic match) diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.integration.test.ts index 53e5dee7a8..4a45efa045 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.integration.test.ts @@ -1,5 +1,6 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { expectNarrowedType } from '@prisma-next/test-utils/typed-expectations'; import { 
afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; @@ -86,6 +87,7 @@ describe.sequential('PostgresMigrationPlanner - Storage Types Integration', () = policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expectNarrowedType(planResult.kind === 'success'); @@ -159,6 +161,7 @@ describe.sequential('PostgresMigrationPlanner - Storage Types Integration', () = policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expectNarrowedType(planResult.kind === 'success'); @@ -189,6 +192,7 @@ describe.sequential('PostgresMigrationPlanner - Storage Types Integration', () = policy: { allowedOperationClasses: ['additive', 'widening'] }, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expectNarrowedType(planResult.kind === 'success'); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.test.ts index 15f3caa78b..6ab95b6daa 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/planner.storage-types.test.ts @@ -3,6 +3,7 @@ import pgvectorDescriptor from '@prisma-next/extension-pgvector/control'; import type { CodecControlHooks } from '@prisma-next/family-sql/control'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; import type { TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import { createPostgresMigrationPlanner } from '@prisma-next/target-postgres/planner'; @@ -94,6 +95,7 @@ describe('PostgresMigrationPlanner - storage types', () => { policy: 
INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expectNarrowedType(result.kind === 'success'); @@ -163,6 +165,7 @@ describe('PostgresMigrationPlanner - storage types', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result).toMatchObject({ @@ -256,6 +259,7 @@ describe('PostgresMigrationPlanner - storage types', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expectNarrowedType(result.kind === 'success'); @@ -316,6 +320,7 @@ describe('PostgresMigrationPlanner - storage types', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [pgvectorDescriptor], + spaceId: APP_SPACE_ID, }); expectNarrowedType(result.kind === 'success'); @@ -375,6 +380,7 @@ describe('PostgresMigrationPlanner - storage types', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }), ).toThrow( 'Column declares typeParams for nativeType "vector" but no expandNativeType hook is registered for codecId "pg/vector@1".', diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/render-typescript.roundtrip.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/render-typescript.roundtrip.test.ts index 92f3198104..ae2ccdcb51 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/render-typescript.roundtrip.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/render-typescript.roundtrip.test.ts @@ -16,6 +16,7 @@ import { mkdtemp, readFile, rm, writeFile } from 'node:fs/promises'; import { tmpdir } from 'node:os'; import { pathToFileURL } from 'node:url'; import { promisify } from 'node:util'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import { AddColumnCall, CreateExtensionCall, @@ -133,7 +134,7 @@ describe('TypeScriptRenderablePostgresMigration round-trip', () => { new 
CreateIndexCall('public', 'user', 'user_email_idx', ['email']), new DropTableCall('public', 'stale'), ]; - const migration = new TypeScriptRenderablePostgresMigration(calls, META); + const migration = new TypeScriptRenderablePostgresMigration(calls, META, APP_SPACE_ID); const tsSource = rewriteImports(migration.renderTypeScript()); await writeFile(join(tmpDir, 'migration.ts'), tsSource); @@ -156,7 +157,7 @@ describe('TypeScriptRenderablePostgresMigration round-trip', () => { 'renders an empty calls list whose executed scaffold emits []', { timeout: timeouts.typeScriptCompilation }, async () => { - const migration = new TypeScriptRenderablePostgresMigration([], META); + const migration = new TypeScriptRenderablePostgresMigration([], META, APP_SPACE_ID); const tsSource = rewriteImports(migration.renderTypeScript()); await writeFile(join(tmpDir, 'migration.ts'), tsSource); @@ -186,7 +187,7 @@ describe('TypeScriptRenderablePostgresMigration round-trip', () => { meta: { note: 'preserved' }, }; const calls = [new RawSqlCall(op)]; - const migration = new TypeScriptRenderablePostgresMigration(calls, META); + const migration = new TypeScriptRenderablePostgresMigration(calls, META, APP_SPACE_ID); const tsSource = rewriteImports(migration.renderTypeScript()); await writeFile(join(tmpDir, 'migration.ts'), tsSource); diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/runner.basic.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/runner.basic.integration.test.ts index 1e5ea43085..22b057b3a2 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/runner.basic.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/runner.basic.integration.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { PostgresPlanTargetDetails } from 
'@prisma-next/target-postgres/planner-target-details'; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; import { @@ -55,6 +56,7 @@ describe.sequential('PostgresMigrationRunner - Basic Execution', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); if (result.kind !== 'success') { @@ -84,7 +86,7 @@ describe.sequential('PostgresMigrationRunner - Basic Execution', () => { const markerRow = await driver!.query<{ core_hash: string; profile_hash: string; - }>('select core_hash, profile_hash from prisma_contract.marker where id = $1', [1]); + }>('select core_hash, profile_hash from prisma_contract.marker where space = $1', ['app']); expect(markerRow.rows[0]).toMatchObject({ core_hash: contract.storage.storageHash, profile_hash: contract.profileHash, @@ -115,6 +117,7 @@ describe.sequential('PostgresMigrationRunner - Basic Execution', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (initialPlan.kind !== 'success') { throw new Error('expected initial planner success'); @@ -129,6 +132,7 @@ describe.sequential('PostgresMigrationRunner - Basic Execution', () => { const emptyPlan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [], @@ -151,8 +155,8 @@ describe.sequential('PostgresMigrationRunner - Basic Execution', () => { } const markerCount = await driver!.query<{ count: string }>( - 'select count(*)::text as count from prisma_contract.marker where id = $1', - [1], + 'select count(*)::text as count from prisma_contract.marker where space = $1', + ['app'], ); expect(markerCount.rows[0]?.count).toBe('1'); const ledgerCount = await driver!.query<{ count: string }>( diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/runner.errors.integration.test.ts 
b/packages/3-targets/6-adapters/postgres/test/migrations/runner.errors.integration.test.ts index 77f519b3bc..5da8d173f6 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/runner.errors.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/runner.errors.integration.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { PostgresPlanTargetDetails } from '@prisma-next/target-postgres/planner-target-details'; import { buildMergeMarkerStatements, @@ -59,6 +60,7 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { const emptyPlan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [], @@ -108,6 +110,72 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { ); }); + describe('when a legacy single-row marker table exists (pre-1.0 transitional shape)', () => { + it( + 'fails with LEGACY_MARKER_SHAPE error and points the operator at re-running dbInit', + { timeout: testTimeout }, + async () => { + // Reproduce the pre-cleanup shape that `migrateMarkerSchemaStatements` + // used to auto-promote: `id smallint primary key` with no `space` + // column. The detection step at boot must surface this rather than + // silently rebuilding the table. 
+ await executeStatement(driver!, ensurePrismaContractSchemaStatement); + await driver!.query(`create table prisma_contract.marker ( + id smallint primary key default 1, + core_hash text not null, + profile_hash text not null, + contract_json jsonb, + canonical_version int, + updated_at timestamptz not null default now(), + app_tag text, + meta jsonb not null default '{}', + invariants text[] not null default '{}' + )`); + + const runner = postgresTargetDescriptor.createRunner(familyInstance); + const emptyPlan = createMigrationPlan({ + targetId: 'postgres', + spaceId: APP_SPACE_ID, + origin: null, + destination: toPlanContractInfo(contract), + operations: [], + providedInvariants: [], + }); + + const result = await runner.execute({ + plan: emptyPlan, + driver: driver!, + destinationContract: contract, + policy: INIT_ADDITIVE_POLICY, + frameworkComponents, + }); + + expect(result.ok).toBe(false); + const failure = result.assertNotOk(); + expect(failure.code).toBe('LEGACY_MARKER_SHAPE'); + expect(failure.summary).toMatch(/legacy marker-table shape/i); + expect(failure.summary).toMatch(/dbInit/); + expect(failure.summary).toMatch(/prisma_contract\.marker/); + expect(failure.meta).toMatchObject({ table: 'prisma_contract.marker' }); + + // The legacy table is left untouched — operator dropping it is the + // explicit remediation; the runner doesn't mutate state on failure. 
+ const pkColumns = await driver!.query<{ column_name: string }>( + `select kcu.column_name + from information_schema.table_constraints tc + join information_schema.key_column_usage kcu + on tc.constraint_name = kcu.constraint_name + and tc.table_schema = kcu.table_schema + and tc.table_name = kcu.table_name + where tc.table_schema = 'prisma_contract' + and tc.table_name = 'marker' + and tc.constraint_type = 'PRIMARY KEY'`, + ); + expect(pkColumns.rows.map((r) => r.column_name)).toEqual(['id']); + }, + ); + }); + describe('when an existing marker does not match the origin contract', () => { it( 'fails with MARKER_ORIGIN_MISMATCH error and does not modify marker or append ledger', @@ -118,6 +186,7 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { await executeStatement(driver!, ensureLedgerTableStatement); const mismatchedMarker = buildMergeMarkerStatements({ + space: APP_SPACE_ID, storageHash: 'sha256:other-contract', profileHash: 'sha256:other-profile', contractJson: { storageHash: 'sha256:other-contract' }, @@ -130,6 +199,7 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { const runner = postgresTargetDescriptor.createRunner(familyInstance); const emptyPlan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: { storageHash: 'sha256:expected-origin', profileHash: 'sha256:expected-profile', @@ -153,8 +223,8 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { expect(failure.summary).toMatch(/does not match plan origin/i); const markerRow = await driver!.query<{ core_hash: string; profile_hash: string }>( - 'select core_hash, profile_hash from prisma_contract.marker where id = $1', - [1], + 'select core_hash, profile_hash from prisma_contract.marker where space = $1', + ['app'], ); expect(markerRow.rows[0]).toMatchObject({ core_hash: 'sha256:other-contract', @@ -183,6 +253,7 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { const invalidPlan 
= createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -244,6 +315,7 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { const planWithFailingPostcheck = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -314,6 +386,7 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { // Create a plan with SQL that will fail (syntax error) const planWithInvalidSql = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -394,6 +467,7 @@ describe.sequential('PostgresMigrationRunner - Error Scenarios', () => { // Create a plan that tries to insert duplicate email (will fail with constraint violation) const planWithConstraintViolation = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/runner.execution-checks.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/runner.execution-checks.integration.test.ts index 57ceeb68f0..cdf993bb51 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/runner.execution-checks.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/runner.execution-checks.integration.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { PostgresPlanTargetDetails } from '@prisma-next/target-postgres/planner-target-details'; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; import { @@ -49,6 +50,7 @@ describe.sequential('PostgresMigrationRunner - Execution Checks', () => 
{ const runner = postgresTargetDescriptor.createRunner(familyInstance); const planWithFailingChecks = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -139,6 +141,7 @@ describe.sequential('PostgresMigrationRunner - Execution Checks', () => { const runner = postgresTargetDescriptor.createRunner(familyInstance); const planWithPreSatisfiedPostcheck = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/runner.idempotency.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/runner.idempotency.integration.test.ts index 3cdd64850d..551ce2e2cd 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/runner.idempotency.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/runner.idempotency.integration.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { PostgresPlanTargetDetails } from '@prisma-next/target-postgres/planner-target-details'; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; import { @@ -55,6 +56,7 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { const runner = postgresTargetDescriptor.createRunner(familyInstance); const planWithPreSatisfiedPostcheck = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -110,8 +112,8 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { } const markerCount = await driver!.query<{ count: string }>( - 'select count(*)::text as count from prisma_contract.marker where id = $1', - [1], + 'select count(*)::text as count from 
prisma_contract.marker where space = $1', + ['app'], ); expect(markerCount.rows[0]?.count).toBe('1'); @@ -177,6 +179,7 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { const planWithPreSatisfiedPostcheck = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [mutableOperation], @@ -246,6 +249,7 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (initialPlan.kind !== 'success') { throw new Error('expected initial planner success'); @@ -263,13 +267,14 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { 'select count(*)::text as count from prisma_contract.ledger', ); const initialUpdatedAt = await driver!.query<{ updated_at: Date }>( - 'select updated_at from prisma_contract.marker where id = 1', + `select updated_at from prisma_contract.marker where space = 'app'`, ); // Self-edge plan with no operations and no new invariants. This is a // true no-op: nothing should be written. const noOpSelfEdgePlan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: toPlanContractInfo(contract), destination: toPlanContractInfo(contract), operations: [], @@ -299,7 +304,7 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { // Marker updated_at unchanged: no churn from the no-op. 
const updatedAtAfter = await driver!.query<{ updated_at: Date }>( - 'select updated_at from prisma_contract.marker where id = 1', + `select updated_at from prisma_contract.marker where space = 'app'`, ); expect(updatedAtAfter.rows[0]?.updated_at?.toISOString()).toBe( initialUpdatedAt.rows[0]?.updated_at?.toISOString(), @@ -320,6 +325,7 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (initialPlan.kind !== 'success') { throw new Error('expected initial planner success'); @@ -339,6 +345,7 @@ describe.sequential('PostgresMigrationRunner - Idempotency', () => { const selfEdgePlan = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: toPlanContractInfo(contract), destination: toPlanContractInfo(contract), operations: [ diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/runner.policy.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/runner.policy.integration.test.ts index b61075ce55..4f5624fcaa 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/runner.policy.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/runner.policy.integration.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { PostgresPlanTargetDetails } from '@prisma-next/target-postgres/planner-target-details'; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; import { expectNoMarkerOrLedgerWrites } from '../utils/dbAssertions'; @@ -51,6 +52,7 @@ describe.sequential('PostgresMigrationRunner - Policy Violations', () => { const planWithPolicyViolation = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -112,6 
+114,7 @@ describe.sequential('PostgresMigrationRunner - Policy Violations', () => { // so the SQL bodies are arbitrary. const planWithDataOp = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), providedInvariants: [], @@ -183,6 +186,7 @@ describe.sequential('PostgresMigrationRunner - Policy Violations', () => { // Same operation structure as above, but now with a permissive policy const planWithDestructiveOp = createMigrationPlan({ targetId: 'postgres', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ diff --git a/packages/3-targets/6-adapters/postgres/test/migrations/schema-verify.after-runner.integration.test.ts b/packages/3-targets/6-adapters/postgres/test/migrations/schema-verify.after-runner.integration.test.ts index e3fc221f8b..1b2ea5a444 100644 --- a/packages/3-targets/6-adapters/postgres/test/migrations/schema-verify.after-runner.integration.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/migrations/schema-verify.after-runner.integration.test.ts @@ -1,5 +1,6 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest'; import { @@ -72,6 +73,7 @@ describe.sequential('Schema verification after runner - integration', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (result.kind !== 'success') { diff --git a/packages/3-targets/6-adapters/sqlite/src/core/adapter.ts b/packages/3-targets/6-adapters/sqlite/src/core/adapter.ts index 3c9e3e5d3c..df23cce71a 100644 --- a/packages/3-targets/6-adapters/sqlite/src/core/adapter.ts +++ 
b/packages/3-targets/6-adapters/sqlite/src/core/adapter.ts @@ -1,3 +1,4 @@ +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { Adapter, AdapterProfile, @@ -52,8 +53,8 @@ class SqliteAdapterImpl implements Adapter ({ - sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from _prisma_marker where id = ?', - params: [1], + sql: 'select core_hash, profile_hash, contract_json, canonical_version, updated_at, app_tag, meta, invariants from _prisma_marker where space = ?', + params: [APP_SPACE_ID], }), // SQLite stores arrays as JSON-encoded TEXT (no native array type), so the driver returns `invariants` as a string. Decode before delegating to the shared row schema, which expects `string[]`. parseMarkerRow: (row: unknown) => { diff --git a/packages/3-targets/6-adapters/sqlite/src/core/control-adapter.ts b/packages/3-targets/6-adapters/sqlite/src/core/control-adapter.ts index b01dd9b63c..2a5a8e5865 100644 --- a/packages/3-targets/6-adapters/sqlite/src/core/control-adapter.ts +++ b/packages/3-targets/6-adapters/sqlite/src/core/control-adapter.ts @@ -91,6 +91,7 @@ export class SqliteControlAdapter implements SqlControlAdapter<'sqlite'> { */ async readMarker( driver: ControlDriverInstance<'sql', 'sqlite'>, + space: string, ): Promise { const exists = await driver.query( `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = ?`, @@ -120,8 +121,8 @@ export class SqliteControlAdapter implements SqlControlAdapter<'sqlite'> { meta, invariants FROM _prisma_marker - WHERE id = ?`, - [1], + WHERE space = ?`, + [space], ); const row = result.rows[0]; diff --git a/packages/3-targets/6-adapters/sqlite/test/migrations/fixtures/runner-fixtures.ts b/packages/3-targets/6-adapters/sqlite/test/migrations/fixtures/runner-fixtures.ts index 4770f808ca..e9b776505a 100644 --- a/packages/3-targets/6-adapters/sqlite/test/migrations/fixtures/runner-fixtures.ts +++ 
b/packages/3-targets/6-adapters/sqlite/test/migrations/fixtures/runner-fixtures.ts @@ -8,7 +8,7 @@ import sqlFamilyDescriptor, { createMigrationPlan, type SqlMigrationRunnerFailure, } from '@prisma-next/family-sql/control'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { APP_SPACE_ID, createControlStack } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import sqliteTargetDescriptor from '@prisma-next/target-sqlite/control'; @@ -116,6 +116,7 @@ export function createTestDatabase(): TestDatabase { export function createFailingPlan() { return createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ diff --git a/packages/3-targets/6-adapters/sqlite/test/migrations/planner-introspection.integration.test.ts b/packages/3-targets/6-adapters/sqlite/test/migrations/planner-introspection.integration.test.ts index 57f8f13190..3a4a465887 100644 --- a/packages/3-targets/6-adapters/sqlite/test/migrations/planner-introspection.integration.test.ts +++ b/packages/3-targets/6-adapters/sqlite/test/migrations/planner-introspection.integration.test.ts @@ -9,6 +9,7 @@ import { DatabaseSync } from 'node:sqlite'; import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageColumn, StorageTable } from '@prisma-next/sql-contract/types'; import { createSqliteMigrationPlanner } from '@prisma-next/target-sqlite/planner'; import { describe, expect, it } from 'vitest'; @@ -114,6 +115,7 @@ describe('SQLite planner + introspection round-trip', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ 
-172,6 +174,7 @@ describe('SQLite planner + introspection round-trip', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -228,6 +231,7 @@ describe('SQLite planner + introspection round-trip', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); diff --git a/packages/3-targets/6-adapters/sqlite/test/migrations/planner.test.ts b/packages/3-targets/6-adapters/sqlite/test/migrations/planner.test.ts index 307207127f..c17381bda3 100644 --- a/packages/3-targets/6-adapters/sqlite/test/migrations/planner.test.ts +++ b/packages/3-targets/6-adapters/sqlite/test/migrations/planner.test.ts @@ -1,4 +1,5 @@ import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageColumn, StorageTable } from '@prisma-next/sql-contract/types'; import { createSqliteMigrationPlanner } from '@prisma-next/target-sqlite/planner'; import { describe, expect, it } from 'vitest'; @@ -61,6 +62,7 @@ describe('SQLite migration planner', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -108,6 +110,7 @@ describe('SQLite migration planner', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -136,6 +139,7 @@ describe('SQLite migration planner', () => { policy: { allowedOperationClasses: ['additive'] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); @@ -154,6 +158,7 @@ describe('SQLite migration planner', () => { policy: { 
allowedOperationClasses: [] }, fromContract: null, frameworkComponents: [], + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('failure'); }); diff --git a/packages/3-targets/6-adapters/sqlite/test/migrations/render-typescript.roundtrip.test.ts b/packages/3-targets/6-adapters/sqlite/test/migrations/render-typescript.roundtrip.test.ts index 47a4df45cf..bd23ece484 100644 --- a/packages/3-targets/6-adapters/sqlite/test/migrations/render-typescript.roundtrip.test.ts +++ b/packages/3-targets/6-adapters/sqlite/test/migrations/render-typescript.roundtrip.test.ts @@ -14,6 +14,7 @@ import { mkdtemp, readFile, rm, writeFile } from 'node:fs/promises'; import { tmpdir } from 'node:os'; import { pathToFileURL } from 'node:url'; import { promisify } from 'node:util'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import { AddColumnCall, CreateIndexCall, @@ -139,7 +140,7 @@ describe('TypeScriptRenderableSqliteMigration round-trip', () => { new CreateIndexCall('user', 'user_email_idx', ['email']), new DropTableCall('stale'), ]; - const migration = new TypeScriptRenderableSqliteMigration(calls, META); + const migration = new TypeScriptRenderableSqliteMigration(calls, META, APP_SPACE_ID); const tsSource = rewriteImports(migration.renderTypeScript()); await writeFile(join(tmpDir, 'migration.ts'), tsSource); @@ -162,7 +163,7 @@ describe('TypeScriptRenderableSqliteMigration round-trip', () => { 'renders an empty calls list whose executed scaffold emits []', { timeout: timeouts.coldTransformImport }, async () => { - const migration = new TypeScriptRenderableSqliteMigration([], META); + const migration = new TypeScriptRenderableSqliteMigration([], META, APP_SPACE_ID); const tsSource = rewriteImports(migration.renderTypeScript()); await writeFile(join(tmpDir, 'migration.ts'), tsSource); @@ -205,7 +206,7 @@ describe('TypeScriptRenderableSqliteMigration round-trip', () => { operationClass: 'widening', }), ]; - const migration = new 
TypeScriptRenderableSqliteMigration(calls, META); + const migration = new TypeScriptRenderableSqliteMigration(calls, META, APP_SPACE_ID); const tsSource = rewriteImports(migration.renderTypeScript()); await writeFile(join(tmpDir, 'migration.ts'), tsSource); diff --git a/packages/3-targets/6-adapters/sqlite/test/migrations/runner.basic.test.ts b/packages/3-targets/6-adapters/sqlite/test/migrations/runner.basic.test.ts index 5b5a6afb1c..b4003947b3 100644 --- a/packages/3-targets/6-adapters/sqlite/test/migrations/runner.basic.test.ts +++ b/packages/3-targets/6-adapters/sqlite/test/migrations/runner.basic.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlitePlanTargetDetails } from '@prisma-next/target-sqlite/planner-target-details'; import { timeouts } from '@prisma-next/test-utils'; import { afterEach, describe, expect, it } from 'vitest'; @@ -34,6 +35,7 @@ describe('SqliteMigrationRunner - Basic Execution', { timeout: timeouts.database policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(result.kind).toBe('success'); if (result.kind !== 'success') throw new Error('expected planner success'); @@ -60,8 +62,8 @@ describe('SqliteMigrationRunner - Basic Execution', { timeout: timeouts.database expect(tableRow.rows[0]!.cnt).toBe(1); const markerRow = await driver.query<{ core_hash: string; profile_hash: string }>( - 'SELECT core_hash, profile_hash FROM _prisma_marker WHERE id = ?', - [1], + 'SELECT core_hash, profile_hash FROM _prisma_marker WHERE space = ?', + ['app'], ); expect(markerRow.rows[0]).toMatchObject({ core_hash: contract.storage.storageHash, @@ -89,6 +91,7 @@ describe('SqliteMigrationRunner - Basic Execution', { timeout: timeouts.database policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); if (initialPlan.kind !== 
'success') throw new Error('expected initial planner success'); const firstResult = await runner.execute({ @@ -103,6 +106,7 @@ describe('SqliteMigrationRunner - Basic Execution', { timeout: timeouts.database const emptyPlan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [], @@ -124,8 +128,8 @@ describe('SqliteMigrationRunner - Basic Execution', { timeout: timeouts.database }); const markerCount = await driver.query<{ cnt: number }>( - 'SELECT COUNT(*) as cnt FROM _prisma_marker WHERE id = ?', - [1], + 'SELECT COUNT(*) as cnt FROM _prisma_marker WHERE space = ?', + ['app'], ); expect(markerCount.rows[0]!.cnt).toBe(1); diff --git a/packages/3-targets/6-adapters/sqlite/test/migrations/runner.errors.test.ts b/packages/3-targets/6-adapters/sqlite/test/migrations/runner.errors.test.ts index 853186bf8f..2719e43335 100644 --- a/packages/3-targets/6-adapters/sqlite/test/migrations/runner.errors.test.ts +++ b/packages/3-targets/6-adapters/sqlite/test/migrations/runner.errors.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlitePlanTargetDetails } from '@prisma-next/target-sqlite/planner-target-details'; import { buildWriteMarkerStatements, @@ -35,6 +36,7 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database const emptyPlan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [], @@ -85,6 +87,7 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database await executeStatement(driver, ensureMarkerTableStatement); await executeStatement(driver, ensureLedgerTableStatement); const mismatchedMarker = buildWriteMarkerStatements({ + space: APP_SPACE_ID, storageHash: 'sha256:other-contract', profileHash: 
'sha256:other-profile', contractJson: { storageHash: 'sha256:other-contract' }, @@ -97,6 +100,7 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database const runner = sqliteTargetDescriptor.createRunner(familyInstance); const emptyPlan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: { storageHash: 'sha256:expected-origin', profileHash: 'sha256:expected-profile', @@ -120,8 +124,8 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database expect(failure.summary).toMatch(/does not match plan origin/i); const markerRow = await driver.query<{ core_hash: string; profile_hash: string }>( - 'SELECT core_hash, profile_hash FROM _prisma_marker WHERE id = ?', - [1], + 'SELECT core_hash, profile_hash FROM _prisma_marker WHERE space = ?', + ['app'], ); expect(markerRow.rows[0]).toMatchObject({ core_hash: 'sha256:other-contract', @@ -141,6 +145,7 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database const planWithFailingPostcheck = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -195,6 +200,7 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database const planWithInvalidSql = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ @@ -237,6 +243,63 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database await expectNoMarkerOrLedgerWrites(driver); }); + it('fails with LEGACY_MARKER_SHAPE when a legacy single-row marker table exists', async () => { + // Reproduce the pre-cleanup shape that `migrateMarkerSchemaSqlite` used + // to auto-promote: `id` PK with no `space` column. The detection step at + // boot must surface this rather than silently rebuilding the table. 
+ testDb = createTestDatabase(); + const { driver } = testDb; + + await driver.query(`CREATE TABLE _prisma_marker ( + id INTEGER PRIMARY KEY DEFAULT 1, + core_hash TEXT NOT NULL, + profile_hash TEXT NOT NULL, + contract_json TEXT, + canonical_version INTEGER, + updated_at TEXT NOT NULL DEFAULT (datetime('now')), + app_tag TEXT, + meta TEXT NOT NULL DEFAULT '{}', + invariants TEXT NOT NULL DEFAULT '[]' + )`); + + const runner = sqliteTargetDescriptor.createRunner(familyInstance); + const emptyPlan = createMigrationPlan({ + targetId: 'sqlite', + spaceId: APP_SPACE_ID, + origin: null, + destination: toPlanContractInfo(contract), + operations: [], + providedInvariants: [], + }); + + const result = await runner.execute({ + plan: emptyPlan, + driver, + destinationContract: contract, + policy: INIT_ADDITIVE_POLICY, + frameworkComponents, + }); + + expect(result.ok).toBe(false); + const failure = result.assertNotOk(); + expect(failure.code).toBe('LEGACY_MARKER_SHAPE'); + expect(failure.summary).toMatch(/legacy marker-table shape/i); + expect(failure.summary).toMatch(/dbInit/); + expect(failure.summary).toMatch(/_prisma_marker/); + expect(failure.meta).toMatchObject({ table: '_prisma_marker' }); + + // Detection must not mutate the legacy table — operator dropping it is + // the explicit remediation. 
+ const info = await driver.query<{ name: string; pk: number }>( + 'PRAGMA table_info("_prisma_marker")', + ); + const pkColumns = info.rows + .filter((r) => r.pk > 0) + .sort((a, b) => a.pk - b.pk) + .map((r) => r.name); + expect(pkColumns).toEqual(['id']); + }); + it('fails with DESTINATION_CONTRACT_MISMATCH when plan hash differs from contract', async () => { testDb = createTestDatabase(); const { driver } = testDb; @@ -244,6 +307,7 @@ describe('SqliteMigrationRunner - Error Scenarios', { timeout: timeouts.database const plan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: { storageHash: 'sha256:plan-hash', profileHash: 'sha256:plan-profile' }, operations: [], @@ -278,6 +342,7 @@ describe('SqliteMigrationRunner - Policy Violations', () => { const planWithPolicyViolation = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [ diff --git a/packages/3-targets/6-adapters/sqlite/test/migrations/runner.idempotency.test.ts b/packages/3-targets/6-adapters/sqlite/test/migrations/runner.idempotency.test.ts index a6bbefa8ee..0842456e77 100644 --- a/packages/3-targets/6-adapters/sqlite/test/migrations/runner.idempotency.test.ts +++ b/packages/3-targets/6-adapters/sqlite/test/migrations/runner.idempotency.test.ts @@ -1,4 +1,5 @@ import { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlitePlanTargetDetails } from '@prisma-next/target-sqlite/planner-target-details'; import { timeouts } from '@prisma-next/test-utils'; import { afterEach, describe, expect, it } from 'vitest'; @@ -34,6 +35,7 @@ describe('SqliteMigrationRunner - Idempotency', { timeout: timeouts.databaseOper const runner = sqliteTargetDescriptor.createRunner(familyInstance); const plan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: 
toPlanContractInfo(contract), operations: [ @@ -81,8 +83,8 @@ describe('SqliteMigrationRunner - Idempotency', { timeout: timeouts.databaseOper }); const markerCount = await driver.query<{ cnt: number }>( - 'SELECT COUNT(*) as cnt FROM _prisma_marker WHERE id = ?', - [1], + 'SELECT COUNT(*) as cnt FROM _prisma_marker WHERE space = ?', + ['app'], ); expect(markerCount.rows[0]!.cnt).toBe(1); @@ -109,6 +111,7 @@ describe('SqliteMigrationRunner - Idempotency', { timeout: timeouts.databaseOper const runner = sqliteTargetDescriptor.createRunner(familyInstance); const initPlan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [], @@ -128,13 +131,14 @@ describe('SqliteMigrationRunner - Idempotency', { timeout: timeouts.databaseOper 'SELECT COUNT(*) as cnt FROM _prisma_ledger', ); const initialUpdatedAt = await driver.query<{ updated_at: string }>( - 'SELECT updated_at FROM _prisma_marker WHERE id = ?', - [1], + 'SELECT updated_at FROM _prisma_marker WHERE space = ?', + ['app'], ); // True no-op self-edge: origin === destination, no ops, no invariants. 
const noOpPlan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: toPlanContractInfo(contract), destination: toPlanContractInfo(contract), operations: [], @@ -160,8 +164,8 @@ describe('SqliteMigrationRunner - Idempotency', { timeout: timeouts.databaseOper expect(ledgerAfter.rows[0]!.cnt).toBe(initialLedger.rows[0]!.cnt); const updatedAtAfter = await driver.query<{ updated_at: string }>( - 'SELECT updated_at FROM _prisma_marker WHERE id = ?', - [1], + 'SELECT updated_at FROM _prisma_marker WHERE space = ?', + ['app'], ); expect(updatedAtAfter.rows[0]!.updated_at).toBe(initialUpdatedAt.rows[0]!.updated_at); }); @@ -179,6 +183,7 @@ describe('SqliteMigrationRunner - Idempotency', { timeout: timeouts.databaseOper const initPlan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: null, destination: toPlanContractInfo(contract), operations: [], @@ -199,6 +204,7 @@ describe('SqliteMigrationRunner - Idempotency', { timeout: timeouts.databaseOper const selfEdgePlan = createMigrationPlan({ targetId: 'sqlite', + spaceId: APP_SPACE_ID, origin: toPlanContractInfo(contract), destination: toPlanContractInfo(contract), operations: [ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 93bac5ea3b..7fd5b1552c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -8463,7 +8463,7 @@ snapshots: '@babel/helper-annotate-as-pure@7.27.3': dependencies: - '@babel/types': 7.28.6 + '@babel/types': 7.29.0 '@babel/helper-compilation-targets@7.28.6': dependencies: @@ -8491,7 +8491,7 @@ snapshots: '@babel/helper-member-expression-to-functions@7.28.5': dependencies: '@babel/traverse': 7.29.0 - '@babel/types': 7.28.6 + '@babel/types': 7.29.0 transitivePeerDependencies: - supports-color @@ -8513,7 +8513,7 @@ snapshots: '@babel/helper-optimise-call-expression@7.27.1': dependencies: - '@babel/types': 7.28.6 + '@babel/types': 7.29.0 '@babel/helper-plugin-utils@7.28.6': {} @@ -8529,7 +8529,7 @@ snapshots: 
'@babel/helper-skip-transparent-expression-wrappers@7.27.1': dependencies: '@babel/traverse': 7.29.0 - '@babel/types': 7.28.6 + '@babel/types': 7.29.0 transitivePeerDependencies: - supports-color diff --git a/projects/extension-contract-spaces/plan.md b/projects/extension-contract-spaces/plan.md new file mode 100644 index 0000000000..2ec12d4400 --- /dev/null +++ b/projects/extension-contract-spaces/plan.md @@ -0,0 +1,267 @@ +# Extension Contract Spaces — Project Plan + +## Summary + +Introduce **contract spaces** — disjoint `(contract.json, migration-graph)` units that the framework treats uniformly — so extensions become first-class schema contributors using the same planner, runner, verifier, and migration shape as application authoring. As part of the project, the in-tree workspace extension that uses `databaseDependencies.init` (pgvector — confirmed sole consumer by spike) is migrated to a contract space; cipherstash is authored fresh on the new mechanism. arktype-json is out of scope (spike confirmed it ships no `databaseDependencies` and needs no DB scaffolding). After both extensions ship, the `databaseDependencies` mechanism is removed. The cipherstash blocker (TML-2373) is unblocked by M3. + +**Spec:** `projects/extension-contract-spaces/spec.md` + +**Sub-specs:** +- `specs/framework-mechanism.spec.md` — drives M1 + M2. +- `specs/cipherstash-migration.spec.md` — drives M3. 
+ +## Collaborators + +| Role | Person/Team | Context | +| ------------ | ------------------------------------ | ----------------------------------------------------------------- | +| Maker | William Madden | Drives execution | +| Reviewer | William Madden | Architectural review across planner / runner / verifier | +| Collaborator | Cipherstash project (TML-2373) | Immediate consumer; needs the unblock | +| Collaborator | pgvector maintainers | pgvector is migrated to a contract space under M4 | + +## Shipping Strategy + +Every milestone is safe to deploy immediately because the contract-space mechanism is **additive** until M5. The implicit gate between old and new behaviour is the presence of the `contractSpace` field on an extension descriptor: + +- If an extension descriptor does not expose `contractSpace`, it falls back to the existing `databaseDependencies.init` path (M1-M4). +- If an extension descriptor exposes `contractSpace`, the framework loads it; any `databaseDependencies` entry the same descriptor still carries is ignored. +- M5 removes `databaseDependencies` only after every in-tree extension has migrated — by then the field is unused everywhere. + +The marker schema gains a `space` column with a one-shot framework-internal migration that promotes the existing single-row marker to `(space='app', …)` shape. The migration is idempotent; deployments mid-rollout see no semantic change. No feature flags are required. + +## Sub-specs + +Two milestones have task specs because their implementation detail is large enough to crowd the project plan: + +- [`specs/framework-mechanism.spec.md`](./specs/framework-mechanism.spec.md) — locks down API shapes for the per-space planner / runner / verifier, the `contractSpace` extension-descriptor field, the marker schema migration SQL, the pinned per-space artefact layout + canonicalisation rules, the codec lifecycle hook, and the per-space `db init` / `db update` flows. Drives M1 and M2. 
+- [`specs/cipherstash-migration.spec.md`](./specs/cipherstash-migration.spec.md) — locks down cipherstash's package layout, contract IR contents, baseline migration shape (with the EQL bundle byte-equivalence rule), codec hook behaviour, descriptor wiring, and four end-to-end test scenarios (initial, drop, bump, revert workaround). Drives M3. + +M4 (pgvector + monorepo example) and M5 (`databaseDependencies` removal + close-out) are small enough to be captured inline in this plan; no task spec needed. + +## Test Design + +Test cases derived from each acceptance criterion in the spec. Tasks reference these test cases. + +| AC | TC | Test Case | Type | Milestone | Expected Outcome | +|-------|-------|---------------------------------------------------------------------------------------------------------------------------------|-------------|-----------|--------------------------------------------------------------------------------------------------------| +| AC-1 | TC-1 | Fresh Postgres + cipherstash-as-contract-space → `dbInit` strict mode succeeds | Integration | M3 | dbInit returns success; verifier sees `eql_v2_*` objects as expected | +| AC-1 | TC-2 | After TC-1 setup, hand-add unexpected column to `eql_v2_configuration` → `dbInit` fails | Integration | M3 | dbInit fails with strict-mode error; clear remediation hint | +| AC-2 | TC-3 | User schema with cipherstash + Encrypted column → `migrate` produces app-space migration directory at root | Integration | M3 | Directory at `migrations/_*/` with structural ops + codec-emitted `add_search_config` op | +| AC-2 | TC-4 | Same setup → `migrate` produces cipherstash-space migration directory under `migrations/cipherstash/` | Integration | M3 | Directory under `migrations/cipherstash//` | +| AC-2 | TC-5 | After TC-3/4, `db apply` runs both migrations in single transaction | Integration | M3 | Both migrations apply atomically; either both succeed or neither | +| AC-2 | TC-6 | After TC-5 apply, marker table has 2 rows 
| Integration | M3 | Marker has rows for `app` and `cipherstash` with expected hashes | +| AC-3 | TC-7 | Bump cipherstash, run `migrate` → only new ops in cipherstash-space migration | Integration | M3 | New cipherstash-space migration contains only the new op; prior invariantIds skipped via marker | +| AC-4 | TC-8 | Monorepo example with 2 internal contract owners + aggregator → builds, emits per-space, applies | Integration | M4 | All per-space migrations applied; marker has rows per package | +| AC-5 | TC-9 | After multi-space apply, integration test asserts marker row set + hash equality | Integration | M3 | Marker row count = number of loaded spaces; each row's hash matches contract.json content | +| AC-5 | TC-10 | Vary `extensionPacks` declaration order, verify aggregate is identical | Integration | M1 | Aggregate hash byte-equal regardless of declaration order | +| AC-6 | TC-11 | Runner does not import extension descriptor module during apply path | Integration | M3 | Static analysis or runtime tracing confirms no descriptor import during apply | +| AC-7 | TC-12 | Cipherstash extension's `installEqlBundle` op contains vendored bundle SQL byte-for-byte | Unit | M3 | Op body equals the vendored bundle file's contents | +| AC-8 | TC-13 | Cipherstash's `contract.json` contains the typed objects but not the opaque ones | Unit | M3 | Contains `eql_v2_encrypted`, `eql_v2_configuration_state`, domains; does NOT contain functions / operators / casts | +| AC-9 | TC-14 | Drop searchable Encrypted column → codec hook emits `remove_search_config` in app-space migration; cipherstash marker unchanged | Integration | M3 | App-space migration carries `remove_search_config`; cipherstash marker row hash unchanged | +| AC-10 | TC-15 | pgvector contract space declares `vector` type | Unit | M4 | pgvector's `contract.json` declares `vector` type | +| AC-10 | TC-16 | User adds pgvector + `vector(N)` column → `migrate` + `apply` succeeds; marker has 2 rows | Integration | M4 | 
Marker has rows for `app` and `pgvector` with expected hashes | +| AC-10 | TC-17 | dbInit on resulting database succeeds in strict mode | Integration | M4 | Strict-mode dbInit returns success | +| AC-11 | TC-19 | `ComponentDatabaseDependencies` and `databaseDependencies` removed from framework + pgvector | Build | M5 | Build fails if any reference remains; rg returns no consumer matches | +| AC-12 | TC-21 | Fresh database with cipherstash → `db init` walks cipherstash graph + synthesizes app-space delta in single transaction | Integration | M3 | Single transaction; marker rows for both spaces with expected hashes | +| AC-13 | TC-22 | User removes extension while marker row remains → `dbInit` fails with orphan-row error and remediation hint | Integration | M1 | Clear error identifying orphan row + recommended manual cleanup | +| NFR1 | TC-23 | `strictVerification: false` workaround removed from cipherstash-related test setups | Build/Lint | M3 | grep returns no matches in cipherstash tests / examples | +| NFR5 | TC-24 | Benchmark emit + dbInit performance with 0 vs 1 extensions | Integration | M5 | < 5% wall-clock overhead delta | +| AC-14 | TC-25 | Bump cipherstash → `migrate` produces PR diff with updated pinned files + new migration directory | Integration | M3 | Pinned `migrations/cipherstash/{contract.json,contract.d.ts,refs/head.json}` updated; new migration dir | +| AC-15 | TC-26 | Delete `node_modules/` then run `dbInit` + `db apply` → both succeed reading pinned files only | Integration | M1 | Verifier + runner succeed; no descriptor import attempted on either path | +| AC-16 | TC-27 | Add extension to `extensionPacks` without running `migrate` → `dbInit` fails with declared-but-unmigrated error | Integration | M1 | Clear error naming the extension + remediation `prisma-next migrate` | +| AC-16 | TC-28 | `migrations//` exists on disk for extension not in `extensionPacks` → `dbInit` fails with orphan-pinned-dir error | Integration | M1 | Clear error 
identifying orphan dir + remediation | +| AC-2 | TC-29 | After `migrate` with cipherstash declared, pinned `migrations/cipherstash/{contract.json,contract.d.ts,refs/head.json}` exist | Integration | M3 | Files exist; byte-equivalent to descriptor's current values via canonicalization | +| FR-17 | TC-30 | Bump descriptor's `contractJson` without running `migrate` → next `migrate` invocation surfaces drift warning before emitting | Unit | M1 | Drift detection emits a clear "extension bumped — run migrate to materialise" message | + +Decision/spike tasks (resolved during plan finalisation): + +- ~~T4.4~~ — arktype-json scope spike. **Resolved**: arktype-json ships no `databaseDependencies` (jsonb is built-in); no contract space needed. Dropped from scope. + +## Milestones + +### Milestone 1: Framework — contract space mechanism + +Introduce the framework's per-space planner/runner/verifier and the extension descriptor's `contractSpace` field. Existing in-tree extensions remain on `databaseDependencies.init` and continue to work unchanged (additive change). The new code path is exercised by a synthetic test extension end-to-end. + +**Tasks:** + +- [x] **T1.1** Marker schema migration: add `space` column (text, not null), change PK from `id` to `space`, write a one-shot framework-internal migration that promotes existing single-row markers to `(space='app', …)`. Must be idempotent. (supports: TC-9, TC-10, TC-22, and many later TCs) — landed M1 R2, commit `ed257f226`. Postgres + SQLite paths, three-state idempotency tests on both. Sub-spec § 2 wording amended to clarify that ADR 029's shadow-DB preflight is user-DDL-scoped; control-DDL like `ensureControlTables` is validated by idempotency tests instead. +- [x] **T1.2** Add `contractSpace?: { contractJson, migrations, headRef }` field to extension descriptor types. (satisfies: TC-3, TC-4) — landed M1 R1, commit `5733d8e18`. 
Implementation introduced an in-memory authored migration package (no `dirPath`) distinct from the on-disk `MigrationPackage`; sub-spec § 1 amended to reflect the resolved shape. **M1-cleanup F4** subsequently renamed the in-memory shape (`ExtensionMigrationPackage` → `AuthoredMigrationPackage`) and hoisted it to `@prisma-next/framework-components/control` (commit `68ebbeb25`). +- [x] **T1.3** Per-space planner: extend the SQL-family planner to accept a list of (space, contract) tuples; default behaviour for existing single-app code paths preserved when no extension exposes `contractSpace`. (satisfies: TC-3, TC-4, TC-10) — landed M1 R3, commit `de17e7c86`. Shipped as a generic `planAllSpaces` in `@prisma-next/migration-tools/exports/spaces` (target-agnostic; SQL family wires it at consumption site). 8 tests including AM3 three-permutation determinism. Sub-spec § 3 amended to show generic signature + SQL-family use site + helper location. +- [x] **T1.4** Per-space runner: extend the SQL-family migration runner to support per-space marker rows with cross-space ordering convention (extensions first, app-space second); single transaction across spaces. The runner reads only from the user's repo (no descriptor import). (satisfies: TC-5, TC-21, TC-26) — landed M1 R4. Shipped as the target-agnostic `concatenateSpaceApplyInputs(inputs)` ordering helper plus the `SpaceApplyInput` data shape in `@prisma-next/migration-tools/exports/spaces`. 9 tests covering ordering (extensions alphabetical, app last), determinism, duplicate-id rejection, non-mutation, single-app preservation. The actual transaction-wrapping + per-space marker writes live at the SQL-family consumption site (per the helper-location convention from R3); this round ships the framework-neutral primitive the consumer composes against. 
+- [x] **T1.5** Per-space verifier: aggregate per-space contracts by reading the user's repo (root-level app-space `contract.json` + each loaded extension's pinned `migrations/<space>/contract.json`). Deterministic alphabetical-by-space-id sort. Per-space hash check. Three orphan / missing checks with clear remediation hints: (a) marker rows for spaces not in `extensionPacks`, (b) `extensionPacks` entries with no pinned contract on disk, (c) `migrations/<space>/` directories on disk for spaces not in `extensionPacks`. No descriptor import on this path. (satisfies: TC-9, TC-10, TC-22, TC-26, TC-27, TC-28) — landed M1 R4. Shipped as `listPinnedSpaceDirectories(projectMigrationsDir)` (async I/O — filters dot-prefixed and timestamp-prefixed `YYYYMMDDTHHmm_…` directories) plus the pure `verifyContractSpaces(inputs)` helper in `@prisma-next/migration-tools/exports/spaces`. 18 tests covering all five violation kinds (declaredButUnmigrated, orphanMarker, orphanPinnedDir, hashMismatch, invariantsMismatch), deterministic ordering of violations, and the no-descriptor-import property. Integration with the live DB schema (step 8) lives at the SQL-family consumption site. +- [x] **T1.6** Per-space layout convention (γ): emit migrations under `migrations/<space>/<migration>/` for extension spaces; root for app-space. Update emitter to choose target directory by space. (satisfies: TC-3, TC-4) — landed M1 R3, commit `de17e7c86`. Shipped as `spaceMigrationDirectory(...)` + `APP_SPACE_ID` + `isValidSpaceId`/`assertValidSpaceId` in `@prisma-next/migration-tools/exports/spaces`. 24 tests covering accept/reject/passthrough/invalid. +- [x] **T1.7** Migration package emission helper: serialize an in-memory `MigrationPackage` (manifest + ops + contract.json snapshot) to per-space subdirectory; canonicalized for byte-determinism. (satisfies: TC-3, TC-4) — landed M1 R3, commit `de17e7c86`. 
Shipped as `writeAuthoredMigrationPackage(targetDir, pkg)` in `@prisma-next/migration-tools/exports/io` (originally `writeExtensionMigrationPackage`; renamed under M1-cleanup F4, commit `68ebbeb25`). 4 tests covering layout, canonical content, idempotency (byte-determinism), target-dir creation. +- [x] **T1.8** Pinned per-space artefact emission: on every `migrate`, write (or overwrite) `migrations/<space>/contract.json`, `migrations/<space>/contract.d.ts`, `migrations/<space>/refs/head.json` from each loaded extension's descriptor `contractSpace` values. Canonicalised for byte-determinism. (satisfies: TC-25, TC-29) — landed M1 R4. Shipped as `emitPinnedSpaceArtefacts(projectMigrationsDir, spaceId, inputs)` in `@prisma-next/migration-tools/exports/spaces`. Always-overwrite (the framework owns these files); rejects app-space (its pinned shape lives at the project root, not under `migrations/`) and invalid space ids; sorts head-ref `invariants` alphabetically before serialisation; canonical-JSON `contract.json`. 12 tests covering layout, canonicalisation, byte-determinism, overwrite semantics, app-space rejection, invalid-space rejection, and directory creation. Caller renders `contract.d.ts` (target-aware via the SQL family's existing renderer) and passes the string in. +- [x] **T1.9** Drift detection at `migrate` time: compare descriptor's current `contractJson` against the on-disk pinned version; if diverged but no new migrations are being emitted (e.g. user bumped a non-changing extension), surface a clear warning. (satisfies: TC-30) — landed M1 R5. 
Shipped as a target-agnostic primitive pair in `@prisma-next/migration-tools/exports/spaces`: pure `detectSpaceContractDrift(spaceId, { descriptorHash, pinnedHash })` returning a discriminated `{ kind: 'noDrift' | 'firstEmit' | 'drift', spaceId, descriptorHash, pinnedHash }` (7 tests covering all three cases plus pure-function / non-mutation properties), and the I/O wrapper `readPinnedContractHash(projectMigrationsDir, spaceId)` that returns the pinned `refs/head.json` hash or `null` when the file does not exist (8 tests covering happy path, ENOENT-as-null, malformed JSON / missing-hash error surfacing, app-space + invalid-space-id rejection, and emitPinnedSpaceArtefacts round-trip). The SQL family in M2 R1 composes the two: read the pinned hash, supply the descriptor's `headRef.hash` as `descriptorHash`, and format the warning from the result. +- [~] **T1.10** Synthetic test extension fixture at `test/integration/test/contract-space-fixture/` (originally landed as a private workspace package at `packages/3-extensions/test-contract-space/`; relocated under M1-cleanup T-cleanup.1, commit `db33795e3`, because the package shape implied "real extension" alongside pgvector / cipherstash with no external consumers to justify it). Declares one baseline migration and one head ref. Used as scaffolding for later milestones' E2E tests; the fixture exposes the same descriptor surface a real extension would. Split into two phases for sequencing against the verifier (T1.5): + - [x] **T1.10a** Package skeleton + descriptor wiring + baseline migration + smoke tests. (landed M1 R1, commit `25a1e48fa`). Uses a `test_box` table rather than a composite type — composite-type IR support is M3-or-later work driven by cipherstash; M1's per-space mechanism is type-agnostic so a table substitutes cleanly. + - [x] **T1.10b** "Deletable `node_modules`" test fixture that exercises TC-26 (verifier reads only the user repo, no descriptor import). — landed M1 R4 alongside T1.5. 
Lives at [`packages/1-framework/3-tooling/migration/test/deletable-node-modules.test.ts`](../../packages/1-framework/3-tooling/migration/test/deletable-node-modules.test.ts) (4 tests). The fixture sets up a tmpdir project with pinned per-space artefacts, deletes `node_modules`, then exercises `listPinnedSpaceDirectories` + `verifyContractSpaces` + `concatenateSpaceApplyInputs` — all of which complete correctly without descriptor access. Intentionally does **not** import the synthetic `@prisma-next/extension-test-contract-space` package; the test invents its own space id inline to lock in the no-descriptor property. + +**Validation gate:** + +- `pnpm typecheck` +- `pnpm test:packages` +- `pnpm test:integration` +- `pnpm lint:deps` +- `pnpm build` + +### Milestone 1-cleanup: M1 design-review remediation — **SATISFIED** (HEAD `ac2157d72`) + +Addresses the design-quality findings surfaced in `reviews/code-review.md` (post-M1, pre-PR-merge). M1's functional acceptance was already met; this milestone is the remediation pass for design-quality concerns that didn't surface during the implementation rounds. Each task below maps to one or more findings (F0–F6) recorded in the review log; closure of every linked GitHub PR thread is part of "done." + +**Trajectory:** Reached SATISFIED at `15e0534e1` post-R3 (F0–F5 closed); reopened by `a9697ba52` for F6 (Authored* prefix flatten — surfaced in interactive review of R2's output); re-reached SATISFIED at `ac2157d72` post-R4 (reviewer verdict). 7/7 ACs PASS; 15/15 GH PR threads closed; validation gates green. + +**Round outcomes (4 implementer rounds + 4 reviewer rounds):** + +- **R1 (closed F0/F1/F5):** verifier learned to detect migration directories by `migration.json` presence rather than by name shape (commits `2473c9611` + `c19086d90`); test fixture relocated out of `packages/3-extensions/test-contract-space/` to `test/integration/test/contract-space-fixture/` (commit `db33795e3`); CodeRabbit's tsdown nitpick made moot. 
+- **R2 (closed F3/F4):** `APP_SPACE_ID` canonicalised under `framework-components/control` with `scripts/lint-app-space-id.mjs` enforcing single-source + no-raw-literal invariants (commit `9e39382e4`); contract-space identity types hoisted and renamed (`ContractSpaceHeadRef`, `AuthoredMigrationPackage`, `AuthoredContractSpace`); `MigrationMetadata` + `MigrationHints` hoisted to `framework-components/control` with re-export shim preserving the 12 existing `migration-tools/metadata` consumers (commit `68ebbeb25`). +- **R3 (closed F2):** transitional `migrateMarkerSchema*` helpers deleted; replaced with structured `LEGACY_MARKER_SHAPE` runtime detection that fails non-mutatingly with a `dbInit` remediation hint (commit `15e0534e1`). +- **R4 (closed F6 — SATISFIED):** flattened the contract-space typology — `Authored*` prefix dropped (implementer commit `f8649ba43`; orchestrator pickups for spec § 3 + § 7 and plan T-cleanup.5 in `ac2157d72`). Canonical `ContractSpace` and `MigrationPackage` (structural) live in `framework-components/control`; `OnDiskMigrationPackage extends MigrationPackage` lives in `migration-tools/src/package.ts` carrying `dirPath`; struct-taking emitter renamed `writeAuthoredMigrationPackage` → `materialiseMigrationPackage` (kept, not deleted — M3 cipherstash branch consumes it on its next rebase). AC-D6 PASS. Reviewer R4 verdict SATISFIED at `ac2157d72`. F6 had no GH-thread dimension (surfaced in interactive design review). + +**ACs PASS (7/7):** AC-D0 / AC-D1 / AC-D2 / AC-D3 / AC-D4 / AC-D5 / AC-D6. + +The original task list and validation gates are preserved below for the audit trail; T-cleanup.5 is the new addition for R4. 
+ +**Tasks** (run in numbered order; T-cleanup.3 is the largest single piece of work and should be its own round): + +- [x] **T-cleanup.0** (F0) Content-based migration-dir detection in `verify-contract-spaces.ts` — implemented pre-cleanup-milestone in commits `2473c9611` (export `MANIFEST_FILE`) + `c19086d90` (manifest-presence detection). Closed M1-cleanup R1: PR #434 thread `PRRT_kwDOQM0QJc6Am-ZV` replied "Done" + resolved. +- [x] **T-cleanup.1** (F1) Relocate the `test-contract-space` fixture out of `packages/3-extensions/` into a test-tree fixture. Drop the `@prisma-next/extension-test-contract-space` workspace registration, update consumer imports + `extensionPacks` references, update docs. Resolve thread `PRRT_kwDOQM0QJc6AnF1X`. — landed M1-cleanup R1, commit `db33795e3`. Fixture relocated to `test/integration/test/contract-space-fixture/` (option (b) — orchestrator's preferred path; rationale: M2 T2.5 extends the fixture so delete-and-recreate would be wasted churn, and integration-tests is a layering-clean home that already declares the SQL-domain deps). Package shape dropped entirely — the fixture is hosted by `@prisma-next/integration-tests`'s existing config. `pnpm-workspace.yaml` unchanged (new path falls under existing `test/**` glob). Initial attempt at `migration-tools/test/fixtures/` was reverted before commit because `@prisma-next/family-sql` runtime-depends on `@prisma-next/migration-tools`, which would have created a workspace cycle. +- [x] **T-cleanup.2** (F5 — closed-by-T-cleanup.1) After T-cleanup.1 landed and `tsdown.config.ts` was dropped along with the package shape, posted "Done" + resolve on CodeRabbit thread `PRRT_kwDOQM0QJc6Am7m6` referencing T-cleanup.1 / commit `db33795e3`. — closed M1-cleanup R1. +- [x] **T-cleanup.3** (F3 + F4 — coupled refactor) Hoist the contract-space identity surface into `@prisma-next/framework-components/control`. 
— landed M1-cleanup R2 in two commits: + - **F3 — commit `9e39382e4`**: canonical `APP_SPACE_ID` now lives in `packages/1-framework/1-core/framework-components/src/control/control-spaces.ts`; the four duplicate consts (`migration-tools/space-layout`, `sql-runtime/sql-marker`, postgres + sqlite statement-builders) re-export from the canonical home; raw `'app'` literals in SQL templates and adapter `params` arrays across `packages/2-sql/**/src` and `packages/3-targets/**/src` are replaced with `APP_SPACE_ID` (or `${APP_SPACE_ID}` interpolation inside SQL strings); `scripts/lint-app-space-id.mjs` (chained into `pnpm lint:deps`) enforces both invariants going forward. Resolved 9 GH threads. + - **F4 — commit `68ebbeb25`**: `ExtensionContractRef`/`ExtensionMigrationPackage`/`ExtensionContractSpace` moved to `framework-components/control` and renamed `ContractSpaceHeadRef`/`AuthoredMigrationPackage`/`AuthoredContractSpace`; `writeExtensionMigrationPackage` → `writeAuthoredMigrationPackage` (consumes `AuthoredMigrationPackage` directly); lampshaded `MigrationPackageContents` duplicate in `migration-tools/io.ts` deleted. To make the type move structurally feasible, `MigrationMetadata` + `MigrationHints` were also hoisted to `framework-components/control/control-migration-types.ts`; `migration-tools/src/metadata.ts` is now a re-export barrel so the 12 existing import sites stay unchanged. SQL family's `SqlControlExtensionDescriptor.contractSpace?:` specialises to `AuthoredContractSpace<…>`. Spec § 1 / § 3 / § 6 updated. Resolved 2 GH threads. +- [x] **T-cleanup.4** (F2) Delete `migrateMarkerSchemaStatements` (postgres + sqlite); replace with a runtime detection step at runner boot that surfaces a structured `LEGACY_MARKER_SHAPE` failure if the marker table exists in the pre-`space`-column shape. — landed M1-cleanup R3, commit `15e0534e1`. 
`migrateMarkerSchemaStatements` (postgres) and `migrateMarkerSchemaSqlite` (sqlite) deleted along with their dedicated integration tests (`marker-schema-migration.{integration.,}test.ts`). New `detectLegacyMarkerShape` precheck (postgres uses `INFORMATION_SCHEMA.COLUMNS`; sqlite uses `PRAGMA table_info`) runs inside `ensureControlTables` *before* any marker write; non-mutating; legacy table left untouched on failure. New `LEGACY_MARKER_SHAPE` variant added to `SqlMigrationRunnerErrorCode`; the failure summary directs operators to drop the legacy table and re-run `dbInit`. `LEGACY_MARKER_SHAPE` test cases added to each adapter's existing `runner.errors.{integration.,}test.ts` (test count 7→8 each), including the load-bearing "legacy table untouched" assertion. **Audit clean** — broader grep across `packages/2-sql/**/src` + `packages/3-targets/**/src` for analogous in-zero-range transitional migrations turned up no further candidates (ledger table, codec tables, other promote-shape paths all clean). GH thread `PRRT_kwDOQM0QJc6AnH2D` resolved. (Milestone reached SATISFIED at `15e0534e1` and was reopened post-close for F6 / T-cleanup.5 below.) +- [x] **T-cleanup.5** (F6 — own round) Flatten the contract-space typology; drop the `Authored*` prefix introduced under T-cleanup.3. — landed M1-cleanup R4, commit `f8649ba43`. `AuthoredContractSpace` → `ContractSpace`; `AuthoredMigrationPackage` → `MigrationPackage` (canonical structural form, in `framework-components/control`); standalone `MigrationPackage` (with `dirPath`) in `migration-tools/src/package.ts` → `OnDiskMigrationPackage extends MigrationPackage`; `writeAuthoredMigrationPackage` → `materialiseMigrationPackage` (renamed, **not deleted** — M3 cipherstash branch consumes it and absorbs the rename on its next rebase). 22 files changed (143 insertions / 126 deletions); 1 file renamed. Spec § 1 updated by implementer; § 3 + § 7 by orchestrator in `b9f96015e` (next commit). `MigrationOps` alias preserved. 
CLI consumers (`migration-{show,status,apply}`, `utils/command-helpers`) switched from `MigrationPackage` to `OnDiskMigrationPackage` for values originating from on-disk readers. AC-D6 PASS — `rg --type ts "Authored(ContractSpace|MigrationPackage)|writeAuthoredMigrationPackage" packages/ test/` returns no matches. **No GH thread to close** (F6 surfaced in interactive review). **M1-cleanup milestone SATISFIED at this commit.** The rename specification below is preserved for the audit trail: + - In `framework-components/control/control-spaces.ts`: rename `AuthoredContractSpace` → `ContractSpace`; rename `AuthoredMigrationPackage` → `MigrationPackage` (this is the canonical, structural shape: `{ dirName, metadata, ops }`). + - In `migration-tools/src/package.ts`: rename today's standalone `MigrationPackage` (with `dirPath`) → `OnDiskMigrationPackage`; declare it as `extends MigrationPackage` (the canonical type from framework-components) adding `dirPath: string`. `readMigrationPackage` / `readMigrationsDir` return `OnDiskMigrationPackage` going forward. + - Update `verifyMigrationHash`, `reconstructGraph`, and other migration-tools internals that consume the on-disk variant — switch their parameter types from the old standalone `MigrationPackage` to `OnDiskMigrationPackage`. Body changes: none. + - In `migration-tools/src/io.ts`: rename `writeAuthoredMigrationPackage(dir, pkg: AuthoredMigrationPackage)` → `materialiseMigrationPackage(dir, pkg: MigrationPackage)`. Different verb from the lower-level constituent-taking `writeMigrationPackage(dir, metadata, ops)` so the snapshot-vs-no-snapshot semantic difference stays visible at the call site. **Do not delete the helper** — M3 (cipherstash, branched off this branch) consumes it; M3 absorbs the rename on its next rebase. + - Drop redundant re-exports of the renamed-from `Authored*` symbols if any remain (no backward-compat shims per repo convention). 
+ - In `2-sql/9-family/src/core/migrations/types.ts`: `SqlControlExtensionDescriptor.contractSpace?: AuthoredContractSpace<…>` → `ContractSpace<…>`. + - In `test/integration/test/contract-space-fixture/control.ts`: update imports + type annotations. + - In `projects/extension-contract-spaces/specs/framework-mechanism.spec.md`: update § 1, § 3, and § 7 references to track the renames (orchestrator handles spec/plan amendments — the implementer is authorized to edit § 1 only, mirroring T-cleanup.3's spec-edit carve-out). + - This is a within-PR cleanup with no external GH thread to close. Validation gate is the standard workspace-wide set. + +**Validation gate:** + +- `pnpm typecheck` +- `pnpm test:packages` (workspace-wide; cross-package because the cleanup deletes / renames public exports) +- `pnpm test:integration` +- `pnpm test:e2e` +- `pnpm lint:deps` +- `pnpm build` +- `rg --type ts "export const APP_SPACE_ID" packages/` returns exactly one match — the canonical home in `framework-components/control`. +- `rg --type ts "[\"']app[\"']" packages/2-sql packages/3-targets/3-targets/postgres/src packages/3-targets/3-targets/sqlite/src packages/3-targets/6-adapters/postgres/src packages/3-targets/6-adapters/sqlite/src` returns no matches in `src/` directories outside test fixtures. +- All GitHub PR review threads listed in `reviews/code-review.md` § Findings log are replied "Done" and resolved on PR #434 (corresponds to AC-D5). + +### Milestone 2: Framework — codec lifecycle hook + db init/update per-space + +Introduce the codec lifecycle hook and refactor `db init` / `db update` to be per-space `findPathWithDecision` applications. Still no in-tree extension uses these; the synthetic test extension from M1 exercises them. + +**Tasks:** + +- [ ] **T2.1** Codec lifecycle hook API: extend `CodecControlHooks` with `onFieldEvent(event, ctx) => MigrationOp[]`. 
Triggered events: `'added'`, `'dropped'`, `'altered'` (where `'altered'` = any field property changed except `codecId`). Synchronous; receives prior + new IR for the table containing the changed field, app-space scope only. (satisfies: TC-14) +- [ ] **T2.2** Wire codec lifecycle hook into the application emitter's per-field diff logic; capture returned ops into the app-space migration's `ops.json`, alongside the user's structural ops. (satisfies: TC-14) +- [ ] **T2.3** db init per-space: extend the in-memory edge synthesis to be per-space-aware. App-space synthesizes from contract; extension-space walks the migration graph from current marker → headRef.hash via `findPathWithDecision`. Concatenate per cross-space ordering; single transaction. (satisfies: TC-21) +- [ ] **T2.4** db update per-space: same as T2.3 but for `db update` (advance current marker → headRef.hash per space). (satisfies: TC-21) +- [ ] **T2.5** Extend the synthetic test extension from T1.10 to exercise the codec hook + per-space db init/update. + +**Validation gate:** + +- `pnpm typecheck` +- `pnpm test:packages` +- `pnpm test:integration` +- `pnpm lint:deps` +- `pnpm build` + +### Milestone 3: Migrate cipherstash to contract space + +Migrate the cipherstash extension to a contract space, unblocking TML-2373. Cipherstash's `databaseDependencies.init` is removed; the `strictVerification: false` workaround is reverted. + +A task spec at `specs/cipherstash-migration.spec.md` captures the implementation detail (precise contract-space contents, baseline migration shape, codec hook behaviour, E2E test design). + +**Tasks:** + +- [ ] **T3.1** Author cipherstash's contract space contents: PSL/TS for `eql_v2_configuration` table, `eql_v2_encrypted` composite, `eql_v2_configuration_state` enum, `eql_v2.bloom_filter` / `hmac_256` / `blake3` domains, `ore_*` composites. Emit `contract.json`. 
(satisfies: TC-13) +- [ ] **T3.2** Author cipherstash's baseline migration: `installEqlBundle` op containing the vendored 5,750-line bundle SQL byte-for-byte + create-eql_v2_configuration op + create-type ops; each carrying `cipherstash:*` invariantIds. (satisfies: TC-1, TC-12) +- [ ] **T3.3** Author cipherstash's `headRef` declaring the current target hash + `cipherstash:*` invariants set. Wire descriptor module to expose `contractSpace`. (satisfies: TC-1) +- [ ] **T3.4** Implement codec lifecycle hook for `cipherstash:string@1`: emit `add_search_config` op on field-added(searchable: true), `remove_search_config` on field-dropped(searchable: true), and rotate-search-config on altered (searchable flip / typeParams change). Each op carries `cipherstash-codec:*` invariantId. (satisfies: TC-14) +- [ ] **T3.5** Remove cipherstash's `databaseDependencies.init` from its descriptor. (satisfies: TC-1) +- [ ] **T3.6** End-to-end integration test: user schema with `Encrypted` searchable column + cipherstash → `migrate` → `apply` → query. Live Postgres + EQL. Asserts directory layout, pinned per-space artefacts (`migrations/cipherstash/{contract.json,contract.d.ts,refs/head.json}`), marker rows, transactional apply, codec ops. (satisfies: TC-1 through TC-7, TC-11, TC-21, TC-29) +- [ ] **T3.7** Bump-cipherstash test: simulate cipherstash version bump (e.g. test fixture with two descriptor versions); run `migrate` against a project pinned at vX with vY now installed. Assert pinned files updated in place + new migration directory created. (satisfies: TC-25) +- [ ] **T3.8** Revert `strictVerification: false` workaround in cipherstash test setups + examples. (satisfies: TC-23) +- [ ] **T3.9** Re-verify NFR1: dbInit strict mode runs end-to-end without the workaround. 
+ +**Validation gate:** + +- `pnpm typecheck` +- `pnpm test:packages` +- `pnpm test:integration` +- `pnpm test:e2e` (cipherstash needs live Postgres + EQL) +- `pnpm lint:deps` +- `pnpm build` + +### Milestone 4: Migrate pgvector + monorepo example + +Migrate the only existing workspace consumer of `databaseDependencies` (pgvector) to a contract space. A monorepo example demonstrates the same mechanism applies to internal-package contract owners. arktype-json was investigated during plan finalisation and confirmed out of scope (no `databaseDependencies`, jsonb built-in, no contract space needed). + +**Tasks:** + +- [ ] **T4.1** Author pgvector's contract space contents: `vector` type (parameterized native type) declared in `contract.json`. Author baseline migration: `installVectorExtension` op carrying `CREATE EXTENSION IF NOT EXISTS vector` DDL + postcondition check; carries `pgvector:install-vector-v1` invariantId. (satisfies: TC-15) +- [ ] **T4.2** Wire pgvector descriptor's `contractSpace`; remove `databaseDependencies` from `packages/3-extensions/pgvector/src/exports/control.ts`. (satisfies: TC-15, TC-16, TC-17) +- [ ] **T4.3** End-to-end integration test for pgvector: user schema with `vector(N)` column → migrate → apply → query. Assert pinned `migrations/pgvector/{contract.json,contract.d.ts,refs/head.json}` are written with byte-equivalent content. (satisfies: TC-16, TC-17) +- [ ] **T4.4** Monorepo example: two internal packages each declare a contract space + an aggregator package depending on both. Build, emit per-space migrations, apply. (satisfies: TC-8) + +**Validation gate:** + +- `pnpm typecheck` +- `pnpm test:packages` +- `pnpm test:integration` +- `pnpm test:e2e` +- `pnpm lint:deps` +- `pnpm build` + +### Milestone 5: Remove `databaseDependencies` mechanism + close-out + +Remove the `databaseDependencies` mechanism from the framework. 
After M3 + M4 land, the only remaining consumer is gone (pgvector was the sole workspace consumer; cipherstash never used it). The blast radius is small — confirmed by spike: 3 files (the type def, the re-export, and pgvector's now-removed usage). Migrate finalised ADRs into `docs/`, strip transient project references, delete `projects/extension-contract-spaces/`. + +**Tasks:** + +- [ ] **T5.1** Remove `ComponentDatabaseDependencies` and `ComponentDatabaseDependency` types from `packages/2-sql/9-family/src/core/migrations/types.ts`. Remove the re-export from `packages/2-sql/9-family/src/exports/control.ts`. Remove the `databaseDependencies?` field from `SqlControlExtensionDescriptor`. Remove any planner / runner / verifier code paths that consume `databaseDependencies`. (satisfies: TC-19) +- [ ] **T5.2** Audit: `rg 'ComponentDatabaseDependencies|ComponentDatabaseDependency|databaseDependencies' packages/ examples/` returns zero matches. (satisfies: TC-19) +- [ ] **T5.3** New ADR — Contract spaces. Captures the design (per-space planner / runner / verifier, descriptor model, layout convention, pinned per-space artefacts, marker schema change, db init/update semantics). +- [ ] **T5.4** New ADR — Codec lifecycle hooks. Captures the hook contract (synchronous, app-space-bound, IR scope, altered semantics). +- [ ] **T5.5** Update ADR 154 — record supersession; mark `databaseDependencies` removed. +- [ ] **T5.6** Update ADR 021 — record marker schema gain of `space` column; PK change. +- [ ] **T5.7** Update subsystem docs: Migration System (per-space planner/runner/verifier; ADR 208 use in db init/update; pinned per-space artefact layout), Ecosystem Extensions & Packs (descriptor model; contract space authoring guide). +- [ ] **T5.8** NFR5 perf benchmark: emit + dbInit with 0 vs 1 extensions; assert < 5% delta. Capture results in `docs/`. (satisfies: TC-24) +- [ ] **T5.9** Close-out: migrate finalised ADRs into `docs/architecture docs/adrs/`. 
Strip references to `projects/extension-contract-spaces/` across the repo (replace with canonical `docs/` links). Delete `projects/extension-contract-spaces/`. PR title or branch references TML-2397 so Linear's GitHub integration auto-completes the issue on merge. + +**Validation gate:** + +- `pnpm typecheck` +- `pnpm test:all` (workspace-wide because we delete public API surfaces) +- `pnpm lint:deps` +- `pnpm build` +- `rg 'ComponentDatabaseDependencies|ComponentDatabaseDependency|databaseDependencies' packages/ examples/` returns no matches + +## Open Items + +Carrying forward from `spec.md` § Open Questions: + +1. **`invariantId` namespacing convention.** Recommended default: prefix convention (`cipherstash:install-eql-v1`, `app:create-table-User-v1`, `cipherstash-codec:User.email@v1`). Alternative: structured records. Decide during M1 / M3 implementation; captured in `specs/framework-mechanism.spec.md` for M1 and `specs/cipherstash-migration.spec.md` for M3. +2. **Cipherstash project (TML-2373) integration path.** Whether the in-flight cipherstash project pivots to consume this mechanism, continues with its current band-aid until this lands, or pauses. Decision deferred to a separate conversation; not a plan-level question. + +Plan-derived items needing resolution during execution: + +3. ~~**Marker schema migration safety.**~~ Resolved in M1 R2 (commit `ed257f226`). Postgres path uses transactional DDL (concurrent processes serialise on the table lock); SQLite path runs inside `BEGIN EXCLUSIVE`. Three-state idempotency tests (fresh / legacy single-row / already-migrated) on both targets prove the migration is safe under repeated invocation. ADR 029's shadow-DB preflight is user-DDL-scoped and does not apply to control-DDL; sub-spec § 2 amended accordingly in decision 5. +4. 
**Pre-existing `pnpm test:integration` flake (out of project scope).** Surfaced during M1 R1 — `pnpm test:integration` exhibits ~12-14 unrelated failures (vitest pool isolation / `mongodb-memory-server` / port-pool resource contention with 109 parallel test files). Not a regression caused by this project's changes. Should be filed as a separate ticket against the integration-test runner; this project will continue to interpret pre-existing flakes as pass-through during validation gates. + +Resolved during plan finalisation: + +- ~~**arktype-json's contract space shape.**~~ Spike (T4.4) confirmed: arktype-json ships no `databaseDependencies` and needs no DB scaffolding (jsonb is built-in). Dropped from scope. +- ~~**Synthetic test extension package location.**~~ Initially locked as a private workspace package at `packages/3-extensions/test-contract-space/`. **Revisited under M1-cleanup F1** (post-implementation design review): the package shape implied "real extension" with no external consumers to justify it. Relocated under T-cleanup.1 (commit `db33795e3`) to `test/integration/test/contract-space-fixture/` — fixture-shaped, no `package.json`, hosted by `@prisma-next/integration-tests`. +- ~~**Linear project elevation.**~~ Decision: keep TML-2397 as a single tracking issue (no Linear project / per-deliverable issues). +- ~~**Sub-spec timing.**~~ Decision: drafted now (alongside this plan). See `specs/framework-mechanism.spec.md` and `specs/cipherstash-migration.spec.md`. +- ~~**Reviewer assignment.**~~ William Madden (self-review on architectural decisions across planner / runner / verifier). 
diff --git a/projects/extension-contract-spaces/spec.md b/projects/extension-contract-spaces/spec.md new file mode 100644 index 0000000000..dc59e39db2 --- /dev/null +++ b/projects/extension-contract-spaces/spec.md @@ -0,0 +1,363 @@ +# Summary + +Extensions and other in-tree contract authors (monorepo packages) currently have no honest way to contribute schema objects to a Prisma Next application's database; they install SQL via a side-channel (`databaseDependencies.init`) and the resulting schema goes untracked, which causes `dbInit`'s strict verifier to reject those objects as extras. This project introduces **contract spaces** — disjoint `(contract.json, migration-graph)` units that the framework treats uniformly, with the live database as the integration point — so extensions become first-class schema contributors using the same planner, runner, and migration shape as application authoring. As part of the same project, the in-tree workspace extension that uses `databaseDependencies.init` (pgvector) is migrated to a contract space and the cipherstash extension is authored fresh on the new mechanism; the `databaseDependencies` mechanism is then removed so the framework has a single mechanism for schema-contributing extensions after this project lands. + +# Context + +## At a glance + +A Prisma Next application today owns exactly one contract: the user's. Extensions live alongside it but contribute *only* via the `databaseDependencies.init` hook — a runtime SQL escape hatch that runs during `dbInit` and is invisible to every other part of the system (planner, verifier, types). Anything an extension installs in the database that the verifier can see therefore looks like an "extra." That is the immediate cause of the cipherstash blocker, but the underlying gap is broader: there is no honest seam through which a non-application party can declare "I own these persistence structures; manage them with the same machinery you manage the application's." 
+ +The settled design promotes **contract spaces** to a first-class concept. Each space is a unit of `(contract.json, migration-graph)`. A single application's database is the **integration point** for all spaces it depends on. The framework runs the same planner per space, the same runner per space, and the same migration shape per space. The marker table grows one row per space; the schema gains a `space` column whose value identifies the row's owner. Aggregation across spaces happens in memory, only at the boundaries that strictly need it (verifier, typed DSL emission). + +``` +Application's DB +├── prisma_contract.marker +│ ├── (space=app) applied-hash, applied-invariants +│ └── (space=cipherstash) applied-hash, applied-invariants +├── (user tables, owned by app space) +└── (eql_v2_* tables / types, owned by cipherstash space) +``` + +Extensions own one space per extension package. Codecs (referenced by every column via `codecId`) gain a plan-time lifecycle hook, fired on field-added/dropped/altered events, that emits migration ops captured into the *consuming application's* migration JSON. Schema-driven extension behaviour (e.g. `addSearchConfig` for each searchable encrypted column) flows through codec hooks; static extension scaffolding (the EQL bundle, the `eql_v2_configuration` table) flows through the extension's own contract space. + +The user's `migrations/` directory grows a subdirectory per loaded extension space; app-space migrations stay at the root. 
Each extension space's subdirectory carries the extension's **current contract pinned on disk** alongside the migrations themselves — the user's repo is a complete, WYSIWYG record of every space the database depends on: + +``` +migrations/ +├── 20260507T1530_add_user/ ← app-space, flat at root +├── 20260507T1545_add_post/ ← app-space +├── cipherstash/ +│ ├── contract.json ← cipherstash-space CURRENT contract (pinned) +│ ├── contract.d.ts ← cipherstash-space CURRENT typings (pinned) +│ ├── refs/ +│ │ └── head.json ← cipherstash-space head ref (pinned) +│ ├── 20250101T0000_install_eql_bundle/ ← cipherstash-space, name preserved +│ └── 20250215T1000_add_config_column/ +└── pgvector/ + ├── contract.json + ├── contract.d.ts + ├── refs/ + │ └── head.json + └── 20240601T0000_install_vector/ +``` + +App-space's current `contract.json` continues to live at the project root (today's convention preserved); extension-space contracts live under `migrations//`. The asymmetry is deliberate: app-space is the user's authoring surface (its contract sits next to the PSL/TS schema), whereas extension-space contracts are *pinned mirrors* of state owned by the extension package. + +## Problem + +The cipherstash extension installs ~5,750 lines of SQL into the user's database via `databaseDependencies.init`: 1 schema, 1 table (`eql_v2_configuration`), 7 composite types (including the `eql_v2_encrypted` domain that user `Encrypted` columns reference via `nativeType`), 3 domains, 169 functions, 46 operators, 4 casts, 9 operator classes/families, 1 enum (`eql_v2_configuration_state`). None of these objects are described in the contract. `dbInit`'s strict verifier walks the live database and rejects every one of them as an unexpected extra column / extra table / extra type. Two band-aid solutions surfaced during cipherstash project execution and were both rejected: + +1. **Globally relax `strictVerification`** in the `db init` runner. 
Changes the user-facing semantics of the CLI (suddenly `dbInit` ignores extras the user *did* introduce, e.g. by hand-editing the database). Quietly weakens a safety property users may rely on. +2. **Per-extension allowlist on `ComponentDatabaseDependency.installs.{tables,schemas}`**. The framework keeps strict mode for the user's surfaces but turns a blind eye to declared extension scaffolding. Architecturally a band-aid: extensions declare *what tables they install* but not *what shape those tables have*, so the verifier can only check existence, not structure. The user can still drift the extension's tables and dbInit won't catch it. + +Both options paper over the underlying gap: extensions are not first-class. The framework has a contract concept and a migration graph concept, but only one party (the user) can use them. Anything else that touches the database has to wedge itself in through `databaseDependencies.init` and live in the verifier's blind spot. Cipherstash is the example forcing the conversation; monorepos with multiple internal contract owners exhibit the same shape. + +The `databaseDependencies.init` hook itself is not the problem — it is a reasonable runtime escape valve. The problem is that there is no *upstream* seam at the contract layer through which an extension can say "I own these structures, plan and verify them as you would mine." This project introduces that seam, then removes `databaseDependencies.init` so there is one mechanism, not two. + +## Approach + +### Contract spaces + +A **contract space** is a `(contract.json, migration-graph, head-ref)` unit. Every party that contributes persistence structures to a database owns exactly one space. The application owns one. Each installed extension owns one. A monorepo aggregator package can compose multiple internal-package spaces with its own. + +The framework operates per space: + +- **Planner**: runs per space. 
Diffs the prior contract for that space against the new contract for that space; produces a migration JSON for that space.
+- **Runner**: applies each space's migrations against the live database. Each space's marker-table row tracks its own applied hash + applied invariants.
+- **Verifier**: runs per space, but constructs an in-memory aggregate union of all spaces before checking expected schema against live schema. The aggregate exists only at verification time; it is never serialized.
+
+Spaces are disjoint at the artefact level (separate `contract.json`, separate migration graph) and integrate only via the live database. There is no "merged contract" data structure on disk; the database itself is what guarantees that all spaces are simultaneously satisfied. Each space's contract is materialised on disk in the user's repo (app-space at the project root; extension-space under `migrations/<space>/`), so the repo alone fully describes every space the database depends on — no `node_modules` access is required to read, hash, review, or verify the expected schema.
+
+A user's `prisma-next.config.ts` declares an extension by importing its descriptor module and adding it to `extensionPacks`. The framework consumes the descriptor at composition time — there is no `node_modules` filesystem-walking, which means the design works under Yarn PnP, Deno, pnpm symlinks, and bundlers without exception cases.
An extension descriptor exposes its contract space as in-memory JSON values via the module dependency graph:
+
+> _Illustrative — exact field names and types are up to the implementer:_
+>
+> ```ts
+> interface ExtensionDescriptor {
+>   // existing fields (codecs, query operations, target/family, …)
+>   contractSpace: {
+>     contractJson: ContractJson;
+>     migrations: ReadonlyArray<MigrationJson>; // each carries manifest + ops + contract.json snapshot
+>     headRef: { hash: string; invariants: readonly string[] };
+>   };
+> }
+> ```
+
+Extension authors use the same emit pipeline as application authors: PSL or TS schema → emitter → `contract.json` + per-migration directories. The descriptor module wires up those JSON artifacts via `import` declarations so they flow through the bundler / module resolver of the consuming application without filesystem assumptions.
+
+**Pinned per-space artefacts on disk.** The descriptor is the *extension's view of itself* (its current contract, migration graph, and head ref, in-memory at authoring time). The user's repo holds a *pinned mirror* of that view: for each loaded extension space, the framework writes `migrations/<space>/contract.json`, `migrations/<space>/contract.d.ts`, and `migrations/<space>/refs/head.json` into the user's repo on every emit. Bumping an extension shows up in the user's PR diff as: (a) updated pinned `contract.json` / `contract.d.ts` / `refs/head.json`, plus (b) one or more new migration directories under `migrations/<space>/`. Both halves are reviewable, hashable, and version-controlled. The mental model is "vendored extension contract + lockfile-equivalent head ref" — the user's repo never delegates "what schema does my database need" to a `node_modules` import at apply or verify time.
+ +Drift detection follows naturally: at every `migrate` invocation, the framework compares the descriptor's current `contractJson` against the on-disk pinned version; mismatch means "you've bumped this extension in `node_modules` but haven't run `migrate` yet" and prompts for emit. + +### Marker table + +The marker table grows from one row to N rows: one per `(space, applied-hash, applied-invariants)` triple. The schema gains a `space` column (text, not null) whose value is the space identifier (e.g. `app`, `cipherstash`, `pgvector`); the primary key changes from `id` to `space`. Each space tracks its own progression independently. The runner updates a space's row only when migrations from that space apply. + +**Source of truth.** The composition declared in `prisma-next.config.ts` (specifically, the `extensionPacks` list plus the always-present application space) is canonical. The set of marker rows must match this composition exactly. **Orphan marker rows** (rows for spaces no longer present in `extensionPacks`) are reported as errors with a clear remediation hint (manual cleanup of the orphan row). Extension removal is a v1 non-goal, but the verifier's behaviour on encountering this case is well-defined. + +**Lazy creation.** A space's marker row is created on the first successful apply of one of its migrations — whether triggered by `db init`, `db update`, or `migration apply`. Spaces declared in `extensionPacks` but never applied have no marker row yet, which the verifier handles by treating the space as needing initial application. + +### Codec-as-seam for schema-driven ops + +Some extension behaviour is *not* a function of the extension version but of the consuming application's schema. Cipherstash is the canonical example: when a user adds an `Encrypted` column with `searchable: true`, the database needs `SELECT eql_v2.add_search_config(table, column, …)` executed. That op is per-`(table, column)`, not per-cipherstash-version. 
+ +Codecs already exist as first-class objects: every column in the contract names its codec via `codecId`. This project promotes codecs to also carry a **plan-time lifecycle hook**. The hook contract: + +- **Synchronous.** Hook is a pure function over IR; no async I/O at plan time. +- **Triggered events:** `'added'`, `'dropped'`, `'altered'`. `'altered'` fires when a field exists in both contracts and any field property has changed *except* `codecId` (codec-id-changed is a v1 non-goal — see Non-goals). +- **IR scope:** the hook receives the prior + new IR for *the table containing the changed field*, scoped to the application's contract space. No cross-space visibility at hook time. Codec authors who need version information put it in the returned ops' `invariantId` (e.g. `cipherstash-codec:User.email@v1`). +- **Return value:** `MigrationOp[]`, each carrying its own `invariantId`. Returned ops are inlined into the consuming application's migration JSON (app space). Codec-emitted ops are app-space-bound by API shape — the hook cannot return ops targeting other spaces. Cross-space *SQL writes* are still possible inside an op's body (e.g. `INSERT INTO eql_v2.eql_v2_configuration ...` — the database integrates regardless), but the migration op record is app-space. + +> _Illustrative hook signature:_ +> +> ```ts +> interface CodecMigrationHook { +> onFieldEvent( +> event: 'added' | 'dropped' | 'altered', +> ctx: { +> priorTable?: TableIR; +> newTable?: TableIR; +> priorField?: FieldIR; +> newField?: FieldIR; +> }, +> ): MigrationOp[]; +> } +> ``` + +The hook fires during emit (plan time), receives the table IR before and after the change, and returns migration ops. Each op carries its own `invariantId`. The codec implementation that runs is the one *active at plan time*; the resulting JSON pins that snapshot of the codec's behaviour. Apply-time replay just runs the captured ops. + +Codec-emitted ops land in the **application's** contract space, not in the extension's. 
The data invariant *"search-config registered for `User.email`"* is conceptually about application content. Cipherstash's contract space stays a pure function of cipherstash's package version; consuming-app activity never reaches into it.
+
+### IR vocabulary boundary
+
+The contract IR (used by the planner and verifier per space) admits anything a column or field can name as `nativeType`:
+
+- **In IR**: tables (with columns, primary keys, foreign keys, indexes, uniques), enums, composite types, domains.
+- **Not in IR**: schemas, functions, operators, casts, operator classes/families, anything else not a column type.
+
+For the cipherstash extension's space, that means the `contract.json` carries:
+
+- `eql_v2_configuration` table.
+- `eql_v2_configuration_state` enum.
+- `eql_v2_encrypted` composite type.
+- `eql_v2.bloom_filter`, `eql_v2.hmac_256`, `eql_v2.blake3` domains, plus the various `ore_*` composites.
+
+Total contribution to the user's contract: ~3-5 KB pretty-printed. The remaining ~5,750 lines of bundle SQL (functions, operators, casts, op classes, the `eql_v2` schema itself) live as the body of one migration op (`installEqlBundle`) inside cipherstash's migration graph. That op carries its own `invariantId` and is treated by the runner as an opaque DDL step.
+
+The same boundary applies to other extensions: pgvector's `vector` type is in its contract IR; the `CREATE EXTENSION vector` DDL is the body of one migration op.
+
+### Migration JSON shape and on-disk layout
+
+A single user emit produces one migration JSON directory per space whose contract changed in this emit, plus pinned per-space `contract.json` / `contract.d.ts` / `refs/head.json` files for every loaded extension space. Each migration directory is the ADR 197 shape (`{manifest, ops, contract.json snapshot}`); the framework writes everything into the user's repo using the **per-space subdirectory convention**:
+
+- App-space migrations live under `migrations/<timestamp>_<name>/`.
App-space's current `contract.json` lives at the project root (today's convention). +- Each loaded extension space's migrations live under `migrations///`. Each extension space's *current* `contract.json` (and its `contract.d.ts` and `refs/head.json`) lives at `migrations//`. + +For extension spaces, the framework reads the extension's `contractJson`, `migrations`, and `headRef` from the extension descriptor's in-memory values (loaded via the module dependency graph at authoring time) and **emits** them as JSON files into the user's `migrations//`. Byte-equivalence with the extension's own canonical form is guaranteed by the canonicalization rules already used for hashing — same data in, same JSON out, regardless of bundler / package-manager / runtime context. + +``` +migrations/ +├── 20260507T1530_add_user/ +│ ├── manifest.json +│ ├── ops.json ← carries app structural ops + codec-emitted ops +│ └── contract.json ← app-space contract snapshot at the time of emit +├── cipherstash/ +│ ├── contract.json ← cipherstash-space CURRENT contract (pinned) +│ ├── contract.d.ts ← cipherstash-space CURRENT typings (pinned) +│ ├── refs/ +│ │ └── head.json ← cipherstash-space head ref (pinned) +│ ├── 20250101T0000_install_eql_bundle/ +│ │ ├── manifest.json +│ │ ├── ops.json ← carries the EQL bundle SQL as the body of one op +│ │ └── contract.json ← cipherstash-space contract snapshot +│ └── 20250215T1000_add_config_column/ +│ └── … +└── pgvector/ + ├── contract.json + ├── contract.d.ts + ├── refs/ + │ └── head.json + └── 20240601T0000_install_vector/ + └── … +``` + +Each migration's `ops.json` is space-scoped: it contains only ops belonging to that space. 
**Codec-emitted ops belong to app-space** and are inlined into the relevant app-space migration's `ops.json`, alongside the user's own structural ops: + +> _Illustrative — final shape is up to the implementer:_ +> +> ```jsonc +> // app-space migration: 20260507T1530_add_user/ops.json +> { +> "from": "", +> "to": "", +> "operations": [ +> // From user authoring (invariantId: app:create-table-User-v1) +> { "invariantId": "app:create-table-User-v1", "execute": ["CREATE TABLE \"User\" (...)"] }, +> // From cipherstash codec hook on User.email (invariantId: cipherstash-codec:User.email@v1) +> { "invariantId": "cipherstash-codec:User.email@v1", "execute": ["SELECT eql_v2.add_search_config(...)"] } +> ] +> } +> ``` +> +> ```jsonc +> // cipherstash-space migration: cipherstash/20250101T0000_install_eql_bundle/ops.json +> { +> "from": null, +> "to": "", +> "operations": [ +> { "invariantId": "cipherstash:install-eql-v1", "execute": ["...EQL bundle SQL..."] }, +> { "invariantId": "cipherstash:create-eql_v2_configuration-v1", "execute": ["CREATE TABLE eql_v2_configuration (...)"] } +> ] +> } +> ``` + +WYSIWYG-the-runnable is preserved per space, and now extends to verification: every consumer of "expected schema" — runner, verifier, `dbInit`, `db update` — reads only the JSON files under the user's repo (root-level app-space `contract.json` + per-space `migrations//contract.json` + migration directories). The extension descriptor module is consumed only at **authoring time** (`migration plan`, run by a dev locally) — to know the extension's current state for diffing against the pinned on-disk version. At apply time and at verify time in CD, no extension package import is required: the user's repo alone is sufficient. + +### Apply-time atomicity and ordering + +A user emit may produce migrations in multiple spaces (e.g. user bumped cipherstash and refactored their own tables in the same emit). All migrations across all changed spaces apply in a **single transaction**. 
This matches the existing transaction control surface and makes partial-failure recovery moot: either every space advances or none do.
+
+**Cross-space ordering** follows the implicit dependency direction (app depends on extensions): all extension-space migrations apply first, app-space migrations apply second. Within a space, migrations apply in the order returned by the per-space planner (graph order). This convention is sufficient for v1 because cross-extension dependencies are a non-goal; introducing a formal cross-space dependency graph is deferred until needed.
+
+### Verification flow
+
+`dbInit` (and any other verifier path) constructs an in-memory aggregate of all loaded contract spaces by reading the user's repo:
+
+1. Read the application's `contract.json` from the project root.
+2. For each `extensionPacks` entry, read the pinned `migrations/<space>/contract.json` from the user's repo. The descriptor module is *not* imported during verification — pinned files are authoritative.
+3. Aggregate to a single in-memory `expected schema` representation. Aggregation is deterministic and order-independent across `extensionPacks` declaration order (NFR6); v1 implementation: alphabetical sort by space identifier before aggregation.
+4. Compare against the live database; reject if any space's marker-row hash mismatches its expected hash.
+5. Reject if any marker row exists for a space not present in `extensionPacks` (orphan marker rows; see Marker table).
+6. Reject if any `extensionPacks` entry has no pinned `migrations/<space>/contract.json` on disk (the user has declared an extension but never run `migrate`); remediation: run `prisma-next migrate`.
+7. Reject if any `migrations/<space>/` directory exists on disk for a space not present in `extensionPacks` (orphan pinned directory); remediation: remove the directory or re-add the extension to `extensionPacks`.
+ +The single canonical "merged hash" question goes away: each space's hash is checked individually against the marker-table row for that space. Strict mode is preserved per space; the IR vocabulary boundary (which objects are verifiable structurally) is the same as today, just applied across all loaded spaces' IRs. + +### `db init` / `db update` + +`db init` (greenfield) and `db update` (advance to head) become **per-space** applications of ADR 208's invariant-aware path-finding primitive: + +- For each loaded space (app + each extension), look up the space's current target ref → `(hash, invariants)` from the user's repo. The application's target ref comes from the user's emitted contract; an extension's target ref comes from the pinned `migrations//refs/head.json`. +- Compute `effectiveRequired = ref.invariants − marker.invariants` for each space. +- Run `findPathWithDecision(currentMarkerHash, ref.hash, effectiveRequired)` per space. +- Concatenate the returned per-space paths in the cross-space ordering convention (extensions first, app-space second). Apply in a single transaction. + +For app-space, the existing `db init` synthetic-edge model is preserved: when no migration exists on disk for app-space, the framework synthesizes a `∅ → head` edge derived directly from the contract IR (today's behaviour). For extension-space, synthesis from the contract alone is impossible — the IR vocabulary boundary excludes the bundle-SQL bodies — so the runner walks the extension's migration graph as emitted into the user's repo. + +Like the verifier, `db init` / `db update` runtime paths consult only the user's repo (pinned head refs, pinned migration directories). Descriptor access is required only at authoring time (`migration plan`). + +This gives extension authors the same authoring expressivity as application authors: multiple paths, multiple baselines, squash, and invariant-aware routing all extend to extension-space without special-casing. 
The per-space planner is *exactly* `findPathWithDecision`; no new graph algorithm is needed. + +### What this design does not do + +- It does not merge `contract.json` files into a single combined contract. Each space's pinned `contract.json` stays its own file (app-space at the project root; extension-space at `migrations//contract.json`). The verifier aggregates them in memory only, never on disk. +- It does not introduce cross-space dependencies as a first-class concept. Conventions and the single-transaction property cover the v1 cases. +- It does not change the authoring surface of `prisma-next.config.ts` beyond what `extensionPacks` already provides; an extension being listed there continues to mean "use this extension" — what changes is the framework's interpretation of that listing. +- It does not introduce a new authoring tool for extension authors. They use the same emit pipeline as application authors against their extension's own PSL/TS schema. +- It does not require the user to hand-edit pinned per-space artefacts. The framework owns those files and overwrites them on every `migrate`. The user's role is to declare extensions in `extensionPacks` and to run `migrate` after upgrading; the pinned files are framework-managed records, not authoring surfaces. + +# Requirements + +## Functional Requirements + +- **FR1.** Extensions ship a contract space (a `contract.json` + a migration graph + a head ref) exposed as in-memory JSON values via the extension descriptor module. The descriptor module imports the JSON artifacts so they flow through the consuming application's bundler / module resolver — no `node_modules` filesystem walking from the framework. +- **FR2.** The framework loads each `extensionPacks` entry's descriptor only at **authoring time** (during `migration plan` / `migrate`). At apply time and verify time, the framework reads the user's repo only — no descriptor import is required. 
+- **FR3.** The marker table tracks per-space applied state: one row per `(space-identifier, applied-content-hash, applied-invariants)`. The marker schema gains a `space` column (text, not null) and primary keys by `space`. +- **FR4.** The migration planner runs per space, producing one migration JSON directory per space whose contract changed in this emit. Extension-space migration directories are emitted from the extension descriptor's in-memory values into the user's `migrations///`. App-space migration directories are written at `migrations//`. +- **FR5.** The migration runner applies each space's migrations in order, updating the corresponding marker-table row. Cross-space ordering: all extension-space migrations apply first, app-space migrations apply second. All applied migrations across all changed spaces in a single emit are committed in a single transaction. +- **FR6.** The verifier constructs an in-memory aggregate of all loaded spaces' contracts by reading the user's repo (app-space `contract.json` at the project root + each loaded extension's pinned `migrations//contract.json`). It then checks the live database against the aggregate. Each space's marker-row hash is checked against its pinned contract's content hash; strict mode rejects mismatches per space. The verifier rejects: (a) marker rows for spaces not present in `extensionPacks` (orphan markers), (b) `extensionPacks` entries with no pinned `migrations//contract.json` on disk (declared-but-unmigrated), and (c) `migrations//` directories on disk for spaces not present in `extensionPacks` (orphan pinned directories). Each rejection carries a clear remediation hint. +- **FR7.** Codecs may declare a plan-time lifecycle hook fired on field-added / field-dropped / field-altered events (where 'altered' = any field property changed except `codecId`). 
The hook is synchronous; receives the prior + new IR for the table containing the changed field (app-space scope only); returns `MigrationOp[]`, each with its own `invariantId`. Returned ops are inlined into the consuming application's migration JSON — the hook cannot return ops targeting other spaces. +- **FR8.** Codec-emitted migration ops are captured into the consuming application's migration JSON (application space), not into the extension's space. The application's emitter runs the hook for each event in the application contract diff. +- **FR9.** The contract IR vocabulary admits anything a column / field can name as `nativeType`: tables, enums, composite types, domains. Persistence structures not in this set (schemas, functions, operators, casts, op classes/families) are carried inside migration ops as opaque steps with `invariantId`s; they are not modelled in the IR. +- **FR10.** Per-space artefacts are self-contained at apply time and verify time: the runner and the verifier read only the JSON files under the user's repo (root-level app-space `contract.json` + each loaded extension's pinned `migrations//contract.json`, `contract.d.ts`, `refs/head.json`, and `/` directories). No extension descriptor is imported during apply or verify. Extension descriptors are consumed only at authoring time (`migration plan` / `migrate`). +- **FR11.** Extension `invariantId`s, once published in a release, are immutable. Renaming or removing a published `invariantId` is a breaking change for downstream consumers. +- **FR12.** The aggregate construction in FR6 is in-memory only; no merged contract is persisted on disk. Each space's `contract.json` remains the single source of truth for that space. +- **FR13.** The existing `databaseDependencies` mechanism is removed at the end of this project. 
Concretely: pgvector (the only workspace consumer of `databaseDependencies` — confirmed by the T4.4 spike) is migrated to a contract space; cipherstash is authored fresh on the new mechanism (cipherstash is not a workspace package today). arktype-json is out of scope: confirmed by the spike to ship no `databaseDependencies` and require no DB scaffolding (jsonb is a built-in Postgres type), so it does not need a contract space. The framework has a single mechanism for schema-contributing extensions after this project lands. +- **FR14.** `db init` and `db update` are per-space applications of `findPathWithDecision(currentMarker, ref.hash, ref.invariants − marker.invariants)`. Per-space results are concatenated using the cross-space ordering convention (extensions first, app-space second) and applied in a single transaction. App-space's existing synthetic-edge behaviour for greenfield is preserved when no app-space migration is on disk; extension-space always walks the migration graph. Head refs are read from the user's repo: app-space ref from the project-root contract, extension-space refs from the pinned `migrations//refs/head.json`. +- **FR15.** Extensions ship at least one ref (the head ref) declaring their current target hash and required invariants. Multiple refs are permitted with the same semantics as application-space refs. +- **FR16.** User-repo on-disk layout: app-space's current `contract.json` lives at the project root (today's convention). App-space migrations live at `migrations//`. Each loaded extension's pinned current `contract.json`, `contract.d.ts`, and `refs/head.json` live at `migrations//`; that extension's migrations live at `migrations///`. Discovery is convention-based: no manifest or registry file is required. 
+- **FR17.** On every `migrate` invocation, the framework writes (or overwrites) each loaded extension space's pinned `contract.json`, `contract.d.ts`, and `refs/head.json` from the descriptor's current values, alongside any new migration directories. Bumping an extension produces a reviewable PR diff that includes the pinned contract change and any new migration directories. Drift detection: at every `migrate`, the framework compares the descriptor's current `contractJson` against the on-disk pinned version and surfaces mismatches as "extension bumped — run `migrate` to materialise the change." + +## Non-Functional Requirements + +- **NFR1.** No user-facing semantic change to `dbInit` strict mode. The `strictVerification: false` workaround introduced under cipherstash project execution is reverted as part of this work. +- **NFR2.** The user's repo is **WYSIWYG-complete**: every artefact required to predict, hash, verify, or apply the database's expected schema lives on disk in the user's repo, version-controlled and reviewable. This applies per space — the app-space `contract.json` at the project root and each extension space's pinned `migrations//contract.json`, `contract.d.ts`, `refs/head.json`, and migration directories. A reader of the repo (or a CI pipeline, or an auditor) can answer "what does this database need to look like" without importing any extension package. +- **NFR3.** The framework's planner / runner / verifier consume extension descriptors only at **authoring time** (during `migration plan` / `migrate`), to emit pinned per-space contracts + migrations into the user's repo. At apply time and verify time (CD), the framework reads only the user's repo files — no descriptor-driven contract or migration data is required. (Application code at query-execution time may continue to import extension packages for codec runtime behaviour; that is unchanged and orthogonal to schema verification.) 
+- **NFR4.** The cipherstash team's vendored EQL bundle SQL remains valid as-is. The bundle SQL becomes the body of one migration op in cipherstash's contract space; no fork or split of the bundle is required of the cipherstash team. +- **NFR5.** Performance: extension-space planning and verifier aggregation must not measurably regress emit-time or `dbInit` performance for applications with no extensions. Target: < 5% wall-clock overhead on a representative no-extension emit + dbInit. +- **NFR6.** The aggregation pass for verification is deterministic and order-independent across `extensionPacks` declaration order: two applications with the same set of installed extensions produce the same aggregate regardless of declaration order. v1 implementation: sort by space identifier alphabetically before aggregating. + +## Non-goals + +- **Extension removal semantics.** What happens when a user removes an extension from `extensionPacks` while their schema still depends on extension-installed types (e.g. `Encrypted` columns referencing `eql_v2_encrypted`). Defer to a follow-up; until then, removal is unsupported and the verifier reports orphan marker rows as errors. +- **Codec-id-changed lifecycle event.** When a user upgrades an extension in a way that changes a codec ID (`cipherstash/string@1` → `@2`), the codec needs a way to emit a "rotate" migration op. Cleanly extends the existing event vocabulary; deferred until needed. +- **Multi-extension interactions.** Two extensions claiming the same table or type name, ordering across extensions, dependency between extensions. Convention-based ordering only for v1. v1 rule for type-name collisions across spaces: the verifier errors with a clear collision report. +- **Formal cross-space dependency graph.** Convention ordering (extensions first, app-space second; scaffolding → structural → codec inside a single migration) is sufficient given the single-transaction property. 
+- **Replacing or restructuring the application's existing contract IR.** The application's `contract.json` shape is unchanged; what changes is the framework treating multiple such files as siblings rather than the only one.
+- **Authoring tools for extension authors.** They will use Prisma Next's existing emit pipeline against their extension's own PSL/TS schema. No new tooling is needed.
+
+# Acceptance Criteria
+
+- [ ] **AC1** (covers FR2, FR6, NFR1). A fresh Postgres database has the cipherstash extension installed via the new mechanism. `dbInit` runs in strict mode (no `strictVerification: false` flag, no per-extension allowlist) and succeeds. The verifier sees `eql_v2_configuration`, `eql_v2_configuration_state`, `eql_v2_encrypted`, the various `ore_*` composites, and the domains, and recognises them as expected (because cipherstash's contract space declared them). An additional unexpected column added by hand to `eql_v2_configuration` causes `dbInit` to fail with a strict-mode error, proving strict mode is preserved per space.
+- [ ] **AC2** (covers FR1, FR4, FR5, FR7, FR8, FR10, FR16, FR17). A user adds `cipherstash` to `extensionPacks` and adds an `Encrypted` column with `searchable: true` to a fresh `User` table in their PSL. `prisma-next migrate` produces:
+  - One app-space migration directory at `migrations/<timestamp>_add_user/` containing the user's `CREATE TABLE` op and a codec-emitted `add_search_config` op (with invariantId namespaced `cipherstash-codec:*`), both in the same `ops.json`.
+  - Pinned cipherstash artefacts: `migrations/cipherstash/contract.json`, `migrations/cipherstash/contract.d.ts`, `migrations/cipherstash/refs/head.json` — byte-equivalent to the descriptor's current values.
+  - One or more cipherstash-space migration directories at `migrations/cipherstash/<timestamp>_<name>/` containing cipherstash scaffolding ops (with invariantId namespaced `cipherstash:*`).
+ + `prisma-next db apply` runs both migrations in a single transaction (extension-space first); the marker table afterwards has two rows (`app`, `cipherstash`), each with the expected hash. +- [ ] **AC3** (covers FR4, FR11, FR15, FR17). The cipherstash team publishes a new package version that adds one new migration to its shipped graph (e.g. adding a column to `eql_v2_configuration`) and bumps its `headRef`. A user upgrades the package and runs `prisma-next migrate`. The pinned `migrations/cipherstash/contract.json`, `contract.d.ts`, and `refs/head.json` are updated in place; one new cipherstash-space migration directory is created containing only the new op. `db apply` advances the cipherstash space's marker row. +- [ ] **AC4** (covers FR1-FR6). A monorepo with two internal packages each declaring its own contract space, plus an aggregating package that depends on both, builds successfully, emits per-space migrations on changes, and applies them. The mechanism for monorepo composition is the same as for extensions; no monorepo-specific framework code is required. +- [ ] **AC5** (covers FR3, FR6, NFR6). After applying any combination of multi-space migrations, an integration test reads the marker table and asserts (a) one row per loaded space, (b) each row's hash equals the corresponding `contract.json`'s content hash, (c) the row set is the same regardless of `extensionPacks` declaration order. +- [ ] **AC6** (covers NFR2, NFR3, FR10). Both the runner (apply path) and the verifier (`dbInit` / `db update` verify path) operate without importing any extension descriptor module — they read only the user's repo (root-level app-space `contract.json` + per-space `migrations//contract.json` and migration directories). Authoring (`migration plan` / `migrate`) is the only flow that needs descriptor access. +- [ ] **AC7** (covers NFR4, FR13). 
The cipherstash extension's existing vendored EQL bundle SQL is the body of exactly one migration op in cipherstash's contract space (the `installEqlBundle` op). Bundle content is unchanged from what is shipped today. +- [ ] **AC8** (covers FR9). The contract IR includes `eql_v2_encrypted` (composite type), `eql_v2_configuration_state` (enum), and the `eql_v2` domains used as column types. The contract IR does **not** include the EQL bundle's functions, operators, casts, or operator classes/families — those live inside the body of the `installEqlBundle` migration op only. +- [ ] **AC9** (covers FR8, codec ownership of schema-driven ops). When a `searchable: true` `Encrypted` column is dropped from a user table, the codec lifecycle hook emits the corresponding `remove_search_config` op into the application-space migration. No change to cipherstash's contract space's marker row results from this. +- [ ] **AC10** (covers FR13). The pgvector extension is migrated to a contract space. `pgvector` declares the `vector` type in its `contract.json`; its initial migration installs the `vector` extension as the body of one op. A user adds pgvector to `extensionPacks`, adds a column with `nativeType: 'vector(N)'` to their schema, runs `prisma-next migrate` and `db apply`. The marker table has rows for `app` and `pgvector`. `dbInit` against the resulting database succeeds in strict mode. +- [ ] **AC11** (covers FR13). After cipherstash and pgvector are migrated, the framework removes `ComponentDatabaseDependencies` (and related types / re-exports) from the SQL family. A repo-wide search for `ComponentDatabaseDependencies`, `ComponentDatabaseDependency`, and `databaseDependencies` returns no consumer matches. arktype-json is unaffected (it never used `databaseDependencies`). +- [ ] **AC12** (covers FR14). 
On a fresh database with cipherstash in `extensionPacks`, `db init` walks cipherstash-space's migration graph (applying the bundle install + scaffolding) and synthesizes the app-space delta edge from the user's contract, applying both in a single transaction. Marker rows for both spaces are created with the expected hashes.
+- [ ] **AC13** (covers FR6, orphan marker handling). A user removes an extension from `extensionPacks` while a marker row for that extension still exists in the database. `dbInit` fails with a clear error identifying the orphan row and the recommended remediation (manual cleanup).
+- [ ] **AC14** (covers FR17, NFR2). A user bumps cipherstash from `vX` to `vY` (descriptor's `contractJson` content changes) and runs `prisma-next migrate`. The user's PR diff includes: (a) updated `migrations/cipherstash/contract.json`, `contract.d.ts`, `refs/head.json`; (b) one new migration directory under `migrations/cipherstash/<timestamp>_<name>/`. No file outside `migrations/` (and the project root contract for any incidental app-space changes) is touched.
+- [ ] **AC15** (covers NFR2, NFR3, FR2, FR10). The verifier and the runner are exercised in a context where extension descriptor modules are *not* importable (e.g. `node_modules` for those extensions deleted prior to the test). `dbInit` and `db apply` succeed, reading per-space contracts and migrations from the user's repo only. (`migrate` / `migration plan` is *not* required to work in this context — it needs the descriptor.)
+- [ ] **AC16** (covers FR6, declared-but-unmigrated). A user adds an extension to `extensionPacks` but never runs `migrate` (no `migrations/<space>/` directory exists yet). `dbInit` fails with a clear error: "extension `<space>` is declared but has not been emitted; run `prisma-next migrate`." Conversely, a `migrations/<space>/` directory present on disk for an extension *not* in `extensionPacks` causes `dbInit` to fail with an orphan-pinned-directory error and remediation hint.
+ +# Other Considerations + +## Security + +The design does not change the threat model. Extensions are still trusted code (they execute SQL in the user's database). Adding the contract-space mechanism does not give extensions any additional capability they did not have via `databaseDependencies.init`; it gives the framework more visibility into what extensions actually do. That is a net security improvement: the verifier now catches drift in extension-installed objects in strict mode, where today it cannot see them at all. + +## Cost + +Compute and storage costs are negligible. The marker table grows from one row to N rows where N is the number of contract spaces — typically small (1 application + a handful of extensions). Migration JSON sizes grow per emit by the inlined extension-op bodies (cipherstash's bundle adds ~150 KB of SQL string content to migration JSONs that introduce or upgrade cipherstash; one-shot, not recurring). + +## Observability + +The marker table's per-space rows give operators a direct view of which contract space is at which applied hash. No additional metrics are required beyond what the existing migration system emits, except that all such metrics should be tagged with the space they relate to. + +## Data Protection + +No PII or sensitive data crosses any new boundary. Extension contract content is shipped publicly in the extension's package; user data is in the database where it always was. + +## Analytics + +Not applicable. + +# References + +- ADR 197 — Migration packages snapshot their own contract. +- ADR 208 — Invariant-aware migration routing (provides the per-space `findPathWithDecision` primitive used by `db init` / `db update` / `migration apply`). +- ADR 154 — Component-owned database dependencies (partially superseded by this work for schema-contributing extensions; `databaseDependencies.init` is removed at end of project). +- ADR 021 — Contract Marker Storage (marker schema gains a `space` column under this work). 
+- Cipherstash project handover: `projects/cipherstash-integration/project-1/HANDOVER.md` (transient; will be removed at cipherstash project close-out — see Linear ticket for canonical follow-up). +- Cipherstash team-facing design doc: `projects/cipherstash-integration/project-1/cipherstash-team-design.md`. +- Cipherstash team open questions: `projects/cipherstash-integration/project-1/cipherstash-team-questions.md`. +- Linear: TML-2397 (this project), TML-2373 (cipherstash project parent — the immediate consumer / blocker), TML-2376 and TML-2388 (filed during cipherstash project execution; in the same neighbourhood, independent). +- The original `databaseDependencies.init` hook lives in the framework's component descriptor types — implementer should locate during pre-implementation reconnaissance. + +# Open Questions + +These are residual decisions left for the implementer or for resolution before / during implementation. None affect the architectural shape; all are degrees of freedom inside the design above. + +1. **Namespacing of `invariantId`s.** Recommended default: prefix convention (`cipherstash:install-eql-v1`, `app:create-table-User-v1`, `cipherstash-codec:User.email@v1`). Alternative: structured records `{source: "cipherstash@*", id: "install-eql-v1"}` carried alongside the ID. The prefix convention is simpler and sufficient for v1; structured records would only be needed if extensions need to be renamed in user repos, which is out of scope. +2. **Cipherstash project (TML-2373) integration path.** Whether the in-flight cipherstash project pivots to consume this mechanism, continues with its current band-aid until this lands, or pauses. Decision deferred to a separate conversation; not a spec-level question. 
diff --git a/projects/extension-contract-spaces/specs/cipherstash-migration.spec.md b/projects/extension-contract-spaces/specs/cipherstash-migration.spec.md new file mode 100644 index 0000000000..dd6b2c5289 --- /dev/null +++ b/projects/extension-contract-spaces/specs/cipherstash-migration.spec.md @@ -0,0 +1,279 @@ +# Summary + +Implementation contract for **authoring the cipherstash extension as a contract space** on the new framework mechanism. Drives [Milestone M3](../plan.md#milestones) of the project plan. This is *new authoring* — cipherstash is not a workspace package today (it lives as in-flight design under `projects/cipherstash-integration/`); M3 produces the first actual `packages/3-extensions/cipherstash/` package, built directly on top of contract spaces. Reads on top of [the project spec](../spec.md) and the [framework-mechanism sub-spec](./framework-mechanism.spec.md). + +**Parent project spec:** [`projects/extension-contract-spaces/spec.md`](../spec.md). + +# Description + +The cipherstash extension is the *driving consumer* of the contract-space mechanism. Its scope: + +- A `~3-5 KB` (pretty-printed) contract describing the typed objects EQL exposes (one composite type, one enum, three domains, several `ore_*` composites, one configuration table). +- A baseline migration that installs the EQL bundle SQL (the existing vendored `~5,750 lines` produced by the cipherstash team) plus the configuration table and types, all carrying `cipherstash:*` invariantIds. +- A `cipherstash:string@1` codec implementing the lifecycle hook from the framework-mechanism sub-spec, emitting `add_search_config` / `remove_search_config` ops on field added / dropped events for `searchable: true` `Encrypted` columns. +- A descriptor that exposes `contractSpace: { contractJson, migrations, headRef }` per the framework-mechanism sub-spec. 
+ +Because cipherstash never used `databaseDependencies` in shipped code (the spike confirmed it's all in-flight design), there's nothing to *migrate from*. The work is greenfield authoring against the new mechanism. The existing in-flight artefacts under `projects/cipherstash-integration/` are reference material — design intent for the EQL bundle, the `eql_v2_configuration` table, the codec hook semantics — but not source-of-truth code. + +# Requirements + +## 1. Package layout + +Location: `packages/3-extensions/cipherstash/`. Mirrors `packages/3-extensions/pgvector/`'s structure: + +``` +packages/3-extensions/cipherstash/ +├── package.json # @prisma-next/extension-cipherstash, public +├── tsdown.config.ts +├── tsconfig.json +├── tsconfig.prod.json +├── vitest.config.ts +├── biome.jsonc +├── README.md +├── src/ +│ ├── core/ +│ │ ├── cipherstash-codec.ts # codec definition + lifecycle hook impl +│ │ ├── eql-bundle.sql # vendored EQL bundle (byte-for-byte) +│ │ ├── pack-meta.ts +│ │ └── contract-space/ +│ │ ├── contract.json # authored, source of truth +│ │ ├── contract.d.ts # authored / co-emitted from PSL or TS schema +│ │ └── migrations/ +│ │ ├── 20260601T0000_install_eql_bundle/ +│ │ │ ├── manifest.json +│ │ │ ├── ops.json # references eql-bundle.sql via build step +│ │ │ └── contract.json # snapshot per ADR 197 +│ │ └── (additional migrations as cipherstash bumps) +│ └── exports/ +│ ├── control.ts # extension descriptor with contractSpace wired +│ └── runtime.ts # codec runtime (encoding/decoding) +└── test/ + └── … +``` + +`contract.json` and `migrations/*/ops.json` are committed source-of-truth. The build step (`tsdown`) inlines `eql-bundle.sql` into the `installEqlBundle` op's body so the published package's descriptor exposes self-contained migration JSON values to the framework's emit pipeline (per FR1). + +## 2. Contract IR contents + +`contract.json` declares the typed objects EQL exposes that user columns can name as `nativeType`. 
Per the project spec § "IR vocabulary boundary": + +| Object | Kind | Notes | +|---|---|---| +| `eql_v2_configuration` | table | columns: `id` (text PK), `state` (eql_v2_configuration_state enum), `data` (jsonb) | +| `eql_v2_configuration_state` | enum | values: `'pending' \| 'active'` | +| `eql_v2_encrypted` | composite type | the `nativeType` user `Encrypted` columns reference | +| `eql_v2.bloom_filter` | domain | (under `eql_v2` schema) | +| `eql_v2.hmac_256` | domain | | +| `eql_v2.blake3` | domain | | +| `eql_v2.ore_block_u64_8_256` | composite type | | +| `eql_v2.ore_cclw_u64_8` | composite type | | +| (further `ore_*` composites used by encrypted-column nativeTypes) | composite type | enumerated by the EQL bundle | + +**Not in IR** (carried inside the `installEqlBundle` op as opaque DDL): the `eql_v2` schema, all 169 functions, 46 operators, 4 casts, 9 operator classes / families. These are not expressible as column-level `nativeType`s, so they live below the IR vocabulary boundary (project spec FR9, AC8). + +The exact list of `ore_*` composites should be derived by reading the EQL bundle SQL. T3.1 in the plan covers authoring this contract; the implementer should enumerate the composites mechanically from the bundle's `CREATE TYPE` statements. + +## 3. Baseline migration + +`20260601T0000_install_eql_bundle/`: + +- `manifest.json`: standard ADR 197 shape. +- `contract.json`: snapshot of the contract above. +- `ops.json`: an ordered list of operations: + - `cipherstash:install-eql-bundle-v1` — body = full EQL bundle SQL byte-for-byte. + - `cipherstash:create-eql_v2_configuration-v1` — `CREATE TABLE eql_v2_configuration (...)`. + - One op per typed object listed in § 2 not covered by the bundle (`cipherstash:create-eql_v2_configuration_state-v1`, etc.). + +**Bundle byte-equivalence (NFR4 / AC7).** The EQL bundle SQL must be inlined into `ops.json` byte-for-byte from the cipherstash team's vendored file. 
Implementation choices: + +- **(a) Build-time inline.** `tsdown` (or a small build script) reads `src/core/eql-bundle.sql` and substitutes its content into `ops.json` as a string literal. The on-disk `ops.json` source has a placeholder; the published `dist/` carries the inlined version. *Pro:* source files stay diffable. *Con:* the on-disk source `ops.json` doesn't equal what the framework reads. +- **(b) Authored-once inline.** `ops.json` carries the bundle inline from the start. Updating cipherstash's bundle = updating `ops.json` directly + creating a new migration. *Pro:* WYSIWYG even at the cipherstash source level. *Con:* `ops.json` is large, less reviewable. + +Recommendation: **(a)** — keeps `ops.json` reviewable and the bundle reviewable separately. Build-step contract: emit `ops.json` with the bundle inlined; the descriptor's `contractSpace.migrations` exposes the post-build (inlined) JSON. + +`headRef`: + +```json +{ + "hash": "", + "invariants": [ + "cipherstash:install-eql-bundle-v1", + "cipherstash:create-eql_v2_configuration-v1", + "cipherstash:create-eql_v2_configuration_state-v1", + "cipherstash:create-eql_v2_encrypted-v1" + // … plus ore_* type invariants … + ] +} +``` + +Invariants array is sorted alphabetically per the framework-mechanism sub-spec § 3 canonicalization rule. + +## 4. Codec lifecycle hook (`cipherstash:string@1`) + +Implements `CodecControlHooks.onFieldEvent` per [framework-mechanism sub-spec § 5](./framework-mechanism.spec.md#5-codec-lifecycle-hook-t21-t22). + +Behaviour: + +| Event | Trigger condition | Emitted ops | +|---|---|---| +| `'added'` | new field uses `cipherstash:string@1` codec AND `typeParams.searchable === true` | `cipherstash-codec:.:add-search-config@v1` — `SELECT eql_v2.add_search_config('
<table>', '<column>', …)` |
+| `'dropped'` | prior field used `cipherstash:string@1` codec AND `typeParams.searchable === true` | `cipherstash-codec:<table>.<field>:remove-search-config@v1` — `SELECT eql_v2.remove_search_config('<table>', '<column>')` |
+| `'altered'` | both fields use `cipherstash:string@1`, `typeParams.searchable` differs OR other typeParams change | rotate sequence: drop-then-add, with invariantId `cipherstash-codec:<table>.<field>:rotate-search-config@v1` (or pair of drop + add) |
+
+For `'added'` / `'dropped'` where `searchable !== true`, the hook returns `[]` (no DDL needed — the column type is the only concern, handled by the structural ops the user emits).
+
+`invariantId` template: `cipherstash-codec:<table>.<field>:<action>@v1`. Stable across regenerations (deterministic from `(table, field, action)`).
+
+The hook is synchronous and operates only on the table IR passed in. It does *not* read cipherstash's contract-space contract or marker — those advance independently via M1's per-space mechanism.
+
+## 5. Descriptor wiring
+
+`src/exports/control.ts`:
+
+```ts
+import { cipherstashContractJson } from '../core/contract-space/contract';
+import { cipherstashMigrations } from '../core/contract-space/migrations';
+import { cipherstashHeadRef } from '../core/contract-space/head-ref';
+import { cipherstashCodecHooks } from '../core/cipherstash-codec';
+
+export const cipherstashExtensionDescriptor: SqlControlExtensionDescriptor<'postgres'> = {
+  ...cipherstashPackMeta,
+  types: {
+    ...cipherstashPackMeta.types,
+    codecTypes: {
+      ...cipherstashPackMeta.types.codecTypes,
+      controlPlaneHooks: {
+        [CIPHERSTASH_STRING_CODEC_ID]: cipherstashCodecHooks,
+      },
+    },
+  },
+  contractSpace: {
+    contractJson: cipherstashContractJson,
+    migrations: cipherstashMigrations,
+    headRef: cipherstashHeadRef,
+  },
+  // intentionally no `databaseDependencies` — superseded by `contractSpace`
+  create: () => ({
+    familyId: 'sql' as const,
+    targetId: 'postgres' as const,
+  }),
+};
+```
+
+The descriptor never imports build-time-only material; everything it exposes is in-memory JSON values plus the codec runtime functions.
+
+## 6. End-to-end integration test (T3.6, T3.7)
+
+Test file: `packages/3-extensions/cipherstash/test/cipherstash.e2e.test.ts` (or under the integration test harness — `packages/.../tests/integration/`).
+
+**Scenario A (initial setup):**
+
+1. Fresh Postgres database (PGlite via `@prisma/dev` per AGENTS.md).
+2. Application config `extensionPacks: [cipherstashExtensionDescriptor]`.
+3. PSL: `model User { id String @id; email Encrypted }`.
+4. Run `prisma-next migrate`.
+5.
Assert directory layout:
+   - `migrations/<timestamp>_initial/{manifest, ops, contract}.json` exists; `ops.json` contains the user's `CREATE TABLE` op + the codec-emitted `cipherstash-codec:User.email:add-search-config@v1` op.
+   - `migrations/cipherstash/{contract.json, contract.d.ts, refs/head.json}` written; bytes match descriptor's `contractSpace`.
+   - `migrations/cipherstash/20260601T0000_install_eql_bundle/{manifest, ops, contract}.json` exists; `ops.json` body equals the vendored bundle byte-for-byte.
+6. Run `prisma-next db apply`.
+7. Assert single transaction (use Postgres logging or the runner's transaction wrapper).
+8. Assert marker rows: `(space='app', hash=<app contract hash>, invariants=['app:create-table-User-v1', 'cipherstash-codec:User.email:add-search-config@v1'])`, `(space='cipherstash', hash=<cipherstash contract hash>, invariants=[])`.
+9. Assert `dbInit` runs in strict mode without `strictVerification: false` and succeeds.
+10. Insert + select an `Encrypted` value to confirm the codec's runtime path still works.
+
+**Scenario B (drop column):**
+
+1. Continue from Scenario A.
+2. Remove the `email` field from PSL. Re-run `migrate` + `apply`.
+3. Assert the new app-space migration carries `cipherstash-codec:User.email:remove-search-config@v1`.
+4. Assert `cipherstash` marker row unchanged (no extension-space migration).
+
+**Scenario C (extension bump):**
+
+1. Continue from Scenario A.
+2. Author a second cipherstash migration in `src/core/contract-space/migrations/20260615T0000_add_audit_column/` (e.g. adds a column to `eql_v2_configuration`).
+3. Bump cipherstash's `headRef` to point at the new contract.
+4. Re-run `migrate`.
+5. Assert pinned `migrations/cipherstash/{contract.json, contract.d.ts, refs/head.json}` updated in place.
+6. Assert new `migrations/cipherstash/20260615T0000_add_audit_column/` directory created.
+7. Run `db apply`. Assert cipherstash marker row advances; app-space marker unchanged.
+
+**Scenario D (revert workaround — T3.7, T3.8):**
+
+1.
Audit `packages/` and `examples/` for `strictVerification: false` flags introduced under the cipherstash project's first attempt. +2. Remove all such flags. +3. Confirm `pnpm test:e2e` (or the cipherstash test) still passes — strict mode now succeeds because cipherstash's typed objects are recognized as expected via its contract space. + +## 7. Bump-cipherstash diff test (T3.7 in plan / Scenario C above) + +Repeat Scenario C as a pure-fixture test (without live Postgres), asserting the file-system diff produced by `migrate`: + +- `migrations/cipherstash/contract.json`: changed. +- `migrations/cipherstash/contract.d.ts`: changed. +- `migrations/cipherstash/refs/head.json`: changed (`hash` and `invariants` updated). +- `migrations/cipherstash/20260615T0000_add_audit_column/`: created. +- No file outside `migrations/cipherstash/` (and any incidental app-space changes) is touched. + +This test is fast and runs in CI without database dependencies. + +# Acceptance Criteria + +Implementation-level acceptance criteria for cipherstash on contract spaces: + +- [ ] **AC3.1.** `packages/3-extensions/cipherstash/` exists with the layout in § 1; `package.json` published as `@prisma-next/extension-cipherstash`. +- [ ] **AC3.2.** Contract IR enumerates the typed objects in § 2; does not include functions / operators / casts / op classes (project AC8). +- [ ] **AC3.3.** Baseline migration's `installEqlBundle` op carries the vendored bundle byte-for-byte (project AC7). +- [ ] **AC3.4.** Codec hook implements all four behaviours in § 4; invariantId templates match. +- [ ] **AC3.5.** Descriptor exposes `contractSpace` per § 5; carries no `databaseDependencies`. +- [ ] **AC3.6.** Scenario A passes (initial setup, strict-mode dbInit succeeds, marker rows correct, codec ops landed). +- [ ] **AC3.7.** Scenario B passes (drop searchable column emits `remove_search_config` in app-space; cipherstash marker unchanged). 
+- [ ] **AC3.8.** Scenario C passes (cipherstash bump advances pinned files + cipherstash marker; app-space untouched).
+- [ ] **AC3.9.** Scenario D passes (`strictVerification: false` workaround removed; tests still green).
+- [ ] **AC3.10.** Bump-diff test passes (file-system diff matches § 7 exactly).
+
+These map onto the project spec's AC1, AC2, AC3, AC7, AC8, AC9, AC12, AC14 and the plan's TC-1 through TC-7, TC-12, TC-14, TC-21, TC-23, TC-25, TC-29.
+
+# Other Considerations
+
+## Cipherstash team coordination
+
+The cipherstash team owns the EQL bundle SQL (vendored as `eql-bundle.sql`). Bumping the bundle = creating a new cipherstash migration. The team should be looped in on:
+
+- The `cipherstash:*` invariantId namespace and immutability rules (FR11) — once published, an invariantId cannot be renamed.
+- The codec hook semantics (§ 4) — they may want input on the rotate-search-config behaviour.
+- The migration directory naming convention (`<timestamp>_<name>/`).
+
+This coordination is captured separately as TML-2373's deliverable — the cipherstash umbrella project. M3 does *not* require cipherstash team approval on every artefact; it requires the artefacts to be correct against EQL's semantics.
+
+## Test data
+
+Scenario A's `User.email` value should be a real `Encrypted` payload (not a stub), so the round-trip exercises both the contract-space schema and the codec runtime path. Use the existing `Encrypted` test helpers if present, or construct a minimal encrypted payload with the EQL bundle's helper SQL functions.
+
+## Build-time bundle inlining (§ 3 (a))
+
+The build step (`tsdown` plugin or pre-build script) needs to:
+
+1. Read `src/core/eql-bundle.sql`.
+2. Find the `installEqlBundle` op's `execute[0].sql` placeholder in `src/core/contract-space/migrations/20260601T0000_install_eql_bundle/ops.json`.
+3. Substitute the bundle content (escaping appropriately for JSON string-literal embedding).
+4.
Write the substituted JSON to `dist/` (and to wherever the descriptor imports `migrations` from). + +If `tsdown`'s plugin model doesn't support this cleanly, a small standalone Node script run before `tsdown` is acceptable. + +# References + +- [Project spec](../spec.md) — design rationale, ACs. +- [Project plan](../plan.md) — task breakdown, validation gates. +- [Framework-mechanism sub-spec](./framework-mechanism.spec.md) — the mechanism this milestone consumes. +- `projects/cipherstash-integration/project-1/` — reference material from the in-flight cipherstash design (specs, handover, team-facing design doc). Not source-of-truth code. +- `packages/3-extensions/pgvector/` — reference shape for the new cipherstash package. +- ADR 197 — Migration packages snapshot their own contract. +- ADR 208 — Invariant-aware migration routing. + +# Open Questions + +1. **EQL bundle inlining mechanism.** § 3 recommends (a) build-time inline; confirm during T3.2 implementation. If (b) authored-once inline is chosen, document why and update CI to validate `ops.json`'s bundle bytes match the vendored source. +2. **`ore_*` composite enumeration.** The exact set of `ore_*` composites in the IR (§ 2) needs to be derived from the EQL bundle. Implementer reads the bundle's `CREATE TYPE` statements during T3.1. +3. **Rotate-search-config semantics.** § 4 specifies a drop-then-add sequence for `'altered'` events that change `searchable` or other typeParams. Confirm with the cipherstash team that this matches EQL's expectations (vs an in-place update primitive, if one exists). Defer to TML-2373 review. +4. **Test extension reuse.** Scenarios A-D could potentially reuse the synthetic test extension from T1.10 alongside cipherstash to cover multi-extension interactions. Out of scope for M3 (multi-extension is a project non-goal); flag as a future enhancement. 
diff --git a/projects/extension-contract-spaces/specs/framework-mechanism.spec.md b/projects/extension-contract-spaces/specs/framework-mechanism.spec.md new file mode 100644 index 0000000000..e179ce0597 --- /dev/null +++ b/projects/extension-contract-spaces/specs/framework-mechanism.spec.md @@ -0,0 +1,397 @@ +# Summary + +Implementation contract for the **contract-space mechanism** in the framework: per-space planner / runner / verifier, the `contractSpace` extension-descriptor field, the marker schema migration, the pinned per-space artefact layout, codec lifecycle hooks, and the per-space `db init` / `db update` flows. Drives [Milestones M1 + M2](../plan.md#milestones) of the project plan. Reads on top of [the project spec](../spec.md) — this document captures API shapes, file-system contracts, and edge-case handling that don't belong in the spec. + +**Parent project spec:** [`projects/extension-contract-spaces/spec.md`](../spec.md) — design rationale, FRs, NFRs, ACs. + +# Description + +The project spec settles the design. This sub-spec locks down the implementation contracts a maker needs before touching the codebase: + +- The exact TS types added to `SqlControlExtensionDescriptor` and `CodecControlHooks`. +- The marker-schema migration SQL. +- The on-disk file paths the framework reads / writes per space. +- The canonicalization rules pinned files use to be byte-equivalent across machines. +- The integration points for codec hooks in the emitter and for per-space pathfinding in `db init` / `db update`. + +Implementation order follows the plan's task ordering ([T1.1…T1.10, T2.1…T2.5](../plan.md#milestones)). Each task in the plan references one or more sections here. + +# Requirements + +## 1. Extension descriptor: `contractSpace` field + +Add an optional `contractSpace` field to `SqlControlExtensionDescriptor`. 
The contract-space identity types live in `@prisma-next/framework-components/control`: the concept is family-agnostic — a Mongo descriptor would consume the same types specialized to a Mongo contract — so the framework owns them, and the SQL family's descriptor field merely specialises the generic.
+
+```ts
+// packages/1-framework/1-core/framework-components/src/control/control-spaces.ts
+import type { Contract } from '@prisma-next/contract/types';
+import type { MigrationMetadata, MigrationPlanOperation } from './control-migration-types';
+
+export interface ContractSpaceHeadRef {
+  readonly hash: string;
+  readonly invariants: readonly string[];
+}
+
+export interface MigrationPackage {
+  readonly dirName: string; // emit-time directory name; preserved from the author
+  readonly metadata: MigrationMetadata; // ADR 197 metadata; carries `toContract` snapshot
+  readonly ops: readonly MigrationPlanOperation[];
+}
+
+export interface ContractSpace<TContract extends Contract> {
+  readonly contractJson: TContract; // typed in-memory contract
+  readonly migrations: readonly MigrationPackage[];
+  readonly headRef: ContractSpaceHeadRef;
+}
+```
+
+```ts
+// packages/2-sql/9-family/src/core/migrations/types.ts
+import type { ContractSpace } from '@prisma-next/framework-components/control';
+
+export interface SqlControlExtensionDescriptor<TTargetId extends string>
+  extends ControlExtensionDescriptor<'sql', TTargetId> {
+  // existing fields …
+  readonly contractSpace?: ContractSpace<Contract<SqlStorage>>;
+}
+```
+
+Behaviour:
+
+- An extension descriptor without `contractSpace` is treated as a non-schema extension (codec-only, query-ops-only). Today's behaviour preserved.
+- A descriptor with `contractSpace` is loaded into the per-space pipeline at authoring time only (see § 3).
+
+Notes on the resolved shape:
+
+- **`MigrationPackage` is the canonical structural shape** — `{ dirName, metadata, ops }`, in-memory by default. 
`@prisma-next/migration-tools/package` exposes the augmented `OnDiskMigrationPackage extends MigrationPackage` (adds `dirPath: string`) which the on-disk readers (`readMigrationPackage`, `readMigrationsDir`) return. There is no structural distinction between an "authored" package and any other; the in-memory form is the canonical form, and the on-disk readers add a `dirPath` for diagnostics. The framework's emitter (T1.7) materialises a `MigrationPackage` to disk; downstream loaders observe the same value as an `OnDiskMigrationPackage`. +- **`ContractSpace` is generic over the contract** so each family pins a typed contract value at consumption time. The SQL family specialises to `ContractSpace>` so descriptor authors continue to see a typed contract; serialisation to JSON for hashing / on-disk emission is the framework's job (already implemented for app-space contracts), not the descriptor author's. App-space and extension-space values share this exact type — whether a value is the app's space or an extension's space is a control-plane concern, not a structural one. +- **No `contractSnapshot` field on `ContractSpace`.** Per ADR 197, each migration package's `metadata.toContract` *is* the snapshot; there's no separate snapshot field. + +## 2. Marker schema migration (T1.1) + +Promote `prisma_contract.marker` from a single-row table to N-row, keyed by `space`: + +```sql +-- Idempotent (uses IF [NOT] EXISTS guards). Safe to re-run. +ALTER TABLE prisma_contract.marker + ADD COLUMN IF NOT EXISTS space text NOT NULL DEFAULT 'app'; + +UPDATE prisma_contract.marker + SET space = 'app' + WHERE space IS NULL OR space = ''; + +-- Drop the old single-row PK (today's `id` constant) and re-key by space. 
+DO $$
+BEGIN
+  IF EXISTS (
+    SELECT 1 FROM pg_constraint
+    WHERE conname = 'marker_pkey'
+      AND conrelid = 'prisma_contract.marker'::regclass
+  ) THEN
+    ALTER TABLE prisma_contract.marker DROP CONSTRAINT marker_pkey;
+  END IF;
+END$$;
+
+ALTER TABLE prisma_contract.marker
+  ADD CONSTRAINT marker_pkey PRIMARY KEY (space);
+
+-- The old `id` column is no longer load-bearing. Drop it for cleanliness.
+ALTER TABLE prisma_contract.marker
+  DROP COLUMN IF EXISTS id;
+```
+
+Open implementation question: where does this run? Two options:
+
+- **(A) Framework-internal migration** — a hard-coded migration applied before any user / extension migrations on every framework boot, idempotent on already-migrated databases.
+- **(B) Inline in `db init` / `db update`** — the verifier detects pre-migration shape and applies the change as part of the same transaction as the first user migration.
+
+Recommendation: **(A)** — keeps the marker promotion outside of any user-initiated transaction; runs deterministically on every framework start. The per-space verifier (§ 4) can then assume the new shape unconditionally.
+
+Validation: dedicated three-state idempotency tests (fresh / legacy single-row / already-migrated) for both Postgres and SQLite drivers. ADR 029's shadow-DB preflight covers user-DDL paths via the migration runner; the marker promotion runs in `ensureControlTables`, which has always been outside that scope (the original `ensureMarkerTableStatement` and `ensureLedgerTableStatement` were applied directly pre-T1.1). Idempotency tests are stronger evidence than shadow-on-empty would be — they exercise the actual transition states. Multi-process concurrency: rely on Postgres's transactional DDL — concurrent runs serialize on the table lock; SQLite uses `BEGIN EXCLUSIVE`.
+
+## 3. 
Per-space planner (T1.3) and emitter wiring (T1.6, T1.7, T1.8) + +**Helper location.** The producer-side helpers — `planAllSpaces`, the layout convention, and `materialiseMigrationPackage` (renamed from `writeExtensionMigrationPackage` under M1-cleanup F4 and again from `writeAuthoredMigrationPackage` under M1-cleanup F6) — live in `@prisma-next/migration-tools` (`1-framework`), not in the SQL family. The contract-space concept is target-agnostic per project spec FRs 3-6; placing the helpers in the framework layer lets Mongo (and any future target) reuse them. `pnpm lint:deps` validates that the framework layer carries no target-* references. The SQL family wires them into its CLI / emitter at the consumption site. + +The planner gains a per-space loop. The shipped `planAllSpaces` shape is **generic over contract and package types**: + +```ts +// @prisma-next/migration-tools/exports/spaces +interface SpacePlanInput { + readonly spaceId: string; // 'app' | extension space id + readonly priorContract: TContract | null; // null = first emit for this space + readonly newContract: TContract; + // … plus whatever per-space context the family planner needs +} + +interface SpacePlanOutput { + readonly spaceId: string; + readonly migrationPackages: readonly TPackage[]; // 0 or more +} + +function planAllSpaces( + inputs: readonly SpacePlanInput[], + planSpace: (input: SpacePlanInput) => SpacePlanOutput, +): readonly SpacePlanOutput[]; +``` + +`planAllSpaces` itself never inspects either type — it sorts inputs alphabetically by `spaceId` (deterministic ordering, AM3), rejects duplicate ids with `MIGRATION.DUPLICATE_SPACE_ID` *before* any callback runs (atomicity), and delegates the per-space planning decision to the family. 
+ +The SQL family's call site is the canonical instantiation: + +```ts +// somewhere in @prisma-next/family-sql (consumption site, lands in a later round) +planAllSpaces>( + inputs, + (input) => sqlPlanSpace(input), +); +``` + +A Mongo-family call site would instantiate with `>` against `mongoPlanSpace`; the helper does not need to change. + +For app-space: `priorContract` comes from `/migrations//contract.json` (today's behaviour); `newContract` comes from the just-emitted root `/contract.json`. + +For each loaded extension space: `priorContract` comes from `/migrations//contract.json` (pinned mirror) if it exists, else `null`; `newContract` comes from the descriptor's `contractSpace.contractJson`. When prior == new (byte-equal after canonicalization), emit zero migration packages and skip pinned-file write (no-op). + +**Layout convention (γ).** The emitter writes: + +| Artefact | Path | +|---|---| +| App-space migration directory | `/migrations//` | +| App-space current contract | `/contract.json` (today; unchanged) | +| Per-extension migration directory | `/migrations///` | +| Per-extension current contract (pinned) | `/migrations//contract.json` | +| Per-extension current typings (pinned) | `/migrations//contract.d.ts` | +| Per-extension head ref (pinned) | `/migrations//refs/head.json` | + +Migration names inside a per-extension subdirectory **preserve the names the extension author chose** — no renaming. The per-extension subdirectory must be a valid filesystem name; space identifiers are constrained to `[a-z][a-z0-9_-]{0,63}`. + +**Emission helper (T1.7).** Shipped as `materialiseMigrationPackage(targetDir, pkg)` in `@prisma-next/migration-tools/exports/io` (renamed from `writeExtensionMigrationPackage` under M1-cleanup F4 and again from `writeAuthoredMigrationPackage` under M1-cleanup F6). 
Takes an in-memory canonical `MigrationPackage` (per § 1's resolved shape: `{ dirName, metadata, ops }`) and writes `migration.json`, `ops.json`, and a canonical-JSON `contract.json` snapshot under `//`. The `migration.json` + `ops.json` writes delegate to the existing app-space `writeMigrationPackage` for byte-parity; the `contract.json` snapshot reuses the existing `canonicalizeJson` helper. Re-emitting the same package across runs / machines produces byte-identical files. The verb-distinction from `writeMigrationPackage(dir, metadata, ops)` is intentional — `materialise*` keeps the snapshot-emission semantic visible at the call site, where the lower-level helper is constituent-taking. + +**Layout helper (T1.6).** Shipped as `spaceMigrationDirectory(projectMigrationsDir, spaceId)` in `@prisma-next/migration-tools/exports/spaces`. App-space passes through unchanged (no subdirectory); extension spaces resolve to `/`. Validates `spaceId` against `[a-z][a-z0-9_-]{0,63}` and throws `MIGRATION.INVALID_SPACE_ID` for filesystem-unsafe names. + +**Pinned artefact emission (T1.8).** Shipped as `emitPinnedSpaceArtefacts(projectMigrationsDir, spaceId, inputs)` in `@prisma-next/migration-tools/exports/spaces`. Framework-neutral primitives signature (same target-agnosticism rationale as R3's generic `planAllSpaces`): + +```ts +// @prisma-next/migration-tools/exports/spaces +function emitPinnedSpaceArtefacts( + projectMigrationsDir: string, + spaceId: string, + inputs: { + readonly contract: unknown; // any JSON-serialisable value + readonly contractDts: string; // pre-rendered; caller's responsibility + readonly headRef: { readonly hash: string; readonly invariants: readonly string[] }; + }, +): Promise; +``` + +Writes `contract.json`, `contract.d.ts`, `refs/head.json` under `//`. Always-overwrite (the framework owns these files). Rejects app-space and invalid space ids. 
Canonicalisation rules: + +- `contract.json`: passes `inputs.contract` through `canonicalizeJson` so byte-equivalence holds across runs / machines. +- `contract.d.ts`: writes `inputs.contractDts` verbatim. The framework helper does **not** render `.d.ts`; rendering is target / typemap-aware and lives at the consumption site. +- `refs/head.json`: canonical JSON of `{ "hash": headRef.hash, "invariants": [...sorted] }` — invariants sorted alphabetically for determinism. + +The SQL family's call site renders `contractDts` via its existing `generateContractDts` helper (which knows the target's typemaps) before invoking the framework helper: + +```ts +// somewhere in @prisma-next/family-sql (consumption site, lands in M2) +emitPinnedSpaceArtefacts(projectMigrationsDir, spaceId, { + contract: contractSpace.contractJson, + contractDts: generateContractDts(contractSpace.contractJson, /* target typemaps */), + headRef: contractSpace.headRef, +}); +``` + +A Mongo-family call site would compose its own typemap-aware `.d.ts` renderer the same way. + +**Drift detection (T1.9).** Shipped as `detectSpaceContractDrift(spaceId, { descriptorHash, pinnedHash })` (pure 3-discriminant primitive) plus `readPinnedContractHash(projectMigrationsDir, spaceId)` (I/O wrapper) in `@prisma-next/migration-tools/exports/spaces`. + +Before computing `priorContract` for a space: + +- Read pinned hash via `readPinnedContractHash(...)`. The wrapper reads `//refs/head.json.hash` rather than re-hashing the pinned `contract.json` content. This is operationally equivalent under descriptor self-consistency (T1.8 writes `inputs.headRef.hash` verbatim into `refs/head.json`, and `headRef.hash` is the same hash the descriptor's pipeline produces) and slightly more robust — immune to canonical-JSON pipeline evolution between framework versions. Returns `null` on ENOENT (no pinned file yet — first emit case). 
+- Compute `descriptorHash = hash(descriptor.contractSpace.contractJson)` from the in-memory descriptor side using the same canonical-JSON pipeline. +- Pass both to `detectSpaceContractDrift(spaceId, { descriptorHash, pinnedHash })`. The helper returns `{ kind: 'noDrift' | 'firstEmit' | 'drift'; spaceId; descriptorHash; pinnedHash }` — pure, no I/O, no warning surface (the SQL-family consumption site formats the warning). +- On `kind === 'drift'`: SQL-family consumption site surfaces a non-fatal warning naming the extension and the diff direction. The migration emit proceeds normally — the warning is informational. (`migrate` is the canonical way to materialise extension bumps; the warning just confirms the bump is being captured this run.) +- On `kind === 'firstEmit'` or `kind === 'noDrift'`: no warning. + +**Note for M2 R1 wiring:** at descriptor load time, the SQL family should also verify descriptor self-consistency by recomputing `hash(canonicalize(descriptor.contractSpace.contractJson))` and asserting equality with `descriptor.contractSpace.headRef.hash`. Mismatch indicates the extension author published an inconsistent descriptor (e.g. `headRef.hash` not regenerated after `contractJson` changed); fail fast with a clear error (suggested code: `MIGRATION.DESCRIPTOR_HEAD_HASH_MISMATCH`). + +## 4. Per-space runner (T1.4) and verifier (T1.5) + +**Helper location.** Both runner-ordering and verifier helpers ship in `@prisma-next/migration-tools/exports/spaces` as **pure target-agnostic primitives** — same convention as R3's producer-side helpers. The transaction wrapping, marker row writes, and live-DB schema compare belong at the SQL-family consumption site (lands in M2 R1). `pnpm lint:deps` validates that `packages/1-framework` carries no target-* references. + +**Runner ordering helper (T1.4).** Shipped as `concatenateSpaceApplyInputs(inputs)` in `@prisma-next/migration-tools/exports/spaces`. 
Pure, generic over per-target op type:
+
+```ts
+// @prisma-next/migration-tools/exports/spaces
+interface SpaceApplyInput<TOp> {
+  readonly spaceId: string;
+  readonly migrationDirectory: string; // either projectRoot/migrations or .../<spaceId>
+  readonly currentMarkerHash: string | null;
+  readonly currentMarkerInvariants: readonly string[];
+  readonly path: readonly TOp[]; // from per-space planner / findPathWithDecision
+}
+
+function concatenateSpaceApplyInputs<TOp>(
+  inputs: readonly SpaceApplyInput<TOp>[],
+): readonly SpaceApplyInput<TOp>[];
+```
+
+Cross-space ordering: extensions alphabetical-by-spaceId first, app-space last. Rejects duplicate `spaceId` with `MIGRATION.DUPLICATE_SPACE_ID`. Returns inputs unchanged in identity (referential pass-through) where ordering already matches. The SQL-family consumption site wraps the resulting concatenation in a single transaction and writes per-space marker rows after apply (using the optional `space` parameter on `WriteMarkerInput` landed in T1.1).
+
+**Verifier (T1.5).** Shipped as `verifyContractSpaces(inputs)` + `listPinnedSpaceDirectories(projectMigrationsDir)` in `@prisma-next/migration-tools/exports/spaces`. Pure structural verifier — caller supplies the loaded spaces, the pinned per-space contracts, and the marker rows; the helper returns a deterministic list of violations with actionable remediation strings. Five violation kinds:
+
+- `declaredButUnmigrated`: extension declared in `extensionPacks` but no pinned `contract.json` on disk.
+- `orphanMarker`: marker row for a space not in `extensionPacks`.
+- `orphanPinnedDir`: pinned directory on disk for a space not in `extensionPacks`.
+- `hashMismatch`: marker row's hash differs from pinned contract's hash.
+- `invariantsMismatch`: pinned contract's required invariants are not all in the marker row's applied invariants set. 
+ +`listPinnedSpaceDirectories` filters dot-prefixed and timestamp-prefixed (`/^\d{8}T\d{4}_/`) directories so it correctly distinguishes pinned space directories from app-space migration directories. The SQL-family verifier wires `verifyContractSpaces` into its existing `verify` / `dbInit` paths and decides fail-vs-warn semantics per violation kind (M2). + +**Verifier algorithm** (the conceptual flow the consumption site wires together): + +1. Discover loaded spaces from `extensionPacks` + `'app'`. Result: `loadedSpaces: ReadonlySet`. +2. Read app-space `contract.json` from `/contract.json`. +3. For each `extension space` in `loadedSpaces`: + - Read pinned `/migrations//contract.json`. Reject (FR6 case b) if missing — error message: `"Extension '' is declared in extensionPacks but has not been emitted; run 'prisma-next migrate'"`. +4. Sort the `(spaceId, contract)` pairs alphabetically by `spaceId`. Aggregate to a single in-memory `expected schema`. +5. List `/migrations/` subdirectories. For each directory `D`: + - If `D` matches a migration-name pattern (timestamp-prefixed) — it's an app-space migration; skip. + - Else, treat `D` as a space identifier. If `D` is not in `loadedSpaces`, reject (FR6 case c) — error: `"Orphan pinned directory 'migrations//' for an extension not in extensionPacks; remove the directory or re-add the extension"`. +6. Read all marker rows. For each row's `space`: + - If `space` is not in `loadedSpaces`, reject (FR6 case a) — error: `"Orphan marker row for space '' (no longer in extensionPacks); remediation: manually delete the row from prisma_contract.marker"`. +7. Compare each marker row's `(hash, invariants)` to the corresponding space's pinned contract hash + applied invariants. Reject mismatches per space with a strict-mode error. +8. Compare the aggregate expected schema against the live database (today's strict-mode logic, applied to the union). 
+ +Determinism: the alphabetical sort in (4) and the deterministic listing in (5) and (6) make the verifier's behaviour identical regardless of `extensionPacks` declaration order (NFR6). + +## 5. Codec lifecycle hook (T2.1, T2.2) + +Extend `CodecControlHooks`: + +```ts +type FieldEvent = 'added' | 'dropped' | 'altered'; + +interface FieldEventContext { + readonly priorTable?: TableIR; // present for 'dropped' and 'altered' + readonly newTable?: TableIR; // present for 'added' and 'altered' + readonly priorField?: FieldIR; // present for 'dropped' and 'altered' + readonly newField?: FieldIR; // present for 'added' and 'altered' +} + +interface CodecControlHooks { + // … existing hooks … + readonly onFieldEvent?: ( + event: FieldEvent, + ctx: FieldEventContext, + ) => readonly MigrationPlanOperation[]; +} +``` + +Hook contract: + +- **Synchronous.** The emitter must be able to assemble the migration JSON without awaiting hooks. +- **App-space scope only.** `priorTable` and `newTable` are scoped to the application's contract; the hook never sees extension-space IR. This is enforced by API shape — the hook signature has no parameter for cross-space context. +- **`'altered'` semantics.** Fires when a field exists in both `priorTable` and `newTable` and any field property has changed *except* `codecId`. Codec-id changes are a v1 non-goal (see project spec § Non-goals). +- **Return value.** `MigrationPlanOperation[]`. Each op must carry its own `invariantId`. Returned ops are inlined into the app-space migration's `ops.json`, alongside the user's own structural ops. + +**Wiring (T2.2).** In the app-space emitter's per-field diff loop: + +- For each field added → run `onFieldEvent('added', { newTable, newField })` on the *new* field's codec. If hook absent, skip. +- For each field dropped → run `onFieldEvent('dropped', { priorTable, priorField })` on the *prior* field's codec. 
+- For each field present in both → if any property other than `codecId` differs, run `onFieldEvent('altered', { priorTable, newTable, priorField, newField })` on the new field's codec. +- Concatenate hook-returned ops into the app-space migration's `operations` array. Order: structural ops first, then codec-emitted ops grouped by triggering event (added → dropped → altered). Within a group, deterministic by `(tableName, fieldName)` then by op index. + +## 6. `db init` / `db update` per-space (T2.3, T2.4) + +Both flows reduce to: for each loaded space, run `findPathWithDecision(currentMarker, ref.hash, effectiveRequired)` per ADR 208, then apply the union of paths in the cross-space ordering convention. + +```ts +interface SpacePathInput { + readonly spaceId: string; + readonly currentMarkerHash: string | null; + readonly currentMarkerInvariants: readonly string[]; + readonly targetRef: ContractSpaceHeadRef; // for app-space: read from projectRoot (renamed from ExtensionContractRef under M1-cleanup F4) + readonly migrationGraph: MigrationGraph; // for app-space: synthesized from contract IR if no migrations on disk +} + +function planAllSpacePaths(inputs: readonly SpacePathInput[]): readonly SpaceApplyInput[]; +``` + +For app-space: `targetRef` is read from `/refs/head.json` (or computed inline from the current `contract.json` for greenfield); `migrationGraph` is loaded from `/migrations/`. If no migrations on disk, the existing synthetic-edge model emits a `∅ → head` edge from the contract IR (today's `db init` behaviour). + +For each extension space: `targetRef` is read from the pinned `/migrations//refs/head.json`; `migrationGraph` is loaded from `/migrations//`. **No descriptor access.** Synthesis is not used — extension spaces always walk their explicit graph. + +`effectiveRequired = targetRef.invariants − currentMarkerInvariants` per ADR 208. + +Concatenate all `SpaceApplyInput`s in the cross-space ordering convention and pass to the runner (§ 4). + +## 7. 
Synthetic test extension (T1.10) + +Location: `test/integration/test/contract-space-fixture/`. Hosted inside the `@prisma-next/integration-tests` workspace as a non-package fixture (no `package.json` of its own; it leverages integration-tests' existing config). Relocated under M1-cleanup T-cleanup.1 (commit `db33795e3`); originally lived as a private workspace package at `packages/3-extensions/test-contract-space/` mirroring `packages/3-extensions/pgvector/`'s shape — that shape was dropped because the fixture had no external consumers and "package under `packages/3-extensions/`" implied "real extension," which it is not. + +The fixture exposes the same descriptor surface that a real extension would (`contractSpace.contractJson`, `migrations`, `headRef`) and is consumed by integration tests in `test/integration/test/contract-space-fixture/descriptor.test.ts`. + +`control.ts`: a descriptor exposing `contractSpace`: + +- One composite type (e.g. `test_box` with two int fields) declared in `contract.json`. +- One baseline migration (op: `CREATE TABLE test_box (x int, y int)`; invariantId `test-contract-space:create-test_box-v1`). M1 R1 substituted a table for a composite type because composite-type IR support is M3-or-later work; the per-space mechanism is type-agnostic so the substitution is design-equivalent. +- `headRef = { hash: , invariants: ['test-contract-space:create-test_box-v1'] }`. + +The test extension is consumed by integration tests in M1 to exercise: + +- Per-space planner / runner / verifier. +- Pinned-artefact emission (TC-25, TC-29). +- Drift detection (TC-30). +- Orphan-marker / orphan-pinned-dir rejection (TC-22, TC-27, TC-28). +- No-descriptor verify (TC-26) via a fixture that mock-deletes the test-extension import resolution before invoking `dbInit` / `db apply`. 
+ +The extension is private (not published); existence in the workspace is solely to exercise the contract-space machinery against the same module-graph descriptor-import path a real extension would use. + +# Acceptance Criteria + +Implementation-level acceptance criteria for the framework mechanism: + +- [ ] **AM1.** `SqlControlExtensionDescriptor.contractSpace` field present and typed per § 1. `pgvector` and `arktype-json` continue to typecheck (their descriptors don't set the field). +- [ ] **AM2.** Marker schema migration SQL applies idempotently against (a) a fresh `prisma_contract.marker` table, (b) a pre-migration single-row marker, (c) an already-migrated marker. Verified by dedicated integration tests on both Postgres and SQLite drivers. +- [ ] **AM3.** Per-space planner (`planAllSpaces`) returns the same shape regardless of `extensionPacks` declaration order (deterministic alphabetical sort). +- [ ] **AM4.** Per-space runner concatenates inputs as extensions-first-then-app-space and applies all in a single transaction. Mid-apply failure rolls back all spaces. +- [ ] **AM5.** Per-space verifier rejects all three orphan / missing cases (orphan marker, declared-but-unmigrated, orphan pinned dir) with the error messages specified in § 4. +- [ ] **AM6.** Pinned per-space artefacts (`contract.json`, `contract.d.ts`, `refs/head.json`) are written under `migrations//` with byte-equivalent canonical content. Re-running `migrate` against an unchanged descriptor produces no file-system change (idempotent). +- [ ] **AM7.** Drift detection: bumping a descriptor's `contractJson` without running `migrate` produces a clear warning on the next `migrate` invocation. Warning is non-fatal; emit proceeds. +- [ ] **AM8.** Codec hook fires for `'added'`, `'dropped'`, `'altered'` events with the contract specified in § 5. `'altered'` does *not* fire when only `codecId` changes. 
+- [ ] **AM9.** `db init` per-space: on a fresh database with the synthetic test extension, both spaces are initialised in a single transaction; marker rows for `app` and `test-contract-space` exist with expected hashes. +- [ ] **AM10.** `db update` per-space: bumping the synthetic test extension's `headRef` advances only its space's marker, leaving app-space untouched. +- [ ] **AM11.** With the synthetic test fixture's source removed from the test tree, `dbInit` and `db apply` succeed reading only the user's repo (the same property that `packages/1-framework/3-tooling/migration/test/deletable-node-modules.test.ts` locks in inline by inventing the space id rather than importing the fixture). `migrate` would fail because the descriptor is the source of pinned-artefact emission — that's expected and informative. + +# Other Considerations + +## Performance (NFR5) + +The per-space planner's outer loop is bounded by the number of loaded spaces (typically 1-5 for real apps, never more than ~20 even in monorepo composition). The verifier's aggregation pass is linear in total contract size. Both should be sub-millisecond overhead on top of today's single-space planner / verifier — well within the 5% budget. + +## Implementation order (intra-M1) + +The plan's task order (T1.1 → T1.10) is correct but not strictly enforced. A reasonable parallel decomposition: + +- **Track A** (types + descriptor): T1.2 → T2.1. +- **Track B** (DB + marker): T1.1 → T1.5 (verifier). +- **Track C** (emission): T1.6 → T1.7 → T1.8 → T1.9. +- **Track D** (test extension): T1.10 (depends on Track A's types being in place). + +Track B's marker migration (T1.1) blocks all integration tests; T1.1 first. + +# References + +- [Project spec](../spec.md) — design rationale. +- [Project plan](../plan.md) — tasks and validation gates. +- ADR 197 — Migration packages snapshot their own contract. +- ADR 208 — Invariant-aware migration routing. +- ADR 021 — Contract Marker Storage (modified by T1.1). 
+- ADR 029 — Shadow-DB preflight (covers user-DDL only; T1.1 runs inside `ensureControlTables` which is outside that scope, validated by idempotency tests instead). +- `test/integration/test/contract-space-fixture/` — synthetic test extension fixture, M1-cleanup-relocated home (commit `db33795e3`). + +# Open Questions + +1. **Marker migration mechanism.** § 2 recommends (A) framework-internal migration; confirm during T1.1 implementation. If (B) is chosen, document why and update the verifier to handle pre-migration marker shape. +2. **Drift detection severity.** § 3 specifies the drift warning as non-fatal informational. Should it be opt-in escalatable to error (e.g. `--strict-drift` flag)? Defer until users report a need. +3. **Pinned `contract.d.ts` regeneration.** The `.d.ts` for an extension is derived from its `contractJson` via the existing `.d.ts` emitter. Confirm the emitter is target-agnostic (or has a sensible default target) so the framework can run it for any extension space without per-extension target wiring. If not, fall back to `// @ts-nocheck` placeholder and surface as a follow-up. diff --git a/scripts/lint-app-space-id.mjs b/scripts/lint-app-space-id.mjs new file mode 100644 index 0000000000..d384555763 --- /dev/null +++ b/scripts/lint-app-space-id.mjs @@ -0,0 +1,173 @@ +#!/usr/bin/env node +/** + * Regression guardrail for the `APP_SPACE_ID` canonical-source rule + * (project: extension contract spaces, code-review F3). + * + * Two policed invariants: + * + * 1. **Single source of truth.** `export const APP_SPACE_ID = ...` + * may appear at exactly one path under `packages/` — the canonical + * home in `@prisma-next/framework-components/control`. Re-exports + * (`export { APP_SPACE_ID } from '...'`) are allowed and + * encouraged: they preserve the existing module surface for + * consumers of `migration-tools`, `sql-runtime`, the postgres / + * sqlite target packages, etc., without re-declaring the literal. + * + * 2. 
**No raw `'app'` / `"app"` string literals** as space identifiers + * inside `packages/2-sql/**` / `packages/3-targets/**` source + * files. Use `APP_SPACE_ID` (or template-string interpolation + * `${APP_SPACE_ID}` inside SQL templates) instead. Test files are + * out of scope — the literal `'app'` is often the test data. + * JSDoc comment lines are also out of scope: they're prose, not + * runtime values, and frequently document the literal. + * + * Exits with code 1 and prints offending locations if any violations + * are found; exits with code 0 otherwise. + */ + +import { readdirSync, readFileSync, statSync } from 'node:fs'; +import { extname, join, relative } from 'node:path'; +import { fileURLToPath } from 'node:url'; + +const repoRoot = join(fileURLToPath(new URL('.', import.meta.url)), '..'); + +const CANONICAL_FILE = join( + 'packages', + '1-framework', + '1-core', + 'framework-components', + 'src', + 'control', + 'control-spaces.ts', +); + +const SCAN_ROOTS_FOR_DECLARATIONS = [join(repoRoot, 'packages')]; +const SCAN_ROOTS_FOR_LITERALS = [ + join(repoRoot, 'packages', '2-sql'), + join(repoRoot, 'packages', '3-targets'), +]; + +const INCLUDED_EXTENSIONS = new Set(['.ts', '.tsx', '.mts', '.cts']); +const EXCLUDED_DIRECTORIES = new Set([ + 'node_modules', + 'dist', + 'dist-tsc', + 'dist-tsc-prod', + 'coverage', + '.tmp-output', + 'test', + 'tests', + '__tests__', + 'fixtures', + 'recordings', + 'templates', +]); + +const DECLARATION_RE = /export\s+const\s+APP_SPACE_ID\b/; +const LITERAL_RE = /(['"])app\1/g; + +function* walk(dir) { + let entries; + try { + entries = readdirSync(dir); + } catch { + return; + } + for (const entry of entries) { + if (EXCLUDED_DIRECTORIES.has(entry)) continue; + const full = join(dir, entry); + let stats; + try { + stats = statSync(full); + } catch { + continue; + } + if (stats.isDirectory()) { + yield* walk(full); + } else if (stats.isFile() && INCLUDED_EXTENSIONS.has(extname(full))) { + yield full; + } + } +} + +const 
declarationMatches = []; +for (const root of SCAN_ROOTS_FOR_DECLARATIONS) { + for (const file of walk(root)) { + const contents = readFileSync(file, 'utf8'); + if (!DECLARATION_RE.test(contents)) continue; + declarationMatches.push(relative(repoRoot, file)); + } +} + +const expectedDeclaration = relative('.', CANONICAL_FILE); +const unexpectedDeclarations = declarationMatches.filter((m) => m !== expectedDeclaration); +const missingCanonical = !declarationMatches.includes(expectedDeclaration); + +const literalViolations = []; +for (const root of SCAN_ROOTS_FOR_LITERALS) { + for (const file of walk(root)) { + const contents = readFileSync(file, 'utf8'); + if (!LITERAL_RE.test(contents)) continue; + const lines = contents.split('\n'); + LITERAL_RE.lastIndex = 0; + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + LITERAL_RE.lastIndex = 0; + if (!LITERAL_RE.test(line)) continue; + const trimmed = line.trimStart(); + if ( + trimmed.startsWith('//') || + trimmed.startsWith('/**') || + trimmed.startsWith('/*') || + trimmed.startsWith('*/') || + trimmed.startsWith('*') + ) { + continue; + } + literalViolations.push({ + file: relative(repoRoot, file), + line: i + 1, + text: line.trim(), + }); + } + } +} + +let failed = false; + +if (missingCanonical) { + console.error(`Canonical APP_SPACE_ID declaration missing — expected at ${expectedDeclaration}.`); + failed = true; +} + +if (unexpectedDeclarations.length > 0) { + console.error( + `Found ${unexpectedDeclarations.length} unexpected APP_SPACE_ID declaration(s); only ${expectedDeclaration} is allowed:`, + ); + for (const file of unexpectedDeclarations) { + console.error(` ${file}`); + } + failed = true; +} + +if (literalViolations.length > 0) { + console.error( + `Found ${literalViolations.length} raw 'app'/"app" literal(s) under SQL / target source — use APP_SPACE_ID instead:`, + ); + for (const v of literalViolations) { + console.error(` ${v.file}:${v.line}: ${v.text}`); + } + failed = true; +} + +if 
(failed) { + console.error( + '\nSee `packages/1-framework/1-core/framework-components/src/control/control-spaces.ts` ' + + 'and project review F3 for context.', + ); + process.exit(1); +} + +console.log( + `APP_SPACE_ID canonical-source check passed (declaration at ${expectedDeclaration}; no raw 'app' literals in scoped source trees).`, +); diff --git a/test/e2e/framework/test/sqlite/migrations/harness.ts b/test/e2e/framework/test/sqlite/migrations/harness.ts index 652897fcbc..cb70692975 100644 --- a/test/e2e/framework/test/sqlite/migrations/harness.ts +++ b/test/e2e/framework/test/sqlite/migrations/harness.ts @@ -12,8 +12,11 @@ import sqlFamilyDescriptor, { } from '@prisma-next/family-sql/control'; import sqlFamilyPack from '@prisma-next/family-sql/pack'; import { verifySqlSchema } from '@prisma-next/family-sql/schema-verify'; -import type { MigrationOperationPolicy } from '@prisma-next/framework-components/control'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { + APP_SPACE_ID, + createControlStack, + type MigrationOperationPolicy, +} from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { field } from '@prisma-next/sql-contract-ts/contract-builder'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; @@ -128,6 +131,7 @@ export async function applyMigration( policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents: fw, + spaceId: APP_SPACE_ID, }); if (r.kind !== 'success') throw new Error('Origin planner failed'); const run = await runner.execute({ @@ -149,6 +153,7 @@ export async function applyMigration( policy, fromContract: options.origin ?? 
null, frameworkComponents: fw, + spaceId: APP_SPACE_ID, }); if (planResult.kind !== 'success') { throw new Error( diff --git a/test/e2e/framework/test/sqlite/utils.ts b/test/e2e/framework/test/sqlite/utils.ts index 3c6f50f360..5fce93e809 100644 --- a/test/e2e/framework/test/sqlite/utils.ts +++ b/test/e2e/framework/test/sqlite/utils.ts @@ -81,7 +81,7 @@ function createSchema>( ): void { db.exec(` CREATE TABLE _prisma_marker ( - id INTEGER PRIMARY KEY CHECK (id = 1), + space TEXT NOT NULL PRIMARY KEY DEFAULT 'app', core_hash TEXT NOT NULL, profile_hash TEXT NOT NULL, contract_json TEXT, @@ -92,8 +92,8 @@ function createSchema>( invariants TEXT NOT NULL DEFAULT '[]' ) `); - db.prepare('INSERT INTO _prisma_marker (id, core_hash, profile_hash) VALUES (?, ?, ?)').run( - 1, + db.prepare('INSERT INTO _prisma_marker (space, core_hash, profile_hash) VALUES (?, ?, ?)').run( + 'app', contract.storage.storageHash, contract.profileHash ?? contract.storage.storageHash, ); diff --git a/test/integration/test/cli-journeys/drift-marker.e2e.test.ts b/test/integration/test/cli-journeys/drift-marker.e2e.test.ts index 81e972560c..d756897d0d 100644 --- a/test/integration/test/cli-journeys/drift-marker.e2e.test.ts +++ b/test/integration/test/cli-journeys/drift-marker.e2e.test.ts @@ -192,7 +192,7 @@ withTempDir(({ createTempDir }) => { // Corrupt the marker await withClient(db.connectionString, async (client) => { await client.query( - `UPDATE prisma_contract.marker SET core_hash = 'sha256:corrupted-garbage' WHERE id = 1`, + `UPDATE prisma_contract.marker SET core_hash = 'sha256:corrupted-garbage' WHERE space = 'app'`, ); }); diff --git a/test/integration/test/cli-journeys/greenfield-setup.e2e.test.ts b/test/integration/test/cli-journeys/greenfield-setup.e2e.test.ts index 13146deba4..ca863cb80a 100644 --- a/test/integration/test/cli-journeys/greenfield-setup.e2e.test.ts +++ b/test/integration/test/cli-journeys/greenfield-setup.e2e.test.ts @@ -65,7 +65,7 @@ withTempDir(({ createTempDir }) => 
{ // Verify marker created const marker = await sql( db.connectionString, - 'SELECT core_hash, profile_hash FROM prisma_contract.marker WHERE id = 1', + "SELECT core_hash, profile_hash FROM prisma_contract.marker WHERE space = 'app'", ); expect(marker.rows.length, 'A.03: marker created').toBe(1); expect(marker.rows[0]?.['core_hash'], 'A.03: marker has core_hash').toBeDefined(); diff --git a/test/integration/test/cli-journeys/invariant-routing.e2e.test.ts b/test/integration/test/cli-journeys/invariant-routing.e2e.test.ts index c058fb825d..a7bb55f836 100644 --- a/test/integration/test/cli-journeys/invariant-routing.e2e.test.ts +++ b/test/integration/test/cli-journeys/invariant-routing.e2e.test.ts @@ -500,12 +500,12 @@ withTempDir(({ createTempDir }) => { // moves the storage hash backward. await sql( db.connectionString, - `UPDATE "prisma_contract"."marker" SET core_hash = $1 WHERE id = 1`, + `UPDATE "prisma_contract"."marker" SET core_hash = $1 WHERE space = 'app'`, [c1Hash], ); const markerAfterReset = await sql( db.connectionString, - `SELECT core_hash, invariants FROM "prisma_contract"."marker" WHERE id = 1`, + `SELECT core_hash, invariants FROM "prisma_contract"."marker" WHERE space = 'app'`, ); expect(markerAfterReset.rows[0]?.['core_hash'], 'R.03: storage hash rolled back').toBe( c1Hash, @@ -536,7 +536,7 @@ withTempDir(({ createTempDir }) => { const markerAfterReapply = await sql( db.connectionString, - `SELECT core_hash, invariants FROM "prisma_contract"."marker" WHERE id = 1`, + `SELECT core_hash, invariants FROM "prisma_contract"."marker" WHERE space = 'app'`, ); expect(markerAfterReapply.rows[0]?.['core_hash'], 'R.05: marker at C2').toBe(c2Hash); expect( @@ -687,7 +687,7 @@ MigrationCLI.run(import.meta.url, M); const markerRow = await sql( db.connectionString, - `SELECT invariants FROM "prisma_contract"."marker" WHERE id = 1`, + `SELECT invariants FROM "prisma_contract"."marker" WHERE space = 'app'`, ); expect( markerRow.rows[0]?.['invariants'], @@ -795,7 
+795,7 @@ MigrationCLI.run(import.meta.url, M); ).toBe(0); const markerAfterApply = await sql( db.connectionString, - `SELECT invariants FROM "prisma_contract"."marker" WHERE id = 1`, + `SELECT invariants FROM "prisma_contract"."marker" WHERE space = 'app'`, ); expect( markerAfterApply.rows[0]?.['invariants'], @@ -807,11 +807,11 @@ MigrationCLI.run(import.meta.url, M); // is non-empty — this is the case the bug fix surfaces. await sql( db.connectionString, - `UPDATE "prisma_contract"."marker" SET invariants = '{}' WHERE id = 1`, + `UPDATE "prisma_contract"."marker" SET invariants = '{}' WHERE space = 'app'`, ); const markerAfterReset = await sql( db.connectionString, - `SELECT core_hash, invariants FROM "prisma_contract"."marker" WHERE id = 1`, + `SELECT core_hash, invariants FROM "prisma_contract"."marker" WHERE space = 'app'`, ); expect(markerAfterReset.rows[0]?.['core_hash'], 'T.04: marker still at c1').toBe(c1Hash); expect(markerAfterReset.rows[0]?.['invariants'], 'T.04: marker.invariants cleared').toEqual( diff --git a/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts b/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts index 8ac325b820..c9cba5cf6a 100644 --- a/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts +++ b/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts @@ -112,8 +112,8 @@ withTempDir(({ createTempDir }) => { // Marker stays at the first migration's target hash const marker = await sql( db.connectionString, - 'SELECT core_hash FROM prisma_contract.marker WHERE id = $1', - [1], + 'SELECT core_hash FROM prisma_contract.marker WHERE space = $1', + ['app'], ); expect(marker.rows[0]?.['core_hash'], 'marker unchanged after failure').toBe( firstResult.markerHash, @@ -199,8 +199,8 @@ withTempDir(({ createTempDir }) => { // Verify marker updated const marker = await sql( db.connectionString, - 'SELECT core_hash FROM prisma_contract.marker WHERE id = $1', - [1], + 
'SELECT core_hash FROM prisma_contract.marker WHERE space = $1', + ['app'], ); expect(marker.rows[0]?.['core_hash'], 'marker matches result').toBe(result.markerHash); diff --git a/test/integration/test/cli.db-init.e2e.errors.test.ts b/test/integration/test/cli.db-init.e2e.errors.test.ts index 3fed6f0a95..485d521f4b 100644 --- a/test/integration/test/cli.db-init.e2e.errors.test.ts +++ b/test/integration/test/cli.db-init.e2e.errors.test.ts @@ -168,7 +168,7 @@ withTempDir(({ createTempDir }) => { await client.query('CREATE SCHEMA IF NOT EXISTS prisma_contract'); await client.query(` CREATE TABLE IF NOT EXISTS prisma_contract.marker ( - id INTEGER PRIMARY KEY DEFAULT 1, + space TEXT NOT NULL PRIMARY KEY DEFAULT 'app', core_hash TEXT NOT NULL, profile_hash TEXT NOT NULL, contract_json JSONB, @@ -180,9 +180,9 @@ withTempDir(({ createTempDir }) => { ) `); await client.query(` - INSERT INTO prisma_contract.marker (id, core_hash, profile_hash, contract_json) - VALUES (1, 'sha256:different-hash', 'sha256:different-profile', '{}') - ON CONFLICT (id) DO NOTHING + INSERT INTO prisma_contract.marker (space, core_hash, profile_hash, contract_json) + VALUES ('app', 'sha256:different-hash', 'sha256:different-profile', '{}') + ON CONFLICT (space) DO NOTHING `); }); diff --git a/test/integration/test/cli.db-init.e2e.test.ts b/test/integration/test/cli.db-init.e2e.test.ts index f8e07852b3..437a0feb86 100644 --- a/test/integration/test/cli.db-init.e2e.test.ts +++ b/test/integration/test/cli.db-init.e2e.test.ts @@ -50,8 +50,8 @@ withTempDir(({ createTempDir }) => { // Verify marker was created in database await withClient(connectionString, async (client) => { const result = await client.query( - 'select core_hash, profile_hash from prisma_contract.marker where id = $1', - [1], + 'select core_hash, profile_hash from prisma_contract.marker where space = $1', + ['app'], ); expect(result.rows.length).toBe(1); expect(result.rows[0]?.core_hash).toBeDefined(); @@ -327,7 +327,7 @@ 
withTempDir(({ createTempDir }) => { await client.query('CREATE SCHEMA IF NOT EXISTS prisma_contract'); await client.query(` CREATE TABLE IF NOT EXISTS prisma_contract.marker ( - id INTEGER PRIMARY KEY DEFAULT 1, + space TEXT NOT NULL PRIMARY KEY DEFAULT 'app', core_hash TEXT NOT NULL, profile_hash TEXT NOT NULL, contract_json JSONB, @@ -339,9 +339,9 @@ withTempDir(({ createTempDir }) => { ) `); await client.query(` - INSERT INTO prisma_contract.marker (id, core_hash, profile_hash, contract_json) - VALUES (1, 'sha256:different-hash', 'sha256:different-profile', '{}') - ON CONFLICT (id) DO NOTHING + INSERT INTO prisma_contract.marker (space, core_hash, profile_hash, contract_json) + VALUES ('app', 'sha256:different-hash', 'sha256:different-profile', '{}') + ON CONFLICT (space) DO NOTHING `); }); @@ -372,7 +372,7 @@ withTempDir(({ createTempDir }) => { await client.query('CREATE SCHEMA IF NOT EXISTS prisma_contract'); await client.query(` CREATE TABLE IF NOT EXISTS prisma_contract.marker ( - id INTEGER PRIMARY KEY DEFAULT 1, + space TEXT NOT NULL PRIMARY KEY DEFAULT 'app', core_hash TEXT NOT NULL, profile_hash TEXT NOT NULL, contract_json JSONB, @@ -384,9 +384,9 @@ withTempDir(({ createTempDir }) => { ) `); await client.query(` - INSERT INTO prisma_contract.marker (id, core_hash, profile_hash, contract_json) - VALUES (1, 'sha256:different-hash', 'sha256:different-profile', '{}') - ON CONFLICT (id) DO NOTHING + INSERT INTO prisma_contract.marker (space, core_hash, profile_hash, contract_json) + VALUES ('app', 'sha256:different-hash', 'sha256:different-profile', '{}') + ON CONFLICT (space) DO NOTHING `); }); diff --git a/test/integration/test/cli.db-sign.e2e.test.ts b/test/integration/test/cli.db-sign.e2e.test.ts index 70e97568eb..ae25f816e9 100644 --- a/test/integration/test/cli.db-sign.e2e.test.ts +++ b/test/integration/test/cli.db-sign.e2e.test.ts @@ -108,8 +108,8 @@ withTempDir(({ createTempDir }) => { // Verify marker was created in database await 
withClient(connectionString, async (client) => { const result = await client.query( - 'select core_hash, profile_hash from prisma_contract.marker where id = $1', - [1], + 'select core_hash, profile_hash from prisma_contract.marker where space = $1', + ['app'], ); expect(result.rows.length).toBe(1); expect(result.rows[0]?.core_hash).toBeDefined(); @@ -153,7 +153,7 @@ withTempDir(({ createTempDir }) => { `); await client.query(` CREATE TABLE IF NOT EXISTS prisma_contract.marker ( - id smallint primary key default 1, + space text not null primary key default 'app', core_hash text not null, profile_hash text not null, contract_json jsonb, @@ -165,8 +165,8 @@ withTempDir(({ createTempDir }) => { ) `); const result = await client.query( - 'select count(*) as count from prisma_contract.marker where id = $1', - [1], + 'select count(*) as count from prisma_contract.marker where space = $1', + ['app'], ); // Marker should not exist (sign should have failed before writing) expect(Number.parseInt(result.rows[0]?.count ?? 
'0', 10)).toBe(0); diff --git a/test/integration/test/cli.db-verify.e2e.test.ts b/test/integration/test/cli.db-verify.e2e.test.ts index 29125b3535..512fc6535f 100644 --- a/test/integration/test/cli.db-verify.e2e.test.ts +++ b/test/integration/test/cli.db-verify.e2e.test.ts @@ -3,6 +3,7 @@ import { access } from 'node:fs/promises'; import { createContractEmitCommand } from '@prisma-next/cli/commands/contract-emit'; import { createDbVerifyCommand } from '@prisma-next/cli/commands/db-verify'; import type { Contract } from '@prisma-next/contract/types'; +import { APP_SPACE_ID } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { typescriptContract } from '@prisma-next/sql-contract-ts/config-types'; import { @@ -86,6 +87,7 @@ async function writeMatchingMarker( await executeStatement(client, ensureTableStatement); const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contract.storage.storageHash, profileHash: contract.profileHash ?? contract.storage.storageHash, contractJson: contract, @@ -951,6 +953,7 @@ withTempDir(({ createTempDir }) => { // Write marker matching contract const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contract.storage.storageHash, profileHash: contract.profileHash ?? 
contract.storage.storageHash, contractJson: contract, diff --git a/test/integration/test/cli.migration-apply.e2e.test.ts b/test/integration/test/cli.migration-apply.e2e.test.ts index 7270007c38..0a089ad26d 100644 --- a/test/integration/test/cli.migration-apply.e2e.test.ts +++ b/test/integration/test/cli.migration-apply.e2e.test.ts @@ -149,8 +149,8 @@ withTempDir(({ createTempDir }) => { // Verify marker was written await withClient(connectionString, async (client) => { const result = await client.query( - 'SELECT core_hash FROM prisma_contract.marker WHERE id = $1', - [1], + 'SELECT core_hash FROM prisma_contract.marker WHERE space = $1', + ['app'], ); expect(result.rows.length).toBe(1); expect(result.rows[0]?.core_hash).toBe(parsed.markerHash); @@ -398,8 +398,8 @@ withTempDir(({ createTempDir }) => { await withClient(connectionString, async (client) => { const marker = await client.query( - 'SELECT core_hash FROM prisma_contract.marker WHERE id = $1', - [1], + 'SELECT core_hash FROM prisma_contract.marker WHERE space = $1', + ['app'], ); expect(marker.rows[0]?.core_hash).toBe(firstMigration!.metadata.to); }); @@ -528,8 +528,8 @@ withTempDir(({ createTempDir }) => { // Verify marker was updated await withClient(connectionString, async (client) => { const result = await client.query( - 'SELECT core_hash FROM prisma_contract.marker WHERE id = $1', - [1], + 'SELECT core_hash FROM prisma_contract.marker WHERE space = $1', + ['app'], ); expect(result.rows.length).toBe(1); expect(result.rows[0]?.core_hash).toBe(secondApply.markerHash); diff --git a/test/integration/test/contract-space-fixture/README.md b/test/integration/test/contract-space-fixture/README.md new file mode 100644 index 0000000000..47a4e77ba7 --- /dev/null +++ b/test/integration/test/contract-space-fixture/README.md @@ -0,0 +1,31 @@ +# test-contract-space (fixture) + +Integration-tests fixture exercising the **contract-space mechanism** in +`@prisma-next/migration-tools` (see its `spaces` export at 
+`packages/1-framework/3-tooling/migration/src/exports/spaces.ts`). + +## What this exists for + +The framework's per-space planner / runner / verifier needs at least one +schema-contributing extension to exercise end-to-end. Real consumers +(cipherstash, pgvector) land in later milestones; this fixture is the +purpose-built scaffolding that exercises: + +- The `contractSpace` descriptor field on `SqlControlExtensionDescriptor`. +- Per-space migration emission under `migrations/<space>/`. +- Pinned per-space artefacts (`contract.json`, `contract.d.ts`, `refs/head.json`). +- The verifier's orphan-marker / orphan-pinned-dir / declared-but-unmigrated cases. +- The `node_modules`-deleted scenario (apply / verify must succeed reading + only the user repo, no descriptor import). + +## Why a test-tree fixture rather than a workspace package + +Earlier iterations hosted this surface as `@prisma-next/extension-test-contract-space` +under `packages/3-extensions/`. The `extension-` prefix is reserved for +production extensions (pgvector, cipherstash, arktype-json), and a fixture +sitting in that directory was structurally indistinguishable from a real +extension. The fixture's actual job — providing a typed `contractSpace` +value the framework's helpers can be exercised against — does not require +a workspace package. Keeping it under the integration-tests workspace's +`test/` tree both removes the misleading "real extension" signal and keeps +the fixture visible alongside the tests that consume it. diff --git a/test/integration/test/contract-space-fixture/constants.ts b/test/integration/test/contract-space-fixture/constants.ts new file mode 100644 index 0000000000..2a45520b24 --- /dev/null +++ b/test/integration/test/contract-space-fixture/constants.ts @@ -0,0 +1,26 @@ +/** + * Constants describing the synthetic test extension's contract space. 
+ * + * These values are placeholders authored by hand: in real extensions they + * would come from running the framework's emit pipeline against the + * extension's own PSL/TS schema. For a fixture whose only consumer is the + * framework's contract-space machinery, hand-authored values are sufficient + * — and surface the smallest possible footprint to the planner / runner / + * verifier under test. + * + * The placeholder hashes use a `synthetic-` prefix so they cannot be + * confused with content-addressed `sha256:*` hashes computed by the real + * authoring pipeline. Round-tripping through canonicalisation is exercised + * by integration tests in later milestones, where these values get + * replaced by hashes the emit pipeline computes. + */ + +export const TEST_SPACE_ID = 'test-contract-space'; + +export const TEST_BOX_TABLE = 'test_box'; + +export const TEST_BASELINE_INVARIANT_ID = 'test-contract-space:create-test_box-v1'; + +export const TEST_HEAD_HASH = 'synthetic-test-contract-space-head-v1'; + +export const TEST_BASELINE_MIGRATION_NAME = '20260101T0000_create_test_box'; diff --git a/test/integration/test/contract-space-fixture/contract.ts b/test/integration/test/contract-space-fixture/contract.ts new file mode 100644 index 0000000000..d00e6db3f7 --- /dev/null +++ b/test/integration/test/contract-space-fixture/contract.ts @@ -0,0 +1,35 @@ +import { type Contract, coreHash, profileHash } from '@prisma-next/contract/types'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; +import { TEST_BOX_TABLE, TEST_HEAD_HASH } from './constants'; + +/** + * The contract value the synthetic test extension publishes through its + * descriptor. Declares a single `test_box` table with two integer columns + * — the simplest non-empty schema representable in today's SQL contract + * IR. Future IR work (composite types, enums, domains) can swap this for + * a richer fixture without changing the descriptor wiring. 
+ */ +export const testContractSpaceContract: Contract<SqlStorage> = { + target: 'postgres', + targetFamily: 'sql', + roots: {}, + models: {}, + capabilities: {}, + extensionPacks: {}, + meta: {}, + profileHash: profileHash('synthetic-test-contract-space-profile-v1'), + storage: { + storageHash: coreHash(TEST_HEAD_HASH), + tables: { + [TEST_BOX_TABLE]: { + columns: { + x: { codecId: 'pg/int4@1', nativeType: 'integer', nullable: false }, + y: { codecId: 'pg/int4@1', nativeType: 'integer', nullable: false }, + }, + uniques: [], + indexes: [], + foreignKeys: [], + }, + }, + }, +}; diff --git a/test/integration/test/contract-space-fixture/control.ts b/test/integration/test/contract-space-fixture/control.ts new file mode 100644 index 0000000000..5f46212eaa --- /dev/null +++ b/test/integration/test/contract-space-fixture/control.ts @@ -0,0 +1,45 @@ +/** + * Control-plane descriptor for the synthetic test extension. + * + * Exposes a `contractSpace` so the framework's per-space planner / runner / + * verifier (project: extension-contract-spaces, M1) can be exercised + * end-to-end against a fixture — without taking on the baggage (vendored + * bundle SQL, codec hooks, native extension installs) that real consumers + * like cipherstash or pgvector carry. + * + * Hosted as a fixture under the integration-tests workspace rather than as + * a top-level `@prisma-next/extension-*` package: the package shape is + * incidental, not load-bearing for the test surface, and the + * `extension-` prefix is reserved for production extensions (see project + * review F1). 
+ */ + +import type { Contract } from '@prisma-next/contract/types'; +import type { SqlControlExtensionDescriptor } from '@prisma-next/family-sql/control'; +import type { ContractSpace } from '@prisma-next/framework-components/control'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; +import { TEST_SPACE_ID } from './constants'; +import { testContractSpaceContract } from './contract'; +import { testContractSpaceBaselineMigration, testContractSpaceHeadRef } from './migrations'; + +const testContractSpace: ContractSpace<Contract<SqlStorage>> = { + contractJson: testContractSpaceContract, + migrations: [testContractSpaceBaselineMigration], + headRef: testContractSpaceHeadRef, +}; + +const testContractSpaceExtensionDescriptor: SqlControlExtensionDescriptor<'postgres'> = { + kind: 'extension' as const, + id: TEST_SPACE_ID, + familyId: 'sql' as const, + targetId: 'postgres' as const, + version: '0.0.1', + contractSpace: testContractSpace, + create: () => ({ + familyId: 'sql' as const, + targetId: 'postgres' as const, + }), +}; + +export { testContractSpaceExtensionDescriptor }; +export default testContractSpaceExtensionDescriptor; diff --git a/test/integration/test/contract-space-fixture/descriptor.test.ts b/test/integration/test/contract-space-fixture/descriptor.test.ts new file mode 100644 index 0000000000..97bbd37aef --- /dev/null +++ b/test/integration/test/contract-space-fixture/descriptor.test.ts @@ -0,0 +1,45 @@ +import type { MigrationPlanOperation } from '@prisma-next/framework-components/control'; +import { describe, expect, it } from 'vitest'; +import { + TEST_BASELINE_INVARIANT_ID, + TEST_BASELINE_MIGRATION_NAME, + TEST_BOX_TABLE, + TEST_HEAD_HASH, + TEST_SPACE_ID, +} from './constants'; +import testContractSpaceExtensionDescriptor from './control'; + +describe('test-contract-space fixture descriptor', () => { + it('identifies as a SQL extension targeted at postgres', () => { + expect(testContractSpaceExtensionDescriptor).toMatchObject({ + kind: 'extension', 
id: TEST_SPACE_ID, + familyId: 'sql', + targetId: 'postgres', + }); + }); + + it('exposes a contractSpace whose contract declares the test_box table', () => { + const space = testContractSpaceExtensionDescriptor.contractSpace; + expect(space).toBeDefined(); + expect(Object.keys(space!.contractJson.storage.tables)).toEqual([TEST_BOX_TABLE]); + }); + + it('publishes one baseline migration that establishes the head invariant', () => { + const space = testContractSpaceExtensionDescriptor.contractSpace!; + expect(space.migrations).toHaveLength(1); + const baseline = space.migrations[0]!; + expect(baseline.dirName).toBe(TEST_BASELINE_MIGRATION_NAME); + expect(baseline.metadata.providedInvariants).toEqual([TEST_BASELINE_INVARIANT_ID]); + const opIds = baseline.ops.map((op: MigrationPlanOperation) => op.invariantId); + expect(opIds).toContain(TEST_BASELINE_INVARIANT_ID); + }); + + it('points the head ref at the baseline-applied state', () => { + const headRef = testContractSpaceExtensionDescriptor.contractSpace!.headRef; + expect(headRef).toEqual({ + hash: TEST_HEAD_HASH, + invariants: [TEST_BASELINE_INVARIANT_ID], + }); + }); +}); diff --git a/test/integration/test/contract-space-fixture/migrations.ts b/test/integration/test/contract-space-fixture/migrations.ts new file mode 100644 index 0000000000..69aeca35c1 --- /dev/null +++ b/test/integration/test/contract-space-fixture/migrations.ts @@ -0,0 +1,47 @@ +import type { + ContractSpaceHeadRef, + MigrationPackage, +} from '@prisma-next/framework-components/control'; +import { + TEST_BASELINE_INVARIANT_ID, + TEST_BASELINE_MIGRATION_NAME, + TEST_BOX_TABLE, + TEST_HEAD_HASH, +} from './constants'; +import { testContractSpaceContract } from './contract'; + +const baselineMetadata = { + migrationHash: 'synthetic-test-contract-space-baseline-hash-v1', + from: null, + to: TEST_HEAD_HASH, + fromContract: null, + toContract: testContractSpaceContract, + hints: { used: [], applied: [], plannerVersion: '2.0.0' }, + labels: [], + 
providedInvariants: [TEST_BASELINE_INVARIANT_ID], + createdAt: '2026-01-01T00:00:00.000Z', +} as const satisfies MigrationPackage['metadata']; + +/** + * Single baseline migration: creates the `test_box` table from the empty + * schema. The op carries the same `invariantId` declared in the head ref, + * so a runner that walks this migration graph from a fresh marker reaches + * the head ref in one step. + */ +export const testContractSpaceBaselineMigration: MigrationPackage = { + dirName: TEST_BASELINE_MIGRATION_NAME, + metadata: baselineMetadata, + ops: [ + { + id: `${TEST_BOX_TABLE}.create`, + label: `Create table "${TEST_BOX_TABLE}"`, + operationClass: 'additive', + invariantId: TEST_BASELINE_INVARIANT_ID, + }, + ], +}; + +export const testContractSpaceHeadRef: ContractSpaceHeadRef = { + hash: TEST_HEAD_HASH, + invariants: [TEST_BASELINE_INVARIANT_ID], +}; diff --git a/test/integration/test/family.sign-database.test.ts b/test/integration/test/family.sign-database.test.ts index d4b280f67e..bdef334466 100644 --- a/test/integration/test/family.sign-database.test.ts +++ b/test/integration/test/family.sign-database.test.ts @@ -5,8 +5,11 @@ import postgresDriver from '@prisma-next/driver-postgres/control'; import sql from '@prisma-next/family-sql/control'; import sqlFamily from '@prisma-next/family-sql/pack'; import { emptyCodecLookup } from '@prisma-next/framework-components/codec'; -import type { SignDatabaseResult } from '@prisma-next/framework-components/control'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { + APP_SPACE_ID, + createControlStack, + type SignDatabaseResult, +} from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { validateContract } from '@prisma-next/sql-contract/validate'; import { defineContract, field, model } from '@prisma-next/sql-contract-ts/contract-builder'; @@ -134,7 +137,7 @@ describe('family instance sign', () => { 
expect(result.timings.total).toBeGreaterThanOrEqual(0); // Verify marker was written to database - const marker = await familyInstance.readMarker({ driver }); + const marker = await familyInstance.readMarker({ driver, space: APP_SPACE_ID }); expect(marker).not.toBeNull(); expect(marker?.storageHash).toBe(validatedContract.storage.storageHash); } finally { @@ -168,6 +171,7 @@ describe('family instance sign', () => { `); // Write initial marker with different hash const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: 'sha256:old-hash', profileHash: 'sha256:old-profile-hash', contractJson: { target: 'postgres' }, @@ -226,7 +230,7 @@ describe('family instance sign', () => { expect(result.timings.total).toBeGreaterThanOrEqual(0); // Verify marker was updated in database - const marker = await familyInstance.readMarker({ driver }); + const marker = await familyInstance.readMarker({ driver, space: APP_SPACE_ID }); expect(marker).not.toBeNull(); expect(marker?.storageHash).toBe(validatedContract.storage.storageHash); expect(marker?.storageHash).not.toBe('sha256:old-hash'); @@ -246,7 +250,7 @@ describe('family instance sign', () => { await withClient(connectionString, async (client) => { await client.query( - 'update prisma_contract.marker set invariants = $1::text[] where id = 1', + `update prisma_contract.marker set invariants = $1::text[] where space = 'app'`, [['email-verified', 'phone-backfill']], ); }); @@ -275,7 +279,7 @@ describe('family instance sign', () => { contractPath: './contract.json', }); - const marker = await familyInstance.readMarker({ driver }); + const marker = await familyInstance.readMarker({ driver, space: APP_SPACE_ID }); expect(marker?.storageHash).toBe(validatedContract.storage.storageHash); expect(marker?.invariants).toEqual(['email-verified', 'phone-backfill']); } finally { @@ -346,7 +350,7 @@ describe('family instance sign', () => { expect(firstResult.marker.created).toBe(true); // Get the marker's updated_at timestamp - 
const markerAfterFirst = await familyInstance.readMarker({ driver }); + const markerAfterFirst = await familyInstance.readMarker({ driver, space: APP_SPACE_ID }); const firstUpdatedAt = markerAfterFirst?.updatedAt; // Second sign - should be idempotent @@ -370,7 +374,10 @@ describe('family instance sign', () => { expect(secondResult.marker.previous).toBeUndefined(); // Verify marker was not updated (updated_at should be the same) - const markerAfterSecond = await familyInstance.readMarker({ driver }); + const markerAfterSecond = await familyInstance.readMarker({ + driver, + space: APP_SPACE_ID, + }); expect(markerAfterSecond?.updatedAt).toEqual(firstUpdatedAt); } finally { await driver.close(); diff --git a/test/integration/test/family.verify-database.basic.test.ts b/test/integration/test/family.verify-database.basic.test.ts index 4c264e41db..12415d6a53 100644 --- a/test/integration/test/family.verify-database.basic.test.ts +++ b/test/integration/test/family.verify-database.basic.test.ts @@ -9,8 +9,11 @@ import { emit } from '@prisma-next/emitter'; import sql from '@prisma-next/family-sql/control'; import sqlFamily from '@prisma-next/family-sql/pack'; import { emptyCodecLookup } from '@prisma-next/framework-components/codec'; -import type { VerifyDatabaseResult } from '@prisma-next/framework-components/control'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { + APP_SPACE_ID, + createControlStack, + type VerifyDatabaseResult, +} from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { validateContract } from '@prisma-next/sql-contract/validate'; import { sqlEmission } from '@prisma-next/sql-contract-emitter'; @@ -162,6 +165,7 @@ describe('family instance verify - basic', () => { // Write marker matching contract const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contractWithDb.storage.storageHash, profileHash: 
contractWithDb.profileHash ?? contractWithDb.storage.storageHash, contractJson: contractWithDb, diff --git a/test/integration/test/family.verify-database.errors.test.ts b/test/integration/test/family.verify-database.errors.test.ts index cbe4670c94..8c430ac01e 100644 --- a/test/integration/test/family.verify-database.errors.test.ts +++ b/test/integration/test/family.verify-database.errors.test.ts @@ -8,8 +8,11 @@ import { emit } from '@prisma-next/emitter'; import sql from '@prisma-next/family-sql/control'; import sqlFamily from '@prisma-next/family-sql/pack'; import { emptyCodecLookup } from '@prisma-next/framework-components/codec'; -import type { VerifyDatabaseResult } from '@prisma-next/framework-components/control'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { + APP_SPACE_ID, + createControlStack, + type VerifyDatabaseResult, +} from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { validateContract } from '@prisma-next/sql-contract/validate'; import { sqlEmission } from '@prisma-next/sql-contract-emitter'; @@ -210,6 +213,7 @@ describe('family instance verify - errors', () => { // Write marker with different hash const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: 'sha256:different-hash', profileHash: contractWithDb.profileHash ?? 
contractWithDb.storage.storageHash, contractJson: contractWithDb, @@ -268,6 +272,7 @@ describe('family instance verify - errors', () => { // Write marker with different profileHash const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contractWithDb.storage.storageHash, profileHash: 'sha256:different-profile-hash', contractJson: contractWithDb, @@ -364,6 +369,7 @@ describe('family instance verify - errors', () => { await executeStatement(client, ensureTableStatement); const write = writeContractMarker({ + space: APP_SPACE_ID, storageHash: contractWithDb.storage.storageHash, profileHash: contractWithDb.profileHash ?? contractWithDb.storage.storageHash, contractJson: contractWithDb, diff --git a/test/integration/test/referential-actions.integration.test.ts b/test/integration/test/referential-actions.integration.test.ts index 3da3c801a3..6c141b09c2 100644 --- a/test/integration/test/referential-actions.integration.test.ts +++ b/test/integration/test/referential-actions.integration.test.ts @@ -6,7 +6,7 @@ import sql, { INIT_ADDITIVE_POLICY } from '@prisma-next/family-sql/control'; import sqlFamily from '@prisma-next/family-sql/pack'; import { emptyCodecLookup } from '@prisma-next/framework-components/codec'; import type { TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import { APP_SPACE_ID, createControlStack } from '@prisma-next/framework-components/control'; import type { SqlStorage } from '@prisma-next/sql-contract/types'; import { validateContract } from '@prisma-next/sql-contract/validate'; import { defineContract, field, model } from '@prisma-next/sql-contract-ts/contract-builder'; @@ -617,6 +617,7 @@ describe('referential actions integration', () => { policy: INIT_ADDITIVE_POLICY, fromContract: null, frameworkComponents, + spaceId: APP_SPACE_ID, }); expect(planResult.kind).toBe('success');