Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,13 @@ jobs:
name: Test
command: |
TEST=$(./node_modules/.bin/jest --listTests)
echo $TEST | circleci tests run --command="xargs ./node_modules/.bin/jest --testEnvironment=node --ci --runInBand --reporters=default --reporters=jest-junit --" --split-by=timings
echo $TEST | circleci tests run --command="xargs ./node_modules/.bin/jest --testEnvironment=node --ci --maxWorkers=4 --reporters=default --reporters=jest-junit --" --split-by=timings
environment:
NODE_OPTIONS: --max-old-space-size=6144 # 75% of 8GB which is the memory of large resource class
JEST_JUNIT_OUTPUT_DIR: ./test-results
JEST_JUNIT_ADD_FILE_ATTRIBUTE: "true"
JEST_JUNIT_FILE_PATH_PREFIX: "/home/circleci/project/"
ENABLE_SCHEMA_ISOLATION: "true"
- store_test_results:
path: ./test-results
- store_artifacts:
Expand Down
3 changes: 2 additions & 1 deletion .infra/Pulumi.adhoc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ config:
otelEnabled: false
otelExporterOtlpEndpoint: http://otel-collector.local.svc.cluster.local:4318/v1/traces
otelTracesSampler: always_on
paddleApiKey: topsecret
paddleApiKey: topsecret # NOTE(review): a real Paddle sandbox API key was committed here — rotate it and load it from a secret store instead of checking it into VCS
paddleEnvironment: sandbox
paddleWebhookSecret: topsecret
personalizedDigestSecret: topsecret
Expand Down Expand Up @@ -102,3 +102,4 @@ config:
api:temporal:
chain: ''
key: ''
api:image: api-image:tilt-9046cb2312a58005 # NOTE(review): locally-built Tilt image tag — confirm this was meant to be committed to the adhoc stack config
12 changes: 8 additions & 4 deletions __tests__/boot.ts
Original file line number Diff line number Diff line change
Expand Up @@ -218,9 +218,12 @@ beforeEach(async () => {
await con.getRepository(User).save(usersFixture[0]);
await con.getRepository(Source).save(sourcesFixture);
await con.getRepository(Post).save(postsFixture);
await ioRedisPool.execute((client) => client.flushall());

await deleteKeysByPattern('njord:cores_balance:*');
// Delete only keys used by boot tests, not flushall (which affects other workers)
await Promise.all([
deleteKeysByPattern('boot:*'),
deleteKeysByPattern('exp:*'),
deleteKeysByPattern('njord:cores_balance:*'),
]);

const mockTransport = createMockNjordTransport();
jest
Expand Down Expand Up @@ -303,7 +306,8 @@ describe('anonymous boot', () => {
.set('User-Agent', TEST_UA)
.expect(200);
expect(first.body.user.firstVisit).toBeTruthy();
await ioRedisPool.execute((client) => client.flushall());
// Clear boot-related keys to simulate data loss, avoiding flushall which affects other workers
await deleteKeysByPattern('boot:*');
const second = await request(app.server)
.get(BASE_PATH)
.set('User-Agent', TEST_UA)
Expand Down
188 changes: 188 additions & 0 deletions __tests__/globalSetup.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,188 @@
import { DataSource, QueryRunner } from 'typeorm';

/**
 * Rewrite hardcoded references to the `public` schema so the SQL targets
 * the given worker schema instead. SQL is returned untouched when the
 * target schema is `public` itself.
 */
const replaceSchemaReferences = (sql: string, targetSchema: string): string => {
  if (targetSchema === 'public') {
    return sql;
  }

  // DROP INDEX gets special treatment: strip any schema qualifier from the
  // index name and make the statement idempotent via IF EXISTS.
  const dropIndexPattern =
    /DROP INDEX\s+(?:IF EXISTS\s+)?(?:"public"\.|public\.)?("[^"]+"|[\w]+)/gi;
  const withDropsRewritten = sql.replace(
    dropIndexPattern,
    (_match, indexName: string) => `DROP INDEX IF EXISTS ${indexName}`,
  );

  // Rewrite the remaining flavours of public-schema qualification:
  // public."tbl", bare public.tbl, "public"."tbl", and ON public.…
  return withDropsRewritten
    .replace(/\bpublic\."(\w+)"/gi, `"${targetSchema}"."$1"`)
    .replace(/\bpublic\.(\w+)(?=[\s,;())]|$)/gi, `"${targetSchema}"."$1"`)
    .replace(/"public"\."(\w+)"/gi, `"${targetSchema}"."$1"`)
    .replace(/\bON\s+public\./gi, `ON "${targetSchema}".`);
};

/**
 * Patch a QueryRunner in place so every SQL string it executes is first
 * rewritten to target the worker schema. Returns the same (mutated) runner
 * for call-site convenience.
 */
const wrapQueryRunner = (
  queryRunner: QueryRunner,
  targetSchema: string,
): QueryRunner => {
  // Keep a bound reference to the untouched implementation before patching.
  const delegate = queryRunner.query.bind(queryRunner);

  queryRunner.query = async (
    query: string,
    parameters?: unknown[],
  ): Promise<unknown> =>
    delegate(replaceSchemaReferences(query, targetSchema), parameters);

  return queryRunner;
};

/**
 * Extract the trailing 13-digit timestamp from a TypeORM migration's name
 * (class names end in their creation timestamp). Returns 0 when the name
 * has no timestamp suffix.
 */
const getMigrationTimestamp = (migration: {
  name?: string;
  constructor: { name: string };
}): number => {
  const name = migration.name || migration.constructor.name;
  const match = name.match(/(\d{13})$/);
  return match ? parseInt(match[1], 10) : 0;
};

/**
 * Create and run all migrations for a single worker schema.
 *
 * Opens a short-lived DataSource scoped to the schema, replays every
 * migration (oldest first) through a QueryRunner whose SQL is rewritten to
 * target the schema, and records applied migrations so reruns are no-ops.
 */
const createWorkerSchema = async (schema: string): Promise<void> => {
  const workerDataSource = new DataSource({
    type: 'postgres',
    host: process.env.TYPEORM_HOST || 'localhost',
    // Honor TYPEORM_PORT like the other TYPEORM_* settings instead of
    // hardcoding the default Postgres port.
    port: parseInt(process.env.TYPEORM_PORT || '5432', 10),
    username: process.env.TYPEORM_USERNAME || 'postgres',
    password: process.env.TYPEORM_PASSWORD || '12345',
    database:
      process.env.TYPEORM_DATABASE ||
      (process.env.NODE_ENV === 'test' ? 'api_test' : 'api'),
    schema,
    extra: {
      max: 2,
      // Make unqualified identifiers resolve inside the worker schema first.
      options: `-c search_path=${schema},public`,
    },
    entities: ['src/entity/**/*.{js,ts}'],
    migrations: ['src/migration/**/*.{js,ts}'],
    migrationsTableName: 'migrations',
    logging: false,
  });

  await workerDataSource.initialize();

  try {
    const queryRunner = workerDataSource.createQueryRunner();
    await queryRunner.connect();
    // Mutates the runner so every query it executes targets `schema`.
    wrapQueryRunner(queryRunner, schema);

    try {
      // Bookkeeping table TypeORM uses to track applied migrations.
      await queryRunner.query(`
        CREATE TABLE IF NOT EXISTS "${schema}"."migrations" (
          "id" SERIAL PRIMARY KEY,
          "timestamp" bigint NOT NULL,
          "name" varchar NOT NULL
        )
      `);

      // Metadata table TypeORM uses for views and generated columns.
      await queryRunner.query(`
        CREATE TABLE IF NOT EXISTS "${schema}"."typeorm_metadata" (
          "type" varchar NOT NULL,
          "database" varchar,
          "schema" varchar,
          "table" varchar,
          "name" varchar,
          "value" text
        )
      `);

      // Replay migrations oldest-first, ordered by the timestamp suffix
      // embedded in each migration's name.
      const allMigrations = [...workerDataSource.migrations].sort(
        (a, b) => getMigrationTimestamp(a) - getMigrationTimestamp(b),
      );

      for (const migration of allMigrations) {
        const migrationName = migration.name || migration.constructor.name;

        const alreadyRun = await queryRunner.query(
          `SELECT * FROM "${schema}"."migrations" WHERE "name" = $1`,
          [migrationName],
        );

        if (alreadyRun.length === 0) {
          await migration.up(queryRunner);

          // Fall back to "now" when the migration name carries no timestamp,
          // mirroring TypeORM's own recording behavior.
          const timestamp = getMigrationTimestamp(migration) || Date.now();

          await queryRunner.query(
            `INSERT INTO "${schema}"."migrations" ("timestamp", "name") VALUES ($1, $2)`,
            [timestamp, migrationName],
          );
        }
      }
    } finally {
      await queryRunner.release();
    }
  } finally {
    // Always close the per-schema connection, even when a migration throws,
    // so a failed setup doesn't leak Postgres connections.
    await workerDataSource.destroy();
  }
};

/**
 * Jest global setup - runs once before all workers start.
 * Creates one Postgres schema per Jest worker (`test_worker_<n>`) and runs
 * all migrations in each, so workers can run tests in parallel against
 * isolated schemas. No-op unless ENABLE_SCHEMA_ISOLATION=true.
 */
export default async function globalSetup(): Promise<void> {
  // Only run when schema isolation is enabled
  if (process.env.ENABLE_SCHEMA_ISOLATION !== 'true') {
    return;
  }

  // Must match the --maxWorkers value Jest is launched with.
  const maxWorkers = parseInt(process.env.JEST_MAX_WORKERS || '4', 10);
  console.log(
    `\nCreating ${maxWorkers} worker schemas for parallel testing...`,
  );

  // First, (re)create all schemas on a single short-lived connection.
  const dataSource = new DataSource({
    type: 'postgres',
    host: process.env.TYPEORM_HOST || 'localhost',
    // Honor TYPEORM_PORT like the other TYPEORM_* settings instead of
    // hardcoding the default Postgres port.
    port: parseInt(process.env.TYPEORM_PORT || '5432', 10),
    username: process.env.TYPEORM_USERNAME || 'postgres',
    password: process.env.TYPEORM_PASSWORD || '12345',
    database:
      process.env.TYPEORM_DATABASE ||
      (process.env.NODE_ENV === 'test' ? 'api_test' : 'api'),
    schema: 'public',
    extra: { max: 1 },
  });

  await dataSource.initialize();

  try {
    for (let i = 1; i <= maxWorkers; i++) {
      const schema = `test_worker_${i}`;
      // Drop-and-recreate so stale objects from a previous run can't leak in.
      await dataSource.query(`DROP SCHEMA IF EXISTS "${schema}" CASCADE`);
      await dataSource.query(`CREATE SCHEMA "${schema}"`);
    }
  } finally {
    // Close the bootstrap connection even if schema creation fails,
    // so a broken setup doesn't leak a Postgres connection.
    await dataSource.destroy();
  }

  // Run migrations for each schema sequentially to avoid memory spikes
  for (let i = 1; i <= maxWorkers; i++) {
    const schema = `test_worker_${i}`;
    console.log(`Running migrations for ${schema}...`);
    await createWorkerSchema(schema);
  }

  console.log('All worker schemas ready!\n');
}
52 changes: 46 additions & 6 deletions __tests__/setup.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import * as matchers from 'jest-extended';
import '../src/config';
import createOrGetConnection from '../src/db';
import { testSchema } from '../src/data-source';
import { remoteConfig } from '../src/remoteConfig';
import { loadAuthKeys } from '../src/auth';

Expand Down Expand Up @@ -57,21 +58,54 @@ jest.mock('../src/remoteConfig', () => ({
},
}));

// Tables that contain seed/reference data that should not be deleted between tests
// These are populated by migrations and tests don't modify them
// NOTE: Most tables are NOT included because tests create their own test data
// and expect tables to start empty (so auto-increment IDs start at 1)
const SEED_DATA_TABLES = new Set([
  'migrations', // Required by TypeORM to track applied migrations
  'checkpoint', // System checkpoints, tests don't create/modify
]);

// Wipe every entity table (except views and seed tables) and reset
// auto-increment sequences so each test starts against an empty database
// with IDs beginning at 1. Runs inside the current worker's schema
// (see `testSchema`). NOTE(review): errors during sequence reset are
// deliberately swallowed — best-effort by design.
const cleanDatabase = async (): Promise<void> => {
  await remoteConfig.init();

  const con = await createOrGetConnection();
  for (const entity of con.entityMetadatas) {
    const repository = con.getRepository(entity.name);
    // Views have no rows of their own to delete.
    if (repository.metadata.tableType === 'view') continue;

    // Skip seed data tables - they're populated once and tests expect them to exist
    if (SEED_DATA_TABLES.has(entity.tableName)) continue;

    await repository.query(`DELETE FROM "${entity.tableName}";`);

    for (const column of entity.primaryColumns) {
      if (column.generationStrategy === 'increment') {
        // Reset sequences/identity columns for auto-increment primary keys
        // Must use schema-qualified table name for schema isolation to work
        try {
          // First try pg_get_serial_sequence (works for SERIAL columns)
          // Schema-qualify the table name for proper resolution in worker schemas
          const schemaQualifiedTable = `${testSchema}.${entity.tableName}`;
          const seqResult = await repository.query(
            `SELECT pg_get_serial_sequence($1, $2) as seq_name`,
            [schemaQualifiedTable, column.databaseName],
          );
          if (seqResult[0]?.seq_name) {
            await repository.query(
              `ALTER SEQUENCE ${seqResult[0].seq_name} RESTART WITH 1`,
            );
          } else {
            // If no sequence found, try resetting IDENTITY column directly
            // This handles GENERATED AS IDENTITY columns
            await repository.query(
              `ALTER TABLE "${testSchema}"."${entity.tableName}" ALTER COLUMN "${column.databaseName}" RESTART WITH 1`,
            );
          }
        } catch {
          // Sequence/identity might not exist or not be resettable, ignore
        }
      }
    }
  }
Expand All @@ -82,8 +116,14 @@ jest.mock('file-type', () => ({
fileTypeFromBuffer: () => fileTypeFromBuffer(),
}));

// Runs once per test file. Worker schemas and migrations are handled by
// globalSetup.ts before any worker starts; this hook only opens the DB
// connection up-front (30s timeout) so the first test doesn't pay that cost.
beforeAll(async () => {
  // Schema creation is now handled by globalSetup.ts
  // This beforeAll just ensures the connection is ready
  await createOrGetConnection();
}, 30000);

// Reset shared state before every test: reload auth keys, then wipe all
// non-seed tables via cleanDatabase so each test starts from an empty DB.
beforeEach(async () => {
  loadAuthKeys();

  await cleanDatabase();
}, 30000); // 30 second timeout for database cleanup
Loading
Loading