Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .detoxrc.js
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ module.exports = {
$0: 'jest',
config: 'e2e/jest.e2e.config.js',
},
detached: true,
jest: {
setupTimeout: 220000,
teardownTimeout: 60000, // Increase teardown timeout from default 30s to 60s
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,12 @@ import path from 'path';
import xml2js from 'xml2js';
import https from 'https';

// Converts JUnit XML test reports into a structured JSON report with GitHub job metadata
// and retry tracking. These JSON reports are later on analyzed by the Flaky tests bot daily.

const env = {
TEST_RESULTS_PATH: process.env.TEST_RESULTS_PATH || 'android-merged-test-report',
TEST_RUNS_PATH: process.env.TEST_RUNS_PATH || 'test/test-results/test-runs.json',
TEST_RESULTS_PATH: process.env.TEST_RESULTS_PATH || 'e2e-smoke-android-all-test-artifacts',
TEST_RUNS_PATH: process.env.TEST_RUNS_PATH || 'test/test-results/json-test-report.json',
RUN_ID: process.env.RUN_ID ? parseInt(process.env.RUN_ID) : Date.now(),
PR_NUMBER: process.env.PR_NUMBER ? parseInt(process.env.PR_NUMBER) : 0,
GITHUB_ACTIONS: process.env.GITHUB_ACTIONS === 'true',
Expand Down
276 changes: 276 additions & 0 deletions .github/scripts/e2e-merge-detox-junit-reports.mjs
Original file line number Diff line number Diff line change
@@ -0,0 +1,276 @@
#!/usr/bin/env node

/**
* Merges multiple Detox junit XML reports into a single deduplicated report.
*
* This is useful when Detox retries failed tests, creating multiple XML
* files per test run, and we want the final report to reflect the actual
* outcome after all retries.
*/

import { readdir, readFile, writeFile } from 'fs/promises';
import { join } from 'path';
import xml2js from 'xml2js';

// Configuration via environment variables, with defaults suitable for local runs.
const env = {
  // Directory scanned for junit-*.xml files produced by Detox (one file per run/retry pass).
  REPORTS_DIR: process.env.E2E_REPORTS_DIR || './e2e/reports',
  // Filename of the merged, deduplicated report (written inside REPORTS_DIR).
  OUTPUT_FILE: process.env.E2E_OUTPUT_FILE || 'junit.xml',
};

// Shared xml2js instances: one parser for reading the per-run reports,
// one builder for emitting the merged report with a standard XML declaration.
const xmlParser = new xml2js.Parser();
const xmlBuilder = new xml2js.Builder({
  xmldec: { version: '1.0', encoding: 'UTF-8' },
  renderOpts: { pretty: true, indent: ' ' },
});

/**
 * Pull the sortable timestamp segment out of a report filename
 * (e.g. "junit-2025-10-17T12-18-43-667Z.xml" -> "2025-10-17T12-18-43-667Z").
 * @param {string} filename - The filename to extract the timestamp from
 * @returns {string} The timestamp segment, or '' when the name doesn't match
 */
function extractTimestamp(filename) {
  const parsed = /junit-(.+)\.xml$/.exec(filename);
  return parsed === null ? '' : parsed[1];
}

/**
 * List the junit XML report files under env.REPORTS_DIR, ordered
 * chronologically by the timestamp embedded in each filename.
 * @returns {Promise<string[]>} Sorted filenames; empty when the directory is missing
 */
async function findJUnitXmlFiles() {
  let entries;
  try {
    entries = await readdir(env.REPORTS_DIR);
  } catch (error) {
    // A missing reports directory simply means there is nothing to merge.
    if (error.code === 'ENOENT') {
      console.warn(`⚠️ Reports directory not found: ${env.REPORTS_DIR}`);
      return [];
    }
    throw error;
  }

  return entries
    .filter((name) => name.startsWith('junit-') && name.endsWith('.xml'))
    .sort((left, right) => extractTimestamp(left).localeCompare(extractTimestamp(right)));
}

/**
 * Read a junit XML file from disk and parse it with the shared xml2js parser.
 * @param {string} filePath - Path to the XML file
 * @returns {Promise<Object>} The parsed XML document
 */
async function parseJUnitXML(filePath) {
  const rawXml = await readFile(filePath, 'utf-8');
  return xmlParser.parseStringPromise(rawXml);
}

/**
 * Parse all XML files and combine test cases, keeping only the LATEST result for each test.
 *
 * Files are processed in chronological order (by the timestamp in their
 * filename), so a retried test overwrites its earlier result. Suite and
 * root-level counters (tests/failures/errors/skipped/time) are then
 * recalculated from the deduplicated set of test cases.
 * @returns {Promise<Object|null>} Merged report data or null if no files found
 */
async function parseAndMergeReports() {
  console.log('🔍 Scanning for junit XML files...');

  const xmlFiles = await findJUnitXmlFiles();

  if (xmlFiles.length === 0) {
    console.log('⚠️ No junit XML files found to merge');
    return null;
  }

  console.log(`📁 Found ${xmlFiles.length} report file(s):`);
  xmlFiles.forEach(f => console.log(` - ${f}`));

  // Map to store LATEST result for each test case
  // Key: "suiteName::testcaseName::classname"
  // Value: { testcase, suiteName, suiteAttrs, timestamp }
  const testCaseMap = new Map();
  const suitePropertiesMap = new Map();

  // Process each XML file in chronological order (earliest to latest)
  for (const xmlFile of xmlFiles) {
    const filePath = join(env.REPORTS_DIR, xmlFile);
    const timestamp = extractTimestamp(xmlFile);

    console.log(`\n📄 Processing: ${xmlFile}`);

    try {
      const parsed = await parseJUnitXML(filePath);

      if (!parsed.testsuites || !parsed.testsuites.testsuite) {
        console.log(' ⚠️ No test suites found, skipping');
        continue;
      }

      // xml2js yields a bare object (not an array) when only one suite exists.
      const testsuites = Array.isArray(parsed.testsuites.testsuite)
        ? parsed.testsuites.testsuite
        : [parsed.testsuites.testsuite];

      for (const testsuite of testsuites) {
        const suiteName = testsuite.$.name;

        if (!testsuite.testcase) {
          console.log(` ⚠️ Suite "${suiteName}" has no test cases`);
          continue;
        }

        const testcases = Array.isArray(testsuite.testcase)
          ? testsuite.testcase
          : [testsuite.testcase];

        console.log(` 📦 Suite: "${suiteName}" (${testcases.length} test(s))`);

        for (const testcase of testcases) {
          const testName = testcase.$.name;
          const className = testcase.$.classname || suiteName;
          const key = `${suiteName}::${testName}::${className}`;

          const isFailure = !!testcase.failure;
          const isError = !!testcase.error;
          const isSkipped = !!testcase.skipped;

          // A test seen in an earlier file is a retry of that test
          const isRetry = testCaseMap.has(key);

          // Always keep the LATEST result (overwrite previous)
          testCaseMap.set(key, {
            testcase,
            suiteName,
            suiteAttrs: testsuite.$,
            timestamp,
          });

          const statusIcon = isFailure ? '❌' : isError ? '⚠️' : isSkipped ? '⊘' : '✅';
          const retryLabel = isRetry ? ' (retry)' : '';
          console.log(` ${statusIcon} ${testName}${retryLabel}`);
        }

        // Track properties (use latest)
        if (testsuite.properties) {
          suitePropertiesMap.set(suiteName, testsuite.properties);
        }
      }
    } catch (error) {
      // A single corrupt report should not abort the whole merge
      console.warn(`⚠️ Failed to process ${xmlFile}: ${error.message}`);
      continue;
    }
  }

  console.log('\n🔄 Building deduplicated report with latest results...');
  console.log(` Total unique test cases: ${testCaseMap.size}`);

  // Group test cases by suite name
  const suiteMap = new Map();

  for (const { testcase, suiteName, suiteAttrs } of testCaseMap.values()) {
    if (!suiteMap.has(suiteName)) {
      suiteMap.set(suiteName, {
        attrs: suiteAttrs,
        testcases: [],
        properties: suitePropertiesMap.get(suiteName),
      });
    }
    suiteMap.get(suiteName).testcases.push(testcase);
  }

  // Build test suites with recalculated counts
  const finalTestSuites = [];
  let totalTests = 0;
  let totalFailures = 0;
  let totalErrors = 0;
  let totalSkipped = 0;
  let totalTime = 0;

  for (const [suiteName, { attrs, testcases, properties }] of suiteMap) {
    // Recalculate suite statistics based on deduplicated test cases
    let suiteFailures = 0;
    let suiteErrors = 0;
    let suiteSkipped = 0;
    let suiteTime = 0;

    for (const testcase of testcases) {
      if (testcase.failure) suiteFailures++;
      if (testcase.error) suiteErrors++;
      if (testcase.skipped) suiteSkipped++;
      suiteTime += parseFloat(testcase.$.time || '0');
    }

    const suite = {
      $: {
        ...attrs,
        name: suiteName,
        tests: testcases.length,
        failures: suiteFailures,
        errors: suiteErrors,
        skipped: suiteSkipped,
        time: suiteTime.toFixed(3),
      },
      testcase: testcases,
    };

    if (properties) {
      suite.properties = properties;
    }

    finalTestSuites.push(suite);

    totalTests += testcases.length;
    totalFailures += suiteFailures;
    totalErrors += suiteErrors;
    totalSkipped += suiteSkipped;
    totalTime += suiteTime;
  }

  // Build final XML structure.
  // FIX: include `skipped` on the root <testsuites> element so it stays
  // consistent with the per-suite attributes and the standard JUnit XML format.
  const mergedReport = {
    testsuites: {
      $: {
        name: 'jest tests',
        tests: totalTests,
        failures: totalFailures,
        errors: totalErrors,
        skipped: totalSkipped,
        time: totalTime.toFixed(3),
      },
      testsuite: finalTestSuites,
    },
  };

  console.log('\n📊 Final Report Summary (after deduplication):');
  console.log(` Total Test Suites: ${finalTestSuites.length}`);
  console.log(` Total Tests: ${totalTests}`);
  console.log(` Failures: ${totalFailures}`);
  console.log(` Errors: ${totalErrors}`);
  console.log(` Skipped: ${totalSkipped}`);
  console.log(` Total Time: ${totalTime.toFixed(3)}s`);

  return mergedReport;
}


/**
 * Entry point: merge every junit report found on disk into one
 * deduplicated XML file inside the reports directory.
 */
async function main() {
  console.log('🚀 Starting JUnit report merge...\n');

  const mergedReport = await parseAndMergeReports();

  if (mergedReport === null) {
    console.log('\n⚠️ No reports to merge, skipping output file creation');
    process.exit(0);
  }

  // Serialize the merged structure and persist it next to the inputs.
  const outputPath = join(env.REPORTS_DIR, env.OUTPUT_FILE);
  await writeFile(outputPath, xmlBuilder.buildObject(mergedReport), 'utf-8');

  console.log(`\n✅ Merged report written to: ${outputPath}`);
  console.log('🎉 Merge complete!\n');
}

main().catch((error) => {
  console.error('\n❌ Error merging XML reports:', error);
  process.exit(1);
});

10 changes: 4 additions & 6 deletions .github/scripts/e2e-split-tags-shards.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -217,15 +217,15 @@ function computeRetryFilePath(originalPath, retryIndex) {
}

/**
* Create two retry copies of a given spec if not already present
* Create retry copies of a given spec if not already present
* @param {*} originalPath - The original path to the spec file
*/
function duplicateSpecFile(originalPath) {
try {
const srcPath = path.resolve(originalPath);
if (!fs.existsSync(srcPath)) return;
const content = fs.readFileSync(srcPath);
for (let i = 1; i <= 2; i += 1) {
for (let i = 1; i <= 1; i += 1) {
const retryRel = computeRetryFilePath(originalPath, i);
if (!retryRel) continue;
const retryAbs = path.resolve(retryRel);
Expand Down Expand Up @@ -304,7 +304,7 @@ function applyFlakinessDetection(splitFiles) {
return splitFiles;
}

// Build expanded list: base + retry-1 + retry-2 for duplicated files
// Build expanded list: base + retry files for duplicated files
const expanded = [];
for (const file of splitFiles) {
const normalized = normalizePathForCompare(file);
Expand All @@ -313,9 +313,7 @@ function applyFlakinessDetection(splitFiles) {
expanded.push(file);
// Add retry files
const retry1 = computeRetryFilePath(normalized, 1);
const retry2 = computeRetryFilePath(normalized, 2);
if (retry1) expanded.push(retry1);
if (retry2) expanded.push(retry2);
} else {
// Not changed, add as-is
expanded.push(file);
Expand Down Expand Up @@ -348,7 +346,7 @@ async function main() {

// 3) Flaky test detector mechanism in PRs (test retries)
// - Only duplicates changed files that are in this shard's split
// - Creates base + retry-1 + retry-2 for flakiness detection
// - Creates base + retry files for flakiness detection
const shouldSkipFlakinessGate = await shouldSkipFlakinessDetection();
if (!shouldSkipFlakinessGate) {
runFiles = applyFlakinessDetection(splitFiles);
Expand Down
Loading
Loading