react/scripts/fiber/record-tests
Commit 9011182c55 by Sebastian Markbåge: Configure Jest with Stack and Fiber as separate projects (#10214)
* Disable Fiber specific test run in CI

This disables the comparison against previously recorded tests. Instead,
we'll rely on jest failures to fail tests.

* Extract jest config into two separate projects for Fiber and Stack

This allows us to run both in the same jest run. The setupMocks file is forked
into a specific environment configuration for each project. This replaces the
environment variable.
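
For context, Jest's projects option takes a list of per-project configs, so
the split presumably ends up shaped roughly like this (only fiber.config.json
is confirmed by the script below; the rest is a sketch):

    // Hypothetical top-level Jest config; stack.config.json is assumed.
    module.exports = {
      projects: [
        'scripts/jest/fiber.config.json',
        'scripts/jest/stack.config.json',
      ],
    };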

I used copy-paste here to keep things clear. We can abstract this later. It's
clear to me that simply extracting shared stuff is not the best way to
abstract this; setupMocks, for example, didn't need all the code in both
branches.

I think that some of the shared stuff, such as error message extraction,
should probably be lifted out into a stand-alone jest project instead of
being shared between the two configs.

* Fix class equivalence test

There's a behavior change when projects are used: setupTestFrameworkScriptFile
no longer overrides the normal config.

This test should probably just move to a separate CI script or something
less hacky.

* Only run Fiber tests with scripts/fiber/record-tests
2017-07-19 10:35:45 -07:00

188 lines · 5.4 KiB · JavaScript · Executable File
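
Typical invocation (the file is executable): scripts/fiber/record-tests --track-facts. The --max-workers=N flag caps Jest's parallelism and defaults to one fewer than the CPU count (minimum 1).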

#!/usr/bin/env node
'use strict';
const child_process = require('child_process');
const crypto = require('crypto');
const fs = require('fs');
const os = require('os');
const path = require('path');
const SearchSource = require('jest').SearchSource;
const TestRunner = require('jest').TestRunner;
const TestWatcher = require('jest').TestWatcher;
const createContext = require('jest-runtime').createContext;
const readConfig = require('jest-config').readConfig;
const root = path.normalize(path.join(__dirname, '..', '..'));
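// Always run against the Fiber-specific config; an empty test path
// pattern matches every test file.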
const argv = {
  config: path.join(root, 'scripts/jest/fiber.config.json'),
};
const testPathPattern = '';
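// Jest loads `testRunner` from a module path (including inside worker
// processes), so we can't hand it a function directly. Instead, write a
// tiny trampoline module to a temp file; it requires this script back
// and wraps the original runner via wrapRunner() below.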
function wrapRunnerFile(runnerPath) {
  const filename = path.join(
    os.tmpdir(),
    'test-runner-' + crypto.randomBytes(8).toString('hex') + '.js'
  );
  fs.writeFileSync(
    filename,
    `
    'use strict';
    var runnerPath = ${JSON.stringify(runnerPath)};
    var wrap = require(${JSON.stringify(__filename)}).wrapRunner;
    module.exports = wrap(runnerPath);
    `
  );
  return filename;
}
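// Patches the Jasmine reporter so every spec records whether it produced
// expectDev() failures, then returns a test runner that tags each result
// with that flag.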
function wrapRunner(originalPath) {
  const original = require(originalPath);
  // Assuming originalPath is .../jest-jasmine2/build/index.js
  const JasmineReporter = require(
    path.join(path.dirname(originalPath), 'reporter.js')
  );
  // For each spec, we store whether there was any expectDev() failure. This
  // relies on the results being returned in the same order as they are run.
  const hadDevFailures = [];
  let environment;
  const oldSpecStarted = JasmineReporter.prototype.specStarted;
  JasmineReporter.prototype.specStarted = function(result) {
    oldSpecStarted.apply(this, arguments);
    environment.global.__suppressDevFailures = true;
    environment.global.__hadDevFailures = false;
  };
  const oldSpecDone = JasmineReporter.prototype.specDone;
  JasmineReporter.prototype.specDone = function(result) {
    oldSpecDone.apply(this, arguments);
    environment.global.__suppressDevFailures = false;
    hadDevFailures.push(environment.global.__hadDevFailures);
  };
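  // The wrapped runner clears Jest's aggregate failure message (a
  // recording run shouldn't fail outright) and copies the per-spec
  // dev-failure flags onto the matching test results.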
  return function runner(globalConfig, config, env, runtime, testPath) {
    environment = env;
    return original(globalConfig, config, env, runtime, testPath)
      .then((results) => {
        results.failureMessage = null;
        hadDevFailures.forEach((hadFailures, i) => {
          results.testResults[i].hadDevFailures = hadFailures;
        });
        hadDevFailures.length = 0;
        return results;
      });
  };
}
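// Runs the full Fiber suite in-process with the wrapped runner and
// resolves with Jest's aggregated results.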
function runJest(maxWorkers) {
  let {config} = readConfig(argv, root);
  config = Object.assign({}, config, {
    testRunner: wrapRunnerFile(config.testRunner),
    maxWorkers: maxWorkers,
  });
  return createContext(config, {}).then((context) => {
    const source = new SearchSource(context);
    return source.getTestPaths({testPathPattern}).then((data) => {
      const runner = new TestRunner(config, {
        getTestSummary: () => 'You did it!',
      });
      const watcher = new TestWatcher({isWatchMode: false});
      return runner.runTests(data.tests, watcher).catch((x) => {
        console.log('error', x);
      });
    });
  });
}
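// Renders the tests matching `predicate` as blocks of a root-relative
// file path followed by one "* <test title>" line per test, sorted by
// path.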
function formatResults(runResults, predicate) {
  const formatted = [];
  runResults.testResults.forEach((fileResult) => {
    const filePath = path.relative(root, fileResult.testFilePath);
    // on windows, we still want to output forward slashes
    const unixFilePath = filePath.replace(/\\/g, '/');
    const tests = fileResult.testResults.filter((test) =>
      predicate(fileResult, test)
    );
    if (tests.length) {
      const lines = [unixFilePath].concat(
        tests.map((test) => '* ' + test.title)
      );
      formatted.push(lines.join('\n'));
    }
  });
  formatted.sort();
  return formatted.join('\n\n');
}
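// Records the status of every test into three text files next to this
// script: passing, passing-except-dev, and failing. Optionally reports
// the pass ratio to the facts tracker.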
function recordTests(maxWorkers, trackFacts) {
  runJest(maxWorkers).then((runResults) => {
    const passing = formatResults(
      runResults,
      (file, test) => test.status === 'passed' && !test.hadDevFailures
    );
    const passingExceptDev = formatResults(
      runResults,
      (file, test) => test.status === 'passed' && test.hadDevFailures
    );
    const failing = formatResults(
      runResults,
      (file, test) => test.status === 'failed'
    );
    fs.writeFileSync(
      path.join(__dirname, 'tests-passing.txt'),
      passing + '\n'
    );
    fs.writeFileSync(
      path.join(__dirname, 'tests-passing-except-dev.txt'),
      passingExceptDev + '\n'
    );
    fs.writeFileSync(
      path.join(__dirname, 'tests-failing.txt'),
      failing + '\n'
    );
    if (trackFacts) {
      const fact =
        runResults.numPassedTests +
        '/' +
        (runResults.numPassedTests + runResults.numFailedTests);
      // TODO: Shelling out here is silly.
      child_process.spawnSync(
        process.execPath,
        [
          path.join(__dirname, '../facts-tracker/index.js'),
          'fiber-tests',
          fact,
        ],
        {
          stdio: 'inherit',
        }
      );
    }
  });
}
if (require.main === module) {
  const argv = require('yargs')
    .demand(0, 0)
    .number('max-workers')
    .describe('max-workers', 'Number of workers to use for jest.')
    .default('max-workers', Math.max(os.cpus().length - 1, 1))
    .boolean('track-facts')
    .describe('track-facts', 'Use facts-tracker to record passing tests.')
    .strict()
    .help()
    .argv;
  recordTests(argv.maxWorkers, argv.trackFacts);
}
module.exports = {
  wrapRunner,
};