Files
react/scripts/bench/measure.py
Ben Alpert 844ca8b6b2 benchmarking: measure and analyze scripts
This uses wall-clock time (for now) so it's noisier than alternatives
(cachegrind, CPU perf-counters), but it's still valuable. In a future diff we
can make it use those.

`measure.py` outputs something that `analyze.py` can understand, but you can use `analyze.py` without `measure.py` too. The file format is simple:

```
$ cat measurements.txt
factory_ms_jsc_jit 13.580322265625
factory_ms_jsc_jit 13.659912109375
factory_ms_jsc_jit 13.67919921875
factory_ms_jsc_nojit 12.827880859375
factory_ms_jsc_nojit 13.105224609375
factory_ms_jsc_nojit 13.195068359375
factory_ms_node 40.4891400039196
factory_ms_node 40.6669420003891
factory_ms_node 43.52413299679756
ssr_pe_cold_ms_jsc_jit 43.06005859375
...
```

(The lines do not need to be sorted.)

Comparing 0.14.0 vs master:

```
$ ./measure.py react-0.14.0.min.js >014.txt
Measuring SSR for PE benchmark (30 trials)
..............................
Measuring SSR for PE with warm JIT (3 slow trials)
...
$ ./measure.py react.min.js >master.txt
Measuring SSR for PE benchmark (30 trials)
..............................
Measuring SSR for PE with warm JIT (3 slow trials)
...
$ ./analyze.py 014.txt master.txt
Comparing 014.txt (control) vs master.txt (test)
Significant differences marked by ***
% change from control to test, with 99% CIs:

* factory_ms_jsc_jit
    % change:  -0.56% [ -2.51%,  +1.39%]
    means: 14.037 (control), 13.9593 (test)
* factory_ms_jsc_nojit
    % change:  +1.23% [ -1.18%,  +3.64%]
    means: 13.2586 (control), 13.4223 (test)
* factory_ms_node
    % change:  +3.53% [ +0.29%,  +6.77%]  ***
    means: 42.0529 (control), 43.54 (test)
* ssr_pe_cold_ms_jsc_jit
    % change:  -6.84% [ -9.04%,  -4.65%]  ***
    means: 44.2444 (control), 41.2187 (test)
* ssr_pe_cold_ms_jsc_nojit
    % change: -11.81% [-14.66%,  -8.96%]  ***
    means: 52.9449 (control), 46.6953 (test)
* ssr_pe_cold_ms_node
    % change:  -2.70% [ -4.52%,  -0.88%]  ***
    means: 96.8909 (control), 94.2741 (test)
* ssr_pe_warm_ms_jsc_jit
    % change: -17.60% [-22.04%, -13.16%]  ***
    means: 13.763 (control), 11.3439 (test)
* ssr_pe_warm_ms_jsc_nojit
    % change: -20.65% [-22.62%, -18.68%]  ***
    means: 30.8829 (control), 24.5074 (test)
* ssr_pe_warm_ms_node
    % change:  -8.76% [-13.48%,  -4.03%]  ***
    means: 30.0193 (control), 27.3964 (test)
$
```
2015-11-18 16:26:01 -08:00

152 lines
4.6 KiB
Python
Executable File

#!/usr/bin/env python
# Copyright 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import functools
import json
import os
import subprocess
import sys
def _run_js_in_jsc(jit, js, env):
    """Execute *js* under jsc with the JIT enabled or disabled.

    A small prelude gives the benchmark script a uniform harness API:
    now() (milliseconds via preciseTime), globalEval(), and report(),
    which prints "<label>_jsc_jit"/"<label>_jsc_nojit" lines that
    analyze.py consumes.  *env* is JSON-serialized into the global ENV.
    Raises subprocess.CalledProcessError if jsc exits nonzero.
    """
    # The engine label is baked into the script so report() tags every
    # measurement with the exact configuration that produced it.
    engine_label = json.dumps('jsc_' + ('jit' if jit else 'nojit'))
    script = """
function now() {
    return preciseTime() * 1000;
}
function globalEval(code) {
    (0, eval)(code);
}
function report(label, time) {
    print(label + '_' + %(engine)s, time);
}
this.ENV = %(env)s;
%(js)s
""" % {
        'env': json.dumps(env),
        'js': js,
        'engine': engine_label,
    }
    # JSC_useJIT is jsc's environment switch for toggling the JIT.
    jsc_environ = dict(os.environ, JSC_useJIT='yes' if jit else 'no')
    return subprocess.check_call(['jsc', '-e', script], env=jsc_environ)
# Engine runners with the leading `jit` flag pre-bound, so both share the
# (js, env) signature of _run_js_in_node and are interchangeable with it.
_run_js_in_jsc_jit = functools.partial(_run_js_in_jsc, True)
_run_js_in_jsc_nojit = functools.partial(_run_js_in_jsc, False)
def _run_js_in_node(js, env):
    """Execute *js* under node with the same harness API as the jsc runner.

    The prelude defines now() (milliseconds via process.hrtime),
    globalEval(), readFile(), and report(), which prints "<label>_node"
    lines for analyze.py.  *env* is JSON-serialized into the global ENV.
    Raises subprocess.CalledProcessError if node exits nonzero.
    """
    script = """
function now() {
    var hrTime = process.hrtime();
    return hrTime[0] * 1e3 + hrTime[1] * 1e-6;
}
function globalEval(code) {
    var vm = require('vm');
    // Hide "module" so UMD wrappers use the global
    vm.runInThisContext('(function(module){' + code + '\\n})()');
}
function readFile(filename) {
    var fs = require('fs');
    return fs.readFileSync(filename);
}
function report(label, time) {
    console.log(label + '_node', time);
}
global.ENV = %(env)s;
%(js)s
""" % {'env': json.dumps(env), 'js': js}
    return subprocess.check_call(['node', '-e', script])
def _measure_ssr_ms(engine, react_path, bench_name, bench_path, measure_warm):
engine(
"""
var reactCode = readFile(ENV.react_path);
var START = now();
globalEval(reactCode);
var END = now();
if (typeof React !== 'object') throw new Error('React not laoded');
report('factory_ms', END - START);
globalEval(readFile(ENV.bench_path));
if (typeof Benchmark !== 'function') {
throw new Error('benchmark not loaded');
}
var START = now();
var html = React.renderToString(React.createElement(Benchmark));
html.charCodeAt(0); // flatten ropes
var END = now();
report('ssr_' + ENV.bench_name + '_cold_ms', END - START);
var warmup = ENV.measure_warm ? 80 : 0;
var trials = ENV.measure_warm ? 40 : 0;
for (var i = 0; i < warmup; i++) {
React.renderToString(React.createElement(Benchmark));
}
for (var i = 0; i < trials; i++) {
var START = now();
var html = React.renderToString(React.createElement(Benchmark));
html.charCodeAt(0); // flatten ropes
var END = now();
report('ssr_' + ENV.bench_name + '_warm_ms', END - START);
}
""",
{
'bench_name': bench_name,
'bench_path': bench_path,
'measure_warm': measure_warm,
'react_path': react_path,
},
)
def _main():
if len(sys.argv) != 2:
sys.stderr.write("usage: measure.py react.min.js >out.txt\n")
return 1
react_path = sys.argv[1]
trials = 30
sys.stderr.write("Measuring SSR for PE benchmark (%d trials)\n" % trials)
for i in range(trials):
for engine in [
_run_js_in_jsc_jit,
_run_js_in_jsc_nojit,
_run_js_in_node
]:
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', False)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
trials = 3
sys.stderr.write("Measuring SSR for PE with warm JIT (%d slow trials)\n" % trials)
for i in range(trials):
for engine in [
_run_js_in_jsc_jit,
_run_js_in_jsc_nojit,
_run_js_in_node
]:
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', True)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
if __name__ == '__main__':
    # _main returns 1 on usage error, None on success; sys.exit maps
    # that directly to the process exit status.
    sys.exit(_main())