pdf.js/test/test.mjs
/*
* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable no-var */
2014-03-25 05:52:11 +09:00
import { copySubtreeSync, ensureDirSync, removeDirSync } from "./testutils.mjs";
import {
downloadManifestFiles,
verifyManifestFiles,
} from "./downloadutils.mjs";
import fs from "fs";
import os from "os";
import path from "path";
import puppeteer from "puppeteer";
import readline from "readline";
import rimraf from "rimraf";
import { translateFont } from "./font/ttxdriver.mjs";
import url from "url";
import { WebServer } from "./webserver.mjs";
import yargs from "yargs";
const rimrafSync = rimraf.sync;
2014-03-25 05:52:11 +09:00
/**
 * Parses the command line options for the test runner.
 *
 * @returns {Object} The parsed options, with `testfilter` normalized to
 *   always be an array (yargs yields a scalar for a single occurrence).
 */
function parseOptions() {
  const args = yargs(process.argv)
    .usage("Usage: $0")
    .option("downloadOnly", {
      default: false,
      describe: "Download test PDFs without running the tests.",
      type: "boolean",
    })
    .option("fontTest", {
      default: false,
      describe: "Run the font tests.",
      type: "boolean",
    })
    .option("help", {
      alias: "h",
      default: false,
      describe: "Show this help message.",
      type: "boolean",
    })
    .option("integration", {
      default: false,
      describe: "Run the integration tests.",
      type: "boolean",
    })
    .option("manifestFile", {
      default: "test_manifest.json",
      describe: "A path to JSON file in the form of `test_manifest.json`.",
      type: "string",
    })
    .option("masterMode", {
      alias: "m",
      default: false,
      describe: "Run the script in master mode.",
      type: "boolean",
    })
    .option("noChrome", {
      default: false,
      describe: "Skip Chrome when running tests.",
      type: "boolean",
    })
    .option("noDownload", {
      default: false,
      describe: "Skip downloading of test PDFs.",
      type: "boolean",
    })
    .option("noPrompts", {
      default: false,
      describe: "Uses default answers (intended for CLOUD TESTS only!).",
      type: "boolean",
    })
    .option("headless", {
      default: false,
      describe:
        "Run the tests in headless mode, i.e. without visible browser windows.",
      type: "boolean",
    })
    .option("port", {
      default: 0,
      describe: "The port the HTTP server should listen on.",
      type: "number",
    })
    .option("reftest", {
      default: false,
      describe:
        "Automatically start reftest showing comparison test failures, if there are any.",
      type: "boolean",
    })
    .option("statsDelay", {
      default: 0,
      describe:
        "The amount of time in milliseconds the browser should wait before starting stats.",
      type: "number",
    })
    .option("statsFile", {
      default: "",
      describe: "The file where to store stats.",
      type: "string",
    })
    .option("strictVerify", {
      default: false,
      describe: "Error if verifying the manifest files fails.",
      type: "boolean",
    })
    .option("testfilter", {
      alias: "t",
      default: [],
      describe: "Run specific reftest(s).",
      type: "array",
    })
    .example(
      "testfilter",
      "$0 -t=issue5567 -t=issue5909\n" +
        "Run the reftest identified by issue5567 and issue5909."
    )
    .option("unitTest", {
      default: false,
      describe: "Run the unit tests.",
      type: "boolean",
    })
    .option("xfaOnly", {
      default: false,
      describe: "Only run the XFA reftest(s).",
      type: "boolean",
    })
    // The various test modes are mutually exclusive; the unary `+` turns the
    // leading boolean into a number so the additions count enabled flags.
    .check(argv => {
      if (+argv.reftest + argv.unitTest + argv.fontTest + argv.masterMode > 1) {
        throw new Error(
          "--reftest, --unitTest, --fontTest, and --masterMode must not be specified together."
        );
      }
      return true;
    })
    .check(argv => {
      if (+argv.unitTest + argv.fontTest + argv.integration + argv.xfaOnly > 1) {
        throw new Error(
          "--unitTest, --fontTest, --integration, and --xfaOnly must not be specified together."
        );
      }
      return true;
    })
    .check(argv => {
      if (argv.testfilter && argv.testfilter.length > 0 && argv.xfaOnly) {
        throw new Error("--testfilter and --xfaOnly cannot be used together.");
      }
      return true;
    })
    .check(argv => {
      if (argv.noDownload && argv.downloadOnly) {
        throw new Error(
          "--noDownload and --downloadOnly cannot be used together."
        );
      }
      return true;
    })
    .check(argv => {
      if (argv.masterMode && argv.manifestFile !== "test_manifest.json") {
        throw new Error(
          "when --masterMode is specified --manifestFile shall be equal to `test_manifest.json`."
        );
      }
      return true;
    });

  const parsed = args.argv;
  if (!Array.isArray(parsed.testfilter)) {
    parsed.testfilter = [parsed.testfilter];
  }
  return parsed;
}
// Directory where newly generated reference snapshots are staged in
// master mode, before being synced into `refsDir`.
var refsTmpDir = "tmp";
// Directory where snapshots of failing tests are written for inspection.
var testResultDir = "test_snapshots";
// Directory holding the master copy of the reference snapshots.
var refsDir = "ref";
// Log file consumed by the reftest analyzer for eq-test failures.
var eqLog = "eq.log";
// Number of seconds a browser session may stay silent before it is
// considered hung and gets shut down.
var browserTimeout = 120;
/**
 * (Re)arms the inactivity watchdog of a browser session.
 *
 * Any previously scheduled timeout is cancelled first. Passing a falsy
 * `onTimeout` disables monitoring for the session.
 *
 * @param {Object} session - The browser session to monitor.
 * @param {?Function} onTimeout - Callback invoked with the session after
 *   `browserTimeout` seconds of silence, or falsy to stop monitoring.
 */
function monitorBrowserTimeout(session, onTimeout) {
  if (session.timeoutMonitor) {
    clearTimeout(session.timeoutMonitor);
  }
  session.timeoutMonitor = onTimeout
    ? setTimeout(() => onTimeout(session), browserTimeout * 1000)
    : null;
}
/**
 * Syncs the freshly generated reference snapshots from tmp/ into ref/.
 *
 * Interactively asks for confirmation unless `--noPrompts` was given, in
 * which case the sync happens unconditionally and tmp/ is kept around.
 */
function updateRefImages() {
  // Copies tmp/ over ref/ and optionally deletes tmp/ afterwards.
  function sync(removeTmp) {
    console.log(" Updating ref/ ... ");
    copySubtreeSync(refsTmpDir, refsDir);
    if (removeTmp) {
      removeDirSync(refsTmpDir);
    }
    console.log("done");
  }

  if (options.noPrompts) {
    sync(false); // don't remove tmp/ for botio
    return;
  }
  const reader = readline.createInterface(process.stdin, process.stdout);
  reader.question(
    "Would you like to update the master copy in ref/? [yn] ",
    answer => {
      if (answer.toLowerCase() === "y") {
        sync(true);
      } else {
        console.log(" OK, not updating.");
      }
      reader.close();
    }
  );
}
/**
 * Starts the web server and opens Firefox on the reftest analyzer page,
 * pointed at eq.log, so comparison failures can be inspected visually.
 * The process exits once the analyzer window is closed.
 */
function examineRefImages() {
  startServer();
  startBrowser({
    browserName: "firefox",
    headless: false,
    startUrl: `http://${host}:${server.port}/test/resources/reftest-analyzer.html#web=/test/eq.log`,
  }).then(browser => {
    browser.on("disconnected", () => {
      stopServer();
      process.exit(0);
    });
  });
}
/**
 * Runs the reference tests: prepares the snapshot directories, starts the
 * web server and the browsers, and reports aggregated results once every
 * browser session has closed.
 *
 * @param {boolean} masterMode - Whether new reference snapshots should be
 *   staged in tmp/ for failing or missing references.
 * @param {boolean} showRefImages - Whether the reftest analyzer should be
 *   opened automatically if there were eq failures.
 */
async function startRefTest(masterMode, showRefImages) {
  // Invoked (via `onAllSessionsClosed`) when every browser session is done;
  // aggregates per-session counters and prints the summary.
  function finalize() {
    stopServer();
    let numRuns = 0;
    let numErrors = 0;
    let numFBFFailures = 0;
    let numEqFailures = 0;
    let numEqNoSnapshot = 0;
    for (const session of sessions) {
      numRuns += session.numRuns;
      numErrors += session.numErrors;
      numFBFFailures += session.numFBFFailures;
      numEqFailures += session.numEqFailures;
      numEqNoSnapshot += session.numEqNoSnapshot;
    }
    const numFatalFailures = numErrors + numFBFFailures;
    console.log();
    if (!numRuns) {
      console.log(`OHNOES! No tests ran!`);
    } else if (numFatalFailures + numEqFailures > 0) {
      console.log("OHNOES! Some tests failed!");
      if (numErrors > 0) {
        console.log(" errors: " + numErrors);
      }
      if (numEqFailures > 0) {
        console.log(" different ref/snapshot: " + numEqFailures);
      }
      if (numFBFFailures > 0) {
        console.log(" different first/second rendering: " + numFBFFailures);
      }
    } else {
      console.log("All regression tests passed.");
    }
    const runtime = (Date.now() - startTime) / 1000;
    console.log("Runtime was " + runtime.toFixed(1) + " seconds");
    if (options.statsFile) {
      fs.writeFileSync(options.statsFile, JSON.stringify(stats, null, 2));
    }
    if (masterMode) {
      if (numEqFailures + numEqNoSnapshot > 0) {
        console.log();
        console.log("Some eq tests failed or didn't have snapshots.");
        console.log("Checking to see if master references can be updated...");
        if (numFatalFailures > 0) {
          console.log(" No. Some non-eq tests failed.");
        } else {
          console.log(
            " Yes! The references in tmp/ can be synced with ref/."
          );
          updateRefImages();
        }
      }
    } else if (showRefImages && numEqFailures > 0) {
      console.log();
      console.log(
        `Starting reftest harness to examine ${numEqFailures} eq test failures.`
      );
      examineRefImages();
    }
  }

  // Cleans previous output, then launches the server and the browsers.
  async function setup() {
    if (fs.existsSync(refsTmpDir)) {
      console.error("tmp/ exists -- unable to proceed with testing");
      process.exit(1);
    }
    if (fs.existsSync(eqLog)) {
      fs.unlinkSync(eqLog);
    }
    if (fs.existsSync(testResultDir)) {
      removeDirSync(testResultDir);
    }
    startTime = Date.now();
    startServer();
    server.hooks.POST.push(refTestPostHandler);
    onAllSessionsClosed = finalize;

    await startBrowsers({
      baseUrl: `http://${host}:${server.port}/test/test_slave.html`,
      initializeSession: session => {
        session.masterMode = masterMode;
        session.taskResults = {};
        session.tasks = {};
        session.remaining = manifest.length;
        for (const item of manifest) {
          // Pre-size the per-round results array for this task.
          const roundsResults = [];
          roundsResults.length = item.rounds || 1;
          session.taskResults[item.id] = roundsResults;
          session.tasks[item.id] = item;
        }
        session.numRuns = 0;
        session.numErrors = 0;
        session.numFBFFailures = 0;
        session.numEqNoSnapshot = 0;
        session.numEqFailures = 0;
        monitorBrowserTimeout(session, handleSessionTimeout);
      },
    });
  }

  // In master mode a stale tmp/ blocks testing; ask (or, with --noPrompts,
  // decide automatically) whether it may be removed before setting up.
  function checkRefsTmp() {
    if (masterMode && fs.existsSync(refsTmpDir)) {
      if (options.noPrompts) {
        removeDirSync(refsTmpDir);
        setup();
        return;
      }
      console.log("Temporary snapshot dir tmp/ is still around.");
      console.log("tmp/ can be removed if it has nothing you need.");
      const reader = readline.createInterface(process.stdin, process.stdout);
      reader.question(
        "SHOULD THIS SCRIPT REMOVE tmp/? THINK CAREFULLY [yn] ",
        answer => {
          if (answer.toLowerCase() === "y") {
            removeDirSync(refsTmpDir);
          }
          setup();
          reader.close();
        }
      );
    } else {
      setup();
    }
  }

  let startTime;
  const manifest = getTestManifest();
  if (!manifest) {
    return;
  }
  if (!options.noDownload) {
    await ensurePDFsDownloaded();
  }
  checkRefsTmp();
}
/**
 * Handles a browser session that has not responded within the allowed
 * `browserTimeout` window: counts all remaining tasks as errors and
 * closes the session. Already-closed sessions are ignored.
 *
 * @param {Object} session - The unresponsive browser session.
 */
function handleSessionTimeout(session) {
  if (session.closed) {
    return;
  }
  const browser = session.name;
  console.log(
    `TEST-UNEXPECTED-FAIL | test failed ${browser} has not responded in ${browserTimeout}s`
  );
  session.numErrors += session.remaining;
  session.remaining = 0;
  closeSession(browser);
}
/**
 * Reads the test manifest from `options.manifestFile` and, when a test
 * filter and/or `--xfaOnly` was given, reduces it to the matching items.
 *
 * @returns {Array|undefined} The (filtered) manifest entries, or
 *   `undefined` when one of the requested test IDs is not in the manifest.
 */
function getTestManifest() {
  var manifest = JSON.parse(fs.readFileSync(options.manifestFile));
  // Work on a copy of the filter: matched IDs are removed below so that any
  // leftovers can be reported as unrecognized.
  const testFilter = options.testfilter.slice(0),
    xfaOnly = options.xfaOnly;
  if (testFilter.length || xfaOnly) {
    manifest = manifest.filter(function (item) {
      var i = testFilter.indexOf(item.id);
      if (i !== -1) {
        testFilter.splice(i, 1);
        return true;
      }
      if (xfaOnly && item.enableXfa) {
        return true;
      }
      return false;
    });
    if (testFilter.length) {
      console.error("Unrecognized test IDs: " + testFilter.join(" "));
      return undefined;
    }
  }
  return manifest;
}
2014-03-25 05:52:11 +09:00
/**
 * Compares the rendered page snapshots of an eq/text/highlight task with
 * the stored reference images, logging mismatches, writing failing
 * snapshots to `testResultDir`, and (in master mode) staging snapshots
 * for new/changed references in `refsTmpDir`.
 *
 * @param {Object} task - The manifest entry for this task.
 * @param {Array} results - Per-round, per-page result objects; only the
 *   first round is compared here.
 * @param {string} browser - Name of the browser session.
 * @param {boolean} masterMode - Whether reference updates are staged.
 */
function checkEq(task, results, browser, masterMode) {
  const taskId = task.id;
  const taskType = task.type;
  const refSnapshotDir = path.join(refsDir, os.platform(), browser, taskId);
  const testSnapshotDir = path.join(
    testResultDir,
    os.platform(),
    browser,
    taskId
  );
  const pageResults = results[0];
  let numEqNoSnapshot = 0;
  let numEqFailures = 0;

  for (let page = 0; page < pageResults.length; page++) {
    const pageResult = pageResults[page];
    if (!pageResult) {
      continue;
    }
    // Decode the data-URL snapshot into raw PNG bytes.
    let testSnapshot = pageResult.snapshot;
    if (testSnapshot?.startsWith("data:image/png;base64,")) {
      testSnapshot = Buffer.from(testSnapshot.substring(22), "base64");
    } else {
      console.error("Valid snapshot was not found.");
    }

    let refSnapshot = null;
    let eq = false;
    const refPath = path.join(refSnapshotDir, page + 1 + ".png");
    if (!fs.existsSync(refPath)) {
      numEqNoSnapshot++;
      if (!masterMode) {
        console.log("WARNING: no reference snapshot " + refPath);
      }
    } else {
      refSnapshot = fs.readFileSync(refPath);
      // Byte-wise comparison of the PNG contents.
      eq = refSnapshot.toString("hex") === testSnapshot.toString("hex");
      if (!eq) {
        console.log(
          "TEST-UNEXPECTED-FAIL | " +
            taskType +
            " " +
            taskId +
            " | in " +
            browser +
            " | rendering of page " +
            (page + 1) +
            " != reference rendering"
        );

        // Keep both renderings around for the reftest analyzer.
        ensureDirSync(testSnapshotDir);
        fs.writeFileSync(
          path.join(testSnapshotDir, page + 1 + ".png"),
          testSnapshot
        );
        fs.writeFileSync(
          path.join(testSnapshotDir, page + 1 + "_ref.png"),
          refSnapshot
        );

        // This no longer follows the format of Mozilla reftest output.
        const viewportString = `(${pageResult.viewportWidth}x${pageResult.viewportHeight}x${pageResult.outputScale})`;
        fs.appendFileSync(
          eqLog,
          "REFTEST TEST-UNEXPECTED-FAIL | " +
            browser +
            "-" +
            taskId +
            "-page" +
            (page + 1) +
            " | image comparison (==)\n" +
            `REFTEST IMAGE 1 (TEST)${viewportString}: ` +
            path.join(testSnapshotDir, page + 1 + ".png") +
            "\n" +
            `REFTEST IMAGE 2 (REFERENCE)${viewportString}: ` +
            path.join(testSnapshotDir, page + 1 + "_ref.png") +
            "\n"
        );
        numEqFailures++;
      }
    }

    // In master mode, stage a snapshot for every new/changed reference.
    if (masterMode && (!refSnapshot || !eq)) {
      const tmpSnapshotDir = path.join(
        refsTmpDir,
        os.platform(),
        browser,
        taskId
      );
      ensureDirSync(tmpSnapshotDir);
      fs.writeFileSync(
        path.join(tmpSnapshotDir, page + 1 + ".png"),
        testSnapshot
      );
    }
  }

  const session = getSession(browser);
  session.numEqNoSnapshot += numEqNoSnapshot;
  if (numEqFailures > 0) {
    session.numEqFailures += numEqFailures;
  } else {
    console.log(
      "TEST-PASS | " + taskType + " test " + taskId + " | in " + browser
    );
  }
}
/**
 * Verifies a forward-back-forward (fbf) task: the first and second
 * rendering rounds of every page must produce identical snapshots.
 *
 * @param {Object} task - The manifest entry for this task.
 * @param {Array} results - Per-round, per-page result objects.
 * @param {string} browser - Name of the browser session.
 * @param {boolean} masterMode - When set, mismatches are only skipped
 *   (not failed); see the issue links below.
 */
function checkFBF(task, results, browser, masterMode) {
  let numFBFFailures = 0;
  const round0 = results[0];
  const round1 = results[1];
  if (round0.length !== round1.length) {
    console.error("round 1 and 2 sizes are different");
  }

  for (let page = 0; page < round1.length; page++) {
    const r0Page = round0[page];
    const r1Page = round1[page];
    if (!r0Page) {
      continue;
    }
    if (r0Page.snapshot === r1Page.snapshot) {
      continue;
    }
    // The FBF tests fail intermittently in Firefox and Google Chrome when run
    // on the bots, ignoring `makeref` failures for now; see
    // - https://github.com/mozilla/pdf.js/pull/12368
    // - https://github.com/mozilla/pdf.js/pull/11491
    //
    // TODO: Figure out why this happens, so that we can remove the hack; see
    // https://github.com/mozilla/pdf.js/issues/12371
    if (masterMode) {
      console.log(
        "TEST-SKIPPED | forward-back-forward test " +
          task.id +
          " | in " +
          browser +
          " | page" +
          (page + 1)
      );
      continue;
    }
    console.log(
      "TEST-UNEXPECTED-FAIL | forward-back-forward test " +
        task.id +
        " | in " +
        browser +
        " | first rendering of page " +
        (page + 1) +
        " != second"
    );
    numFBFFailures++;
  }

  if (numFBFFailures > 0) {
    getSession(browser).numFBFFailures += numFBFFailures;
  } else {
    console.log(
      "TEST-PASS | forward-back-forward test " + task.id + " | in " + browser
    );
  }
}
/**
 * Reports a load task as passed; reaching this point means the document
 * loaded without failures, which is all a load test checks for.
 *
 * @param {Object} task - The manifest entry for this task.
 * @param {Array} results - Unused; present for checker-signature symmetry.
 * @param {string} browser - Name of the browser session.
 */
function checkLoad(task, results, browser) {
  console.log(`TEST-PASS | load test ${task.id} | in ${browser}`);
}
/**
 * Processes the completed results of one reftest task: reports any page
 * failures, otherwise dispatches to the checker matching the task type,
 * and finally releases the (large) snapshot strings.
 *
 * @param {string} browser - Name of the browser session.
 * @param {string} id - Task ID within the session.
 * @param {Array} results - Per-round, per-page result objects.
 */
function checkRefTestResults(browser, id, results) {
  const session = getSession(browser);
  const task = session.tasks[id];
  let failed = false;
  session.numRuns++;

  for (const [round, roundResults] of results.entries()) {
    for (const [page, pageResult] of roundResults.entries()) {
      if (!pageResult) {
        continue; // no results
      }
      if (!pageResult.failure) {
        continue;
      }
      failed = true;
      if (fs.existsSync(task.file + ".error")) {
        // The PDF could not be downloaded; not an error of the test itself.
        console.log(
          "TEST-SKIPPED | PDF was not downloaded " +
            id +
            " | in " +
            browser +
            " | page" +
            (page + 1) +
            " round " +
            (round + 1) +
            " | " +
            pageResult.failure
        );
      } else {
        session.numErrors++;
        console.log(
          "TEST-UNEXPECTED-FAIL | test failed " +
            id +
            " | in " +
            browser +
            " | page" +
            (page + 1) +
            " round " +
            (round + 1) +
            " | " +
            pageResult.failure
        );
      }
    }
  }
  if (failed) {
    return;
  }

  switch (task.type) {
    case "eq":
    case "text":
    case "highlight":
      checkEq(task, results, browser, session.masterMode);
      break;
    case "fbf":
      checkFBF(task, results, browser, session.masterMode);
      break;
    case "load":
      checkLoad(task, results, browser);
      break;
    default:
      throw new Error("Unknown test type");
  }

  // clear memory (`forEach` deliberately skips holes in sparse arrays)
  results.forEach(roundResults => {
    roundResults.forEach(pageResult => {
      pageResult.snapshot = null;
    });
  });
}
/**
 * HTTP POST hook for the reftest endpoints: "/tellMeToQuit", "/info" and
 * "/submit_task_results". Collects per-page results from the browser,
 * re-arms the session watchdog, and triggers result checking once the
 * last page of the last round has been submitted.
 *
 * @param {Object} req - Incoming HTTP request.
 * @param {Object} res - HTTP response to acknowledge with.
 * @returns {boolean} true if this handler owns the request's pathname.
 */
function refTestPostHandler(req, res) {
  const parsedUrl = url.parse(req.url, true);
  const pathname = parsedUrl.pathname;
  if (
    pathname !== "/tellMeToQuit" &&
    pathname !== "/info" &&
    pathname !== "/submit_task_results"
  ) {
    return false;
  }

  let body = "";
  req.on("data", chunk => {
    body += chunk;
  });
  req.on("end", () => {
    res.writeHead(200, { "Content-Type": "text/plain" });
    res.end();

    let session;
    if (pathname === "/tellMeToQuit") {
      session = getSession(parsedUrl.query.browser);
      monitorBrowserTimeout(session, null);
      closeSession(session.name);
      return;
    }

    const data = JSON.parse(body);
    if (pathname === "/info") {
      console.log(data.message);
      return;
    }

    // "/submit_task_results"
    const { browser, round, id, failure, snapshot, lastPageNum } = data;
    const page = data.page - 1;
    session = getSession(browser);
    // The browser is alive; restart its inactivity watchdog.
    monitorBrowserTimeout(session, handleSessionTimeout);

    const taskResults = session.taskResults[id];
    if (!taskResults[round]) {
      taskResults[round] = [];
    }
    if (taskResults[round][page]) {
      console.error(
        "Results for " +
          browser +
          ":" +
          id +
          ":" +
          round +
          ":" +
          page +
          " were already submitted"
      );
      // TODO abort testing here?
    }
    taskResults[round][page] = {
      failure,
      snapshot,
      viewportWidth: data.viewportWidth,
      viewportHeight: data.viewportHeight,
      outputScale: data.outputScale,
    };
    if (stats) {
      stats.push({
        browser,
        pdf: id,
        page,
        round,
        stats: data.stats,
      });
    }

    // Done when the final page of the final round has results.
    const isDone = taskResults.at(-1)?.[lastPageNum - 1];
    if (isDone) {
      checkRefTestResults(browser, id, taskResults);
      session.remaining--;
    }
  });
  return true;
}
/**
 * Creates the callback to run once all browser sessions have closed after
 * a unit/font/integration test run; it stops the server and prints the
 * aggregated pass/fail counts plus the total runtime.
 *
 * @param {string} name - Human-readable name of the test suite.
 * @returns {Function} The all-sessions-closed callback.
 */
function onAllSessionsClosedAfterTests(name) {
  const startTime = Date.now();
  return function () {
    stopServer();
    let numRuns = 0;
    let numErrors = 0;
    for (const session of sessions) {
      numRuns += session.numRuns;
      numErrors += session.numErrors;
    }
    console.log();
    console.log("Run " + numRuns + " tests");
    if (!numRuns) {
      console.log(`OHNOES! No ${name} tests ran!`);
    } else if (numErrors > 0) {
      console.log("OHNOES! Some " + name + " tests failed!");
      console.log(" " + numErrors + " of " + numRuns + " failed");
    } else {
      console.log("All " + name + " tests passed.");
    }
    const runtime = (Date.now() - startTime) / 1000;
    console.log(name + " tests runtime was " + runtime.toFixed(1) + " seconds");
  };
}
/**
 * Starts the web server and the browsers for the unit or font tests that
 * are served at `testUrl`.
 *
 * @param {string} testUrl - Server path of the test page to load.
 * @param {string} name - Suite name used in the final report.
 */
async function startUnitTest(testUrl, name) {
  onAllSessionsClosed = onAllSessionsClosedAfterTests(name);
  startServer();
  server.hooks.POST.push(unitTestPostHandler);

  await startBrowsers({
    baseUrl: `http://${host}:${server.port}${testUrl}`,
    initializeSession(session) {
      session.numRuns = 0;
      session.numErrors = 0;
    },
  });
}
/**
 * Runs the Puppeteer-driven integration tests: starts the server and the
 * browsers, hands the session info to the integration bootstrap module,
 * and closes every session once the tests finished.
 */
async function startIntegrationTest() {
  onAllSessionsClosed = onAllSessionsClosedAfterTests("integration");
  startServer();

  const { runTests } = await import("./integration-boot.mjs");
  await startBrowsers({
    baseUrl: null,
    initializeSession(session) {
      session.numRuns = 0;
      session.numErrors = 0;
    },
  });
  // Expose the viewer URL and the sessions for the integration harness.
  global.integrationBaseUrl = `http://${host}:${server.port}/build/generic/web/viewer.html`;
  global.integrationSessions = sessions;

  const results = { runs: 0, failures: 0 };
  await runTests(results);
  sessions[0].numRuns = results.runs;
  sessions[0].numErrors = results.failures;
  await Promise.all(sessions.map(session => closeSession(session.name)));
}
function unitTestPostHandler(req, res) {
  // Handle only the POST endpoints used by the unit/font test harness;
  // returning `false` lets the web server fall through to other handlers.
  const { pathname } = url.parse(req.url);
  const handledPaths = [
    "/tellMeToQuit",
    "/info",
    "/ttx",
    "/submit_task_results",
  ];
  if (!handledPaths.includes(pathname)) {
    return false;
  }

  let body = "";
  req.on("data", chunk => {
    body += chunk;
  });
  req.on("end", () => {
    if (pathname === "/ttx") {
      // Cancel the font translation if it takes longer than the timeout.
      const ttxTimeout = 10000;
      let onCancel = null;
      const timeoutId = setTimeout(() => {
        onCancel?.("TTX timeout");
      }, ttxTimeout);
      translateFont(
        body,
        cancelFn => {
          onCancel = cancelFn;
        },
        (err, xml) => {
          clearTimeout(timeoutId);
          res.writeHead(200, { "Content-Type": "text/xml" });
          res.end(err ? "<error>" + err + "</error>" : xml);
        }
      );
      return;
    }

    // The remaining endpoints only need an empty plain-text response.
    res.writeHead(200, { "Content-Type": "text/plain" });
    res.end();

    const data = JSON.parse(body);
    if (pathname === "/tellMeToQuit") {
      closeSession(data.browser);
      return;
    }
    if (pathname === "/info") {
      console.log(data.message);
      return;
    }

    // "/submit_task_results": record the outcome of a single test run.
    const session = getSession(data.browser);
    session.numRuns++;
    if (data.status === "TEST-UNEXPECTED-FAIL") {
      session.numErrors++;
    }

    let message = `${data.status} | ${data.description} | in ${session.name}`;
    if (data.error) {
      message += ` | ${data.error}`;
    }
    console.log(message);
  });
  return true;
}
async function startBrowser({ browserName, headless, startUrl }) {
  const launchOptions = {
    product: browserName,
    protocol: "cdp",
    // Note that using `headless: true` gives a deprecation warning; see
    // https://github.com/puppeteer/puppeteer#default-runtime-settings.
    headless: headless === true ? "new" : false,
    defaultViewport: null,
    ignoreDefaultArgs: ["--disable-extensions"],
    // The timeout for individual protocol (CDP) calls should always be lower
    // than the Jasmine timeout. This way protocol errors are always raised in
    // the context of the tests that actually triggered them and don't leak
    // through to other tests (causing unrelated failures or tracebacks). The
    // timeout is set to 75% of the Jasmine timeout to catch operation errors
    // later in the test run and because if a single operation takes that long
    // it can't possibly succeed anymore.
    protocolTimeout: 0.75 * /* jasmine.DEFAULT_TIMEOUT_INTERVAL = */ 30000,
  };

  if (!tempDir) {
    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), "pdfjs-"));
  }
  const printFile = path.join(tempDir, "print.pdf");

  if (browserName === "chrome") {
    launchOptions.args = [
      // avoid crash
      "--no-sandbox",
      "--disable-setuid-sandbox",
      // silent printing in a pdf
      "--kiosk-printing",
    ];
  } else if (browserName === "firefox") {
    // Run tests with the WebDriver BiDi protocol enabled only for Firefox for
    // now given that for Chrome further fixes are needed first.
    launchOptions.protocol = "webDriverBiDi";
    launchOptions.extraPrefsFirefox = {
      // avoid to have a prompt when leaving a page with a form
      "dom.disable_beforeunload": true,
      // Disable dialog when saving a pdf
      "pdfjs.disabled": true,
      "browser.helperApps.neverAsk.saveToDisk": "application/pdf",
      // Avoid popup when saving is done
      "browser.download.always_ask_before_handling_new_types": true,
      "browser.download.panel.shown": true,
      "browser.download.alwaysOpenPanel": false,
      // Save file in output
      "browser.download.folderList": 2,
      "browser.download.dir": tempDir,
      // Print silently in a pdf
      "print.always_print_silent": true,
      "print.show_print_progress": false,
      print_printer: "PDF",
      "print.printer_PDF.print_to_file": true,
      "print.printer_PDF.print_to_filename": printFile,
      // Enable OffscreenCanvas
      "gfx.offscreencanvas.enabled": true,
      // Disable gpu acceleration
      "gfx.canvas.accelerated": false,
      // Enable the `round` CSS function.
      "layout.css.round.enabled": true,
      // This allow to copy some data in the clipboard.
      "dom.events.asyncClipboard.clipboardItem": true,
    };
  }

  const browser = await puppeteer.launch(launchOptions);

  if (startUrl) {
    // Load the runner page in the tab that the browser opened on startup.
    const [firstPage] = await browser.pages();
    await firstPage.goto(startUrl, {
      timeout: 0,
      waitUntil: "domcontentloaded",
    });
  }

  return browser;
}
/**
 * Starts a browser session for every browser under test.
 *
 * @param {Object} options
 * @param {string|null} options.baseUrl - Base URL of the test runner page, or
 *   `null` when no start page should be loaded (integration tests).
 * @param {function} options.initializeSession - Callback invoked with each
 *   session once its browser has been started.
 */
async function startBrowsers({ baseUrl, initializeSession }) {
  // Remove old browser revisions from Puppeteer's cache. Updating Puppeteer can
  // cause new browser revisions to be downloaded, so trimming the cache will
  // prevent the disk from filling up over time.
  await puppeteer.trimCache();

  const browserNames = options.noChrome ? ["firefox"] : ["firefox", "chrome"];

  sessions = [];
  for (const browserName of browserNames) {
    // The session must be pushed first and augmented with the browser once
    // it's initialized. The reason for this is that browser initialization
    // takes more time when the browser is not found locally yet and we don't
    // want `onAllSessionsClosed` to trigger if one of the browsers is done
    // and the other one is still initializing, since that would mean that
    // once the browser is initialized the server would have stopped already.
    // Pushing the session first ensures that `onAllSessionsClosed` will
    // only trigger once all browsers are initialized and done.
    const session = {
      name: browserName,
      browser: undefined,
      closed: false,
    };
    sessions.push(session);

    // Construct the start URL from the base URL by appending query parameters
    // for the runner if necessary.
    let startUrl = "";
    if (baseUrl) {
      const queryParameters =
        `?browser=${encodeURIComponent(browserName)}` +
        `&manifestFile=${encodeURIComponent("/test/" + options.manifestFile)}` +
        `&testFilter=${JSON.stringify(options.testfilter)}` +
        `&xfaOnly=${options.xfaOnly}` +
        `&delay=${options.statsDelay}` +
        `&masterMode=${options.masterMode}`;
      startUrl = baseUrl + queryParameters;
    }

    // Use `try`/`catch` with `await` instead of a mixed `await ... .then()
    // .catch()` chain; importantly the `closeSession` call is now awaited so
    // that a failed startup cannot leave a floating promise racing with the
    // startup of the next browser (or with the server shutdown that
    // `onAllSessionsClosed` triggers).
    try {
      session.browser = await startBrowser({
        browserName,
        headless: options.headless,
        startUrl,
      });
      initializeSession(session);
    } catch (ex) {
      console.log(`Error while starting ${browserName}: ${ex.message}`);
      await closeSession(browserName);
    }
  }
}
function startServer() {
  // Serve from the parent directory so tests can reach any file in the
  // repository (the server lives in `test/`).
  server = new WebServer();
  Object.assign(server, {
    host,
    port: options.port,
    root: "..",
    cacheExpirationTime: 3600,
  });
  server.start();
}
// Stops the web server that `startServer` created.
function stopServer() {
  server.stop();
}
function getSession(browser) {
  // Look up the session whose name matches the given browser name;
  // yields `undefined` when no such session exists.
  for (const session of sessions) {
    if (session.name === browser) {
      return session;
    }
  }
  return undefined;
}
Introduce Puppeteer for handling browsers during tests This commit replaces our own infrastructure for handling browsers during tests with Puppeteer. Using our own infrastructure for this had a few downsides: - It has proven to not always be reliable, especially when closing the browser, causing failures on the bots because browsers were still running even though they should have been stopped. Puppeteer should do a better job with this because it uses the browser's test built-in instrumentation tools for this (the devtools protocol) which our code didn't. This also means that we don't have to pass parameters/preferences to tweak browser behavior anymore. - It requires the browsers under test to be installed on the system, whereas Puppeteer downloads the browsers before the test. This means that setup is much easier (no more manual installations and browser manifest files) as well as testing with different browser versions (since they can be provisioned on demand). Moreover, this ensures that contributors always run the tests in both Firefox and Chrome, regardless of which browsers they have installed locally. - It's all code we have to maintain, so Puppeteer abstracts away how the browsers start/stop for us so we don't have to keep that code. By default, Puppeteer only installs one browser during installation, hence the need for a post-install script to install the second browser. This requires `cross-env` to make passing the environment variable work on both Linux and Windows.
2020-04-19 00:46:58 +09:00
async function closeSession(browser) {
  // Close the browser belonging to the matching session; once every session
  // has been closed, clean up the temporary directory and notify the
  // `onAllSessionsClosed` callback (if any).
  for (const session of sessions) {
    if (session.name === browser) {
      if (session.browser !== undefined) {
        await session.browser.close();
      }
      session.closed = true;

      const everySessionClosed = sessions.every(s => s.closed);
      if (everySessionClosed) {
        if (tempDir) {
          rimrafSync(tempDir);
        }
        onAllSessionsClosed?.();
      }
    }
  }
}
async function ensurePDFsDownloaded() {
  // Download the test PDFs from the manifest and verify their checksums;
  // verification failures only abort when `--strictVerify` is set.
  const manifest = getTestManifest();
  await downloadManifestFiles(manifest);
  try {
    await verifyManifestFiles(manifest);
  } catch {
    const verifyFailureMsg =
      "Unable to verify the checksum for the files that are " +
      "used for testing.";
    const adviceMsg =
      "Please re-download the files, or adjust the MD5 " +
      "checksum in the manifest for the files listed above.\n";
    console.log(verifyFailureMsg);
    console.log(adviceMsg);
    if (options.strictVerify) {
      process.exit(1);
    }
  }
}
async function main() {
  // Entry point: dispatch to the requested test mode based on the CLI options.
  if (options.statsFile) {
    stats = [];
  }

  if (options.downloadOnly) {
    await ensurePDFsDownloaded();
    return;
  }
  if (options.unitTest) {
    // Allows linked PDF files in unit-tests as well.
    await ensurePDFsDownloaded();
    startUnitTest("/test/unit/unit_test.html", "unit");
    return;
  }
  if (options.fontTest) {
    startUnitTest("/test/font/font_test.html", "font");
    return;
  }
  if (options.integration) {
    // Allows linked PDF files in integration-tests as well.
    await ensurePDFsDownloaded();
    startIntegrationTest();
    return;
  }
  startRefTest(options.masterMode, options.reftest);
}
// Module-level state shared by the server/browser/session helpers above.
// `host` and `options` are never reassigned, so declare them `const`; the
// remaining bindings are mutated at runtime and use `let` instead of the
// legacy `var` declarations.
let server;
let sessions;
let onAllSessionsClosed;
const host = "127.0.0.1";
const options = parseOptions();
let stats;
let tempDir = null;

main();