This was mostly done using `gulp lint --fix`, with a few manual changes shown in the following diff:
```diff
diff --git a/src/core/pattern.js b/src/core/pattern.js
index 365491ed3..eedd8b686 100644
--- a/src/core/pattern.js
+++ b/src/core/pattern.js
@@ -105,7 +105,7 @@ const Pattern = (function PatternClosure() {
return Pattern;
})();
-var Shadings = {};
+const Shadings = {};
// A small number to offset the first/last color stops so we can insert ones to
// support extend. Number.MIN_VALUE is too small and breaks the extend.
@@ -597,16 +597,15 @@ Shadings.Mesh = (function MeshClosure() {
if (!(0 <= f && f <= 3)) {
throw new FormatError("Unknown type6 flag");
}
- var i, ii;
const pi = coords.length;
- for (i = 0, ii = f !== 0 ? 8 : 12; i < ii; i++) {
+ for (let i = 0, ii = f !== 0 ? 8 : 12; i < ii; i++) {
coords.push(reader.readCoordinate());
}
const ci = colors.length;
- for (i = 0, ii = f !== 0 ? 2 : 4; i < ii; i++) {
+ for (let i = 0, ii = f !== 0 ? 2 : 4; i < ii; i++) {
colors.push(reader.readComponents());
}
- var tmp1, tmp2, tmp3, tmp4;
+ let tmp1, tmp2, tmp3, tmp4;
switch (f) {
// prettier-ignore
case 0:
@@ -729,16 +728,15 @@ Shadings.Mesh = (function MeshClosure() {
if (!(0 <= f && f <= 3)) {
throw new FormatError("Unknown type7 flag");
}
- var i, ii;
const pi = coords.length;
- for (i = 0, ii = f !== 0 ? 12 : 16; i < ii; i++) {
+ for (let i = 0, ii = f !== 0 ? 12 : 16; i < ii; i++) {
coords.push(reader.readCoordinate());
}
const ci = colors.length;
- for (i = 0, ii = f !== 0 ? 2 : 4; i < ii; i++) {
+ for (let i = 0, ii = f !== 0 ? 2 : 4; i < ii; i++) {
colors.push(reader.readComponents());
}
- var tmp1, tmp2, tmp3, tmp4;
+ let tmp1, tmp2, tmp3, tmp4;
switch (f) {
// prettier-ignore
case 0:
@@ -897,7 +895,7 @@ Shadings.Mesh = (function MeshClosure() {
decodeType4Shading(this, reader);
break;
case ShadingType.LATTICE_FORM_MESH:
- var verticesPerRow = dict.get("VerticesPerRow") | 0;
+ const verticesPerRow = dict.get("VerticesPerRow") | 0;
if (verticesPerRow < 2) {
throw new FormatError("Invalid VerticesPerRow");
}
```
A significant portion of the code-base has now been converted to use `let`/`const`, rather than `var`, hence it should be possible to simply enable the ESLint `no-var` rule globally.
This way we can ensure that new code won't accidentally use `var`, and it also removes the need to manually enable the rule in various folders.
Obviously it makes sense to continue the efforts to replace `var`, but that should probably happen on a file and/or folder basis.
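For reference, enabling the rule globally amounts to something along these lines in the ESLint configuration (a minimal sketch; the project's actual configuration obviously contains many more rules and overrides):
```js
// .eslintrc.js: a minimal sketch, not the actual project configuration.
module.exports = {
  rules: {
    // Disallow `var` declarations in favour of `let`/`const`.
    "no-var": "error",
  },
};
```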
Please note that this patch excludes the following code:
- The `extensions/` folder, since that seemed easiest for now (and I don't know exactly what the support situation is for the Chromium-extension).
- The entire `external/` folder is ignored, since most of it is currently excluded from linting.
For the code that isn't imported from elsewhere (and thus shouldn't simply be ignored), we should probably (at some point) bring it up to the same linting/formatting standard as the rest of the code-base.
- Various files in the `test/` folder are ignored, as necessary, since the way that a lot of this code is loaded will require some care (or perhaps larger re-factoring) when removing `var` usage.
While the JSDocs have never advertised `getDocument` as supporting Node.js `Buffer`s, that apparently doesn't stop users from passing such data structures to `getDocument`.
In theory the existing `instanceof Uint8Array` check ought to have caught Node.js `Buffer`s, however for reasons that I don't even pretend to understand that check actually passes. Hence this patch which, *only* in Node.js environments, will special-case `Buffer`s to hopefully provide a slightly better out-of-the-box behaviour[1].
---
[1] Although I'm not sure that we necessarily want to advertise this in the JSDocs, given the specialized use-case.
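As a rough sketch of the kind of special-casing described above (the helper name is illustrative, not the actual `getDocument` code):
```js
// Hypothetical helper; only relevant in Node.js environments.
function convertIfBuffer(src) {
  if (typeof Buffer !== "undefined" && Buffer.isBuffer(src)) {
    // Buffers may be views over a shared memory pool, so respect the
    // byteOffset/byteLength when creating the Uint8Array view.
    return new Uint8Array(src.buffer, src.byteOffset, src.byteLength);
  }
  return src;
}
```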
Replace the `objectFromEntries` helper function with an `objectFromMap` one, and simplify the data lookup in the `AnnotationStorage.getValue` method.
Note that the majority of these changes were done automatically, by using `gulp lint --fix`, and the manual changes were limited to the following diff:
```diff
diff --git a/src/core/cff_parser.js b/src/core/cff_parser.js
index d684c200e..2e2b811e4 100644
--- a/src/core/cff_parser.js
+++ b/src/core/cff_parser.js
@@ -555,7 +555,7 @@ const CFFParser = (function CFFParserClosure() {
stackSize %= 2;
validationCommand = CharstringValidationData[value];
} else if (value === 10 || value === 29) {
- var subrsIndex;
+ let subrsIndex;
if (value === 10) {
subrsIndex = localSubrIndex;
} else {
@@ -886,15 +886,15 @@ const CFFParser = (function CFFParserClosure() {
format = bytes[pos++];
switch (format & 0x7f) {
case 0:
- var glyphsCount = bytes[pos++];
+ const glyphsCount = bytes[pos++];
for (i = 1; i <= glyphsCount; i++) {
encoding[bytes[pos++]] = i;
}
break;
case 1:
- var rangesCount = bytes[pos++];
- var gid = 1;
+ const rangesCount = bytes[pos++];
+ let gid = 1;
for (i = 0; i < rangesCount; i++) {
const start = bytes[pos++];
const left = bytes[pos++];
@@ -938,7 +938,7 @@ const CFFParser = (function CFFParserClosure() {
}
break;
case 3:
- var rangesCount = (bytes[pos++] << 8) | bytes[pos++];
+ const rangesCount = (bytes[pos++] << 8) | bytes[pos++];
for (i = 0; i < rangesCount; ++i) {
let first = (bytes[pos++] << 8) | bytes[pos++];
if (i === 0 && first !== 0) {
@@ -1173,7 +1173,7 @@ class CFFDict {
}
}
-var CFFTopDict = (function CFFTopDictClosure() {
+const CFFTopDict = (function CFFTopDictClosure() {
const layout = [
[[12, 30], "ROS", ["sid", "sid", "num"], null],
[[12, 20], "SyntheticBase", "num", null],
@@ -1229,7 +1229,7 @@ var CFFTopDict = (function CFFTopDictClosure() {
return CFFTopDict;
})();
-var CFFPrivateDict = (function CFFPrivateDictClosure() {
+const CFFPrivateDict = (function CFFPrivateDictClosure() {
const layout = [
[6, "BlueValues", "delta", null],
[7, "OtherBlues", "delta", null],
@@ -1265,11 +1265,12 @@ var CFFPrivateDict = (function CFFPrivateDictClosure() {
return CFFPrivateDict;
})();
-var CFFCharsetPredefinedTypes = {
+const CFFCharsetPredefinedTypes = {
ISO_ADOBE: 0,
EXPERT: 1,
EXPERT_SUBSET: 2,
};
+
class CFFCharset {
constructor(predefined, format, charset, raw) {
this.predefined = predefined;
@@ -1695,7 +1696,7 @@ class CFFCompiler {
// For offsets we just insert a 32bit integer so we don't have to
// deal with figuring out the length of the offset when it gets
// replaced later on by the compiler.
- var name = dict.keyToNameMap[key];
+ const name = dict.keyToNameMap[key];
// Some offsets have the offset and the length, so just record the
// position of the first one.
if (!offsetTracker.isTracking(name)) {
```
Rather than first checking if data exists before fetching it from storage, we can simply do the lookup directly and then check its value.
Note that this follows the same pattern as utilized in the `AnnotationStorage.setValue` method.
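A sketch of that pattern (loosely following the `AnnotationStorage` API; not the exact implementation):
```js
// Minimal sketch of a storage wrapper using the direct-lookup pattern.
class AnnotationStorageSketch {
  constructor() {
    this._storage = new Map();
  }

  // Look up directly and check the result, rather than calling has()
  // followed by get().
  getValue(key, defaultValue) {
    const value = this._storage.get(key);
    return value !== undefined ? value : defaultValue;
  }
}
```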
Given that it's only used with `Map`s, and that it's currently implemented in such a way that we (indirectly) must iterate through the data *twice*, some simplification cannot hurt here.
Note that the only reason we're not using `Object.fromEntries(...)` directly, at each call-site, is that it won't guarantee that a `null` prototype is used.
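A minimal sketch of such an `objectFromMap` helper, assuming the goals are a single pass over the data and a `null`-prototype result:
```js
function objectFromMap(map) {
  // Use a null prototype, which Object.fromEntries(...) wouldn't guarantee.
  const obj = Object.create(null);
  for (const [key, value] of map) {
    obj[key] = value;
  }
  return obj;
}
```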
Now that we have scripting support, warning about e.g. JavaScript actions doesn't seem necessary anymore. Especially considering that scripting-related actions aren't (and won't be) parsed by the `Catalog.parseDestDictionary` method anyway, since it's intended for handling "simple" actions.
All of this code predates the existence of native JS classes, however we can now clean this up a bit. This patch thus lets us remove some variable "shadowing" from the code.
This helper function is first of all only called *twice*, and secondly it also leads to unnecessary intermediate allocations given how the `TypedArray`s are handled.
Hence we can simply inline this small function, and thus directly allocate the combined `TypedArray` instead.
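The inlined pattern looks roughly like this (with illustrative data):
```js
const first = new Uint8Array([1, 2, 3]);
const second = new Uint8Array([4, 5]);

// Directly allocate the combined TypedArray, without any intermediate
// allocations, and copy both parts into it.
const combined = new Uint8Array(first.length + second.length);
combined.set(first);
combined.set(second, first.length);
```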
The `compareByteArrays` helper function is, first of all, duplicated in multiple closures in the `src/core/crypto.js` file. Secondly, despite its name, it's also functionally equivalent to the existing `isArrayEqual` helper function.
The `isArrayEqual` helper function is changed to use a standard `for`-loop, rather than `Array.prototype.every`, since that ought to be slightly more efficient given that we're now using it with (potentially) larger data.
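A sketch of the `for`-loop version (the actual helper may differ in detail):
```js
function isArrayEqual(arr1, arr2) {
  if (arr1.length !== arr2.length) {
    return false;
  }
  // A plain for-loop avoids the callback overhead of Array.prototype.every,
  // which matters for (potentially) larger data.
  for (let i = 0, ii = arr1.length; i < ii; i++) {
    if (arr1[i] !== arr2[i]) {
      return false;
    }
  }
  return true;
}
```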
All of this code predates the existence of native JS classes, however we can now clean this up a bit. This patch thus lets us remove some variable "shadowing" from the code.
Note that this particular helper function is, with the exception of the `GENERIC` default viewer and the (unsupported) SVG-backend, mostly unused at this point in time. Hence we should be able to clean up this helper function slightly.
Also, fixes a small inconsistency in the `SVGGraphics` initialization in the viewer, by passing in the `disableCreateObjectURL` compatibility-option. Given that the SVG-backend isn't officially supported/recommended this shouldn't have been an issue, but given that I spotted this it can't hurt to fix it.
Note how the `PDFAttachmentViewer` handles PDF file attachments specially, by opening them in a new window/tab, rather than forcing them to be downloaded. This is done to improve the overall UX, since browsers in general are able to handle PDF files internally.
However, for file *annotations* we're currently not attempting to do the same thing and are instead just downloading them directly. In order to unify the behaviour, without having to duplicate a lot of code, the opening of PDF file attachments is thus moved into a new `DownloadManager.openOrDownloadData` method.
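A rough sketch of the unified behaviour (names are illustrative, not the exact viewer code):
```js
class DownloadManagerSketch {
  openOrDownloadData(data, filename) {
    if (/\.pdf$/i.test(filename)) {
      // PDF attachments: open in a new window/tab, since browsers can
      // generally display PDF files internally.
      const blobUrl = URL.createObjectURL(
        new Blob([data], { type: "application/pdf" })
      );
      window.open(blobUrl);
      return;
    }
    // Everything else: trigger a regular download (the helper is assumed
    // to exist on the manager).
    this.downloadData(data, filename);
  }
}
```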
- strokeColor corresponds to borderColor;
- support fillColor and textColor;
- support colors on the different annotations;
- fix typo in aforms (+test).
This is similar to the other methods, and the only reason for this not having been done originally is that the `cancel` functionality is a later addition.
Rather than converting the `AnnotationStorage`-data to an Object, before sending it to the worker-thread, we should be able to simply send the internal `Map` directly.
The "structured clone algorithm" doesn't have a problem with `Map`s, however the `LoopbackPort` used when workers are *disabled* (e.g. in Node.js environments) didn't use to support them. With PR 12997 having lifted that restriction, we should now be able to simply send the `AnnotationStorage`-data as-is rather than having to iterate through it to first create an Object.
*Please note:* The changes in `src/core/annotation.js` could have been a lot more compact if we were able to use optional chaining in the `src/core` folder. Unfortunately that's still not possible, since SystemJS is being used in the development viewer (i.e. `gulp server`) and fixing that is *still* blocked by [bug 1247687](https://bugzilla.mozilla.org/show_bug.cgi?id=1247687).
*Please note:* The `defer` parameter has been enabled by default ever since PR 9777 (in 2018), which first shipped in PDF.js release `2.0.943`.
With workers *disabled*, e.g. in Node.js environments, this has been used ever since without any problems reported[1].
The impetus for this change was that I happened to notice that *if* the `LoopbackPort` was used with synchronous event dispatching, we'd simply send that data as-is to the listeners. This created an inconsistency in the data returned from the `pdf.worker.js` file, since `postMessage` used with *actual* workers (or the `LoopbackPort` with `defer = true`) will ignore/throw when encountering unclonable data.
Originally my intention was simply to call `cloneValue` regardless of the event dispatching used in `LoopbackPort`, however looking at the use-cases (or lack thereof) of the `LoopbackPort` it seemed reasonable to simply remove the `defer` parameter instead.
This patch is tagged "[api-minor]" since the `LoopbackPort` is still exposed in the API, although I really hope that no third-party is using this (since disabling workers leads to bad performance).
Finally, this patch changes a `forEach` loop to `for...of` and makes use of optional chaining in existing code.
---
[1] As evident from the `npm test` command run by GitHub Actions, and previously by Travis.
With the previous patch this function is now *only* accessed on the worker-thread, hence it's no longer necessary to include it in the *built* `pdf.js` file.
With the previous patch this functionality is now *only* accessed on the worker-thread, hence it's no longer necessary to include it in the *built* `pdf.js` file.
The only reason, as far as I can tell, for parsing the Metadata on the main-thread is how it was originally implemented. When Metadata support was first implemented, it utilized the [`DOMParser`](https://developer.mozilla.org/en-US/docs/Web/API/DOMParser) which isn't available in workers.
Today, with the custom XML-parser being used, that's no longer an issue and it seems reasonable to move the Metadata parsing to the worker-thread[1], since that's where all parsing should happen (for performance reasons).
Based on these changes, we'll be able to reduce the now unnecessary duplication of the XML-parser (and related code) in both of the *built* `pdf.js`/`pdf.worker.js` files.
Finally, this patch changes the `_repair` method to use "Array + join" rather than string concatenation.
---
[1] This needed the previous patch, to enable sending of `Map`s between threads with workers disabled.
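For reference, the "Array + join" pattern now used in the `_repair` method, as a minimal sketch (the actual transform differs):
```js
const codePoints = [0x48, 0x69]; // illustrative input data
const chars = [];
for (const codePoint of codePoints) {
  // Build the result in an Array...
  chars.push(String.fromCodePoint(codePoint));
}
// ...and join once at the end, rather than repeatedly concatenating strings.
const repaired = chars.join("");
```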
I happened to look at this code, and I can't for the life of me figure out why I didn't just implement it like this patch in the first place (since the current format feels overly verbose).
The following checks are all unneeded, and could easily cause confusion when reading the code. (All of them are my fault as well, since I've sometimes added those checks without really thinking about the surrounding code.)
- In `PartialEvaluator.hasBlendModes` there cannot be any `MissingDataException`s thrown, given that the `Page.getOperatorList` method waits for all the necessary /Resources to load first. Note also that if an error were thrown from `PartialEvaluator.hasBlendModes`, it'd completely break rendering of that page, since any errors thrown from `Page.getOperatorList` are simply sent to the main-thread.
- In `PartialEvaluator.handleColorN` there cannot be any `MissingDataException`s thrown, given that again the `Page.getOperatorList` method waits for all the necessary /Resources to load before operatorList parsing starts.
- In `XRef.readXRef` there cannot be any `MissingDataException`s thrown, given that we're *explicitly* requesting (and waiting for) the entire document in `pdfManagerReady` (in `src/core/worker.js`) before re-parsing of a corrupt document starts.
* don't set a value in annotationStorage by default:
- an undefined value when the annotation is rendered for saving/printing means that nothing has changed, so the normal appearance is used
- aims to fix https://bugzilla.mozilla.org/show_bug.cgi?id=1681687
* change the way the font size is computed when it's null in the DA (default appearance):
- make fontSize proportional to line height
- in the multiline case, take the number of lines of entered text into account to adapt the font size
- use the ascent of the fallback font, instead of the one from the PDF, to position spans
- use `TextMetrics.fontBoundingBoxAscent` if available, or
- use a basic heuristic to guess the ascent by drawing a char on a canvas (see the sketch below)
- compute the ascent as a ratio of the font height
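A sketch of that ascent-measurement fallback (a hypothetical helper, not the exact implementation):
```js
function guessAscent(fontFamily, fontSize) {
  const ctx = document.createElement("canvas").getContext("2d");
  ctx.font = `${fontSize}px ${fontFamily}`;
  const metrics = ctx.measureText("A");
  if (metrics.fontBoundingBoxAscent !== undefined) {
    // Preferred: the ascent as reported by the browser.
    return metrics.fontBoundingBoxAscent;
  }
  // Fallback: assume the ascent is a fixed ratio of the font height
  // (the ratio here is illustrative).
  return 0.8 * fontSize;
}
```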
The *third* page of the referenced PDF document currently fails to render completely, since one of its font files fails to load.
Since that error isn't handled, a large part of the text is missing, which looks quite bad. By "replacing" the font data with an *empty* stream, we'll thus be able to fall back to rendering the text with a standard font (instead of using `ErrorFont`). While there's obviously no guarantee that things will look perfect, actually rendering the text at all should be an improvement in general.
Also, print a warning in `PartialEvaluator.loadFont` when the `PartialEvaluator.translateFont` method rejects, since that'd have helped debug/fix the issue faster.
*As far as I can tell, this has been broken ever since PR 3289 (back in 2013) without anyone noticing.*
For any non-`MissingDataException` errors encountered in `ObjectLoader._walk`, we're simply throwing immediately, which has the potential to *completely* break rendering of an entire page.
In practice this is obviously only an issue for PDF documents which are in one way or another corrupt, since that's the only way that `XRef.fetch` will throw non-`MissingDataException` errors. To make matters worse these errors are *intermittent*, since they can only occur if the document is still loading when the `ObjectLoader`-code runs (note the early return in `ObjectLoader.load`).
Please note that we cannot simply catch the error and let "normal" parsing continue in `ObjectLoader._walk`, since that could lead to errors elsewhere given that resources "below" the current one (in the graph) might not be checked as intended then.
All-in-all, the only way to make absolutely sure that we won't cause *unexpected* `MissingDataException`s somewhere else in the code-base is to fallback to fetching the *entire* document in this edge-case.
- in order to evaluate SOM expressions, nodes and their attributes must be checked in the same order as in the XML;
- add an `XFAObjectArray` object, with a `max` parameter, to handle multiple children with the same name.
- the parser is based on a class extending `XMLParserBase`
- it handles XML namespaces:
  * each namespace is associated with a builder
  * a builder builds nodes belonging to its namespace
  * when a node is inserted in its parent, namespace compatibility is checked (if required)
- to avoid name collisions between XML names and object properties, use `Symbol`s (see the sketch below).
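A sketch of the `Symbol`-based approach (illustrative, not the actual XFA code):
```js
// Internal state lives behind Symbol keys, so an XML element named e.g.
// "children" can never collide with it.
const $children = Symbol("children");

class XFAObject {
  constructor(name) {
    this.name = name; // may be any XML name
    this[$children] = [];
  }
}
```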
Given that `FontFaceObject` is not exposed in the public API, but only accessed internally, there's no need to assume that a `FontFaceObject`-instance is ever initialized without `onUnsupportedFeature` being provided. This is also consistent with the `BaseFontLoader` implementation.
Given that we'll only cache `/XObject`s of the `Image`-type globally, we can utilize that in `PartialEvaluator.getTextContent` as well. This way, in cases such as e.g. issue 12098, we can avoid having to fetch/parse `/XObject`s that we already know to be `Image`s. This is helpful, since `Stream`s are not cached on the `XRef` instance (given their potential size) and the lookup can thus be somewhat expensive in general.
Also, skip a redundant `RefSetCache.has` check in the `GlobalImageCache.getData` method.
The widths property should be an object to match what metrics returns.
In ZapfDingbats.pdf I was getting a data clone error with pdfBug enabled.
In buildCharCodeToWidth() there was an encoding with the name "at", which is also the name of a method on `Array.prototype`.
buildCharCodeToWidth assumes that an object is passed in, so when it checked for the "at" property it found the inherited method and copied it over.
This only seemed to affect Firefox.
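The collision is easy to reproduce (an illustration of the bug, not pdf.js code):
```js
const widths = []; // an Array, as in the buggy code
console.log("at" in widths); // true, since Array.prototype.at is inherited
console.log(typeof widths.at); // "function", i.e. not a glyph width

const widthsObj = Object.create(null); // a plain object avoids this
console.log("at" in widthsObj); // false
```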
When implementing the `GlobalImageCache` functionality I was mostly worried about the effect of *very large* images, hence the maximum number of cached images was purposely kept quite low[1].
However, there's one fairly obvious problem with that approach: In documents with hundreds, or even thousands, of *small* images the `GlobalImageCache` as implemented becomes essentially pointless.
Hence this patch, where the `GlobalImageCache`-implementation is changed in the following ways:
- We're still guaranteed to be able to cache a *minimum* number of images, set to `10` (similar to before).
- If the *total* size of all the cached image data is below a threshold[2], we're allowed to cache additional images (see the sketch below).
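A sketch of that admission policy (constants and names are illustrative):
```js
const MIN_IMAGES_TO_CACHE = 10; // a minimum number of images, always allowed
const MAX_BYTE_SIZE = 40e6; // total size budget for any additional images

class ImageCacheSketch {
  constructor() {
    this._cache = new Map();
    this._totalByteSize = 0;
  }

  shouldCache(imageByteSize) {
    if (this._cache.size < MIN_IMAGES_TO_CACHE) {
      return true;
    }
    // Allow additional images only while the total size stays below the
    // threshold.
    return this._totalByteSize + imageByteSize <= MAX_BYTE_SIZE;
  }
}
```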
This patch thus *improves*, but doesn't completely fix, issue 12098. Note that the referenced document is created by a *very poor* PDF generator, since every single page contains the *entire* document (with all of its /Resources), and clipping is used to create the individual pages.[3]
---
[1] Currently set to `10` images; imagine what would happen to overall memory usage if we encountered e.g. 50 images each 10 MB in size.
[2] This value was chosen, somewhat randomly, to be `40` megabytes; basically five times the [maximum individual image size per page](6249ef517d/src/display/api.js (L2483-L2484)).
[3] This surely has to be some kind of record w.r.t. how badly PDF generators can mess things up...
Note how, in the `if (this.stateManager.stateStack.length !== 0) {` branch, we're attempting to access the not-yet-defined variable[1] `args`. If this code-path is ever hit, an Error will be thrown and parsing will thus be aborted immediately (likely leading to e.g. rendering bugs).
Note that I found this purely by accident, since I happened to glance at the LGTM report. However, I've since found that the error is also present during the unit-test[2] and with this patch we're actually testing the *intended* thing here.
As part of fixing this, and to avoid re-introducing a similar bug in the future, we'll now instead always reset `args.length` *before* attempting to read the next operator.
Also, we can use the existing `EvaluatorPreprocessor.savedStatesDepth` getter to simplify the save/restore detection a tiny bit.
---
[1] The ESLint rule `no-use-before-define` would have helped catch this problem, but unfortunately we cannot enable that without quite a bit of refactoring all over the code-base.
[2] The unit-test was updated such that it would fail in the `master`-branch.
With the changes in PR 12831, it's no longer necessary to keep track of the `fontName`-string separately since it's available through the `_defaultAppearanceData`-property as well.
As can be seen in 2cba290361/src/core/evaluator.js (L986), the `gStateObj` (which is actually an Array despite its name) is wrapped in an Array when it's inserted into the OperatorList. Hence we obviously need to take this into account when accessing it in `TranslatedFont._removeType3ColorOperators`; this mistake happened because, as far as I know, we don't have any test-cases for this particular code-path.
By changing this to a `shadow`ed getter, we can simply access it directly and not worry about whether it's been initialized. I have no idea why I didn't just implement it this way in the first place.
* Add a parser to get font data from the default appearance
- pdfium & poppler also use a special parser to get this info.
* Update src/core/default_appearance.js
Co-authored-by: Jonas Jenwald <jonas.jenwald@gmail.com>