/* Copyright 2012 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import {
  bytesToString, createPromiseCapability, createValidAbsoluteUrl, FormatError,
  info, InvalidPDFException, isBool, isString, MissingDataException, shadow,
  stringToPDFString, stringToUTF8String, unreachable, Util, warn,
  XRefParseException
} from '../shared/util';
import {
  Dict, isCmd, isDict, isName, isRef, isRefsEqual, isStream, Ref, RefSet,
  RefSetCache
} from './primitives';
import { Lexer, Parser } from './parser';
import { ChunkedStream } from './chunked_stream';
import { CipherTransformFactory } from './crypto';
import { ColorSpace } from './colorspace';

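// Catalog provides access to the data of the document's root dictionary
// (/Root): the page tree, document outline, named destinations, page labels,
// attachments and document-level JavaScript.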
var Catalog = (function CatalogClosure() {
  function Catalog(pdfManager, xref, pageFactory) {
    this.pdfManager = pdfManager;
    this.xref = xref;
    this.catDict = xref.getCatalogObj();
    if (!isDict(this.catDict)) {
      throw new FormatError('catalog object is not a dictionary');
    }

    this.fontCache = new RefSetCache();
    this.builtInCMapCache = Object.create(null);
    this.pageKidsCountCache = new RefSetCache();
    // TODO refactor to move getPage() to the PDFDocument.
    this.pageFactory = pageFactory;
    this.pagePromises = [];
  }

  Catalog.prototype = {
    get metadata() {
      var streamRef = this.catDict.getRaw('Metadata');
      if (!isRef(streamRef)) {
        return shadow(this, 'metadata', null);
      }

      var encryptMetadata = (!this.xref.encrypt ? false :
                             this.xref.encrypt.encryptMetadata);

      var stream = this.xref.fetch(streamRef, !encryptMetadata);
      var metadata;
      if (stream && isDict(stream.dict)) {
        var type = stream.dict.get('Type');
        var subtype = stream.dict.get('Subtype');

        if (isName(type, 'Metadata') && isName(subtype, 'XML')) {
          // XXX: This should examine the charset the XML document defines,
          // however since there are currently no real means to decode
          // arbitrary charsets, let's just hope that the author of the PDF
          // was reasonable enough to stick with the XML default charset,
          // which is UTF-8.
          try {
            metadata = stringToUTF8String(bytesToString(stream.getBytes()));
          } catch (e) {
            if (e instanceof MissingDataException) {
              throw e;
            }
            info('Skipping invalid metadata.');
          }
        }
      }

      return shadow(this, 'metadata', metadata);
    },
    get toplevelPagesDict() {
      var pagesObj = this.catDict.get('Pages');
      if (!isDict(pagesObj)) {
        throw new FormatError('invalid top-level pages dictionary');
      }
      // shadow the prototype getter
      return shadow(this, 'toplevelPagesDict', pagesObj);
    },
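    // The document outline (bookmarks): `documentOutline` is the fault-tolerant
    // getter, while `readDocumentOutline` walks the /Outlines tree iteratively,
    // using a RefSet to guard against reference cycles.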
    get documentOutline() {
      var obj = null;
      try {
        obj = this.readDocumentOutline();
      } catch (ex) {
        if (ex instanceof MissingDataException) {
          throw ex;
        }
        warn('Unable to read document outline');
      }
      return shadow(this, 'documentOutline', obj);
    },
    readDocumentOutline: function Catalog_readDocumentOutline() {
      var obj = this.catDict.get('Outlines');
      if (!isDict(obj)) {
        return null;
      }
      obj = obj.getRaw('First');
      if (!isRef(obj)) {
        return null;
      }
      var root = { items: [], };
      var queue = [{ obj, parent: root, }];
      // To avoid recursion, keep track of the already processed items.
      var processed = new RefSet();
      processed.put(obj);
      var xref = this.xref, blackColor = new Uint8Array(3);

      while (queue.length > 0) {
        var i = queue.shift();
        var outlineDict = xref.fetchIfRef(i.obj);
        if (outlineDict === null) {
          continue;
        }
        if (!outlineDict.has('Title')) {
          throw new FormatError('Invalid outline item');
        }

        var data = { url: null, dest: null, };
        Catalog.parseDestDictionary({
          destDict: outlineDict,
          resultObj: data,
          docBaseUrl: this.pdfManager.docBaseUrl,
        });
        var title = outlineDict.get('Title');
        var flags = outlineDict.get('F') || 0;

        var color = outlineDict.getArray('C'), rgbColor = blackColor;
        // We only need to parse the color when it's valid, and non-default.
        if (Array.isArray(color) && color.length === 3 &&
            (color[0] !== 0 || color[1] !== 0 || color[2] !== 0)) {
          rgbColor = ColorSpace.singletons.rgb.getRgb(color, 0);
        }
        var outlineItem = {
          dest: data.dest,
          url: data.url,
          unsafeUrl: data.unsafeUrl,
          newWindow: data.newWindow,
          title: stringToPDFString(title),
          color: rgbColor,
          count: outlineDict.get('Count'),
          bold: !!(flags & 2),
          italic: !!(flags & 1),
          items: [],
        };
        i.parent.items.push(outlineItem);
        obj = outlineDict.getRaw('First');
        if (isRef(obj) && !processed.has(obj)) {
          queue.push({ obj, parent: outlineItem, });
          processed.put(obj);
        }
        obj = outlineDict.getRaw('Next');
        if (isRef(obj) && !processed.has(obj)) {
          queue.push({ obj, parent: i.parent, });
          processed.put(obj);
        }
      }
      return (root.items.length > 0 ? root.items : null);
    },
    get numPages() {
      var obj = this.toplevelPagesDict.get('Count');
      if (!Number.isInteger(obj)) {
        throw new FormatError(
          'page count in top level pages object is not an integer');
      }
      // shadow the prototype getter
      return shadow(this, 'numPages', obj);
    },
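    // Named destinations: `destinations` eagerly collects every entry from the
    // /Dests name tree (or the older /Dests dictionary), while `getDestination`
    // looks up a single destination by name.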
    get destinations() {
      function fetchDestination(dest) {
        return isDict(dest) ? dest.get('D') : dest;
      }

      var xref = this.xref;
      var dests = {}, nameTreeRef, nameDictionaryRef;
      var obj = this.catDict.get('Names');
      if (obj && obj.has('Dests')) {
        nameTreeRef = obj.getRaw('Dests');
      } else if (this.catDict.has('Dests')) {
        nameDictionaryRef = this.catDict.get('Dests');
      }

      if (nameDictionaryRef) {
        // reading simple destination dictionary
        obj = nameDictionaryRef;
        obj.forEach(function catalogForEach(key, value) {
          if (!value) {
            return;
          }
          dests[key] = fetchDestination(value);
        });
      }
      if (nameTreeRef) {
        var nameTree = new NameTree(nameTreeRef, xref);
        var names = nameTree.getAll();
        for (var name in names) {
          dests[name] = fetchDestination(names[name]);
        }
      }
      return shadow(this, 'destinations', dests);
    },
    getDestination: function Catalog_getDestination(destinationId) {
      function fetchDestination(dest) {
        return isDict(dest) ? dest.get('D') : dest;
      }

      var xref = this.xref;
      var dest = null, nameTreeRef, nameDictionaryRef;
      var obj = this.catDict.get('Names');
      if (obj && obj.has('Dests')) {
        nameTreeRef = obj.getRaw('Dests');
      } else if (this.catDict.has('Dests')) {
        nameDictionaryRef = this.catDict.get('Dests');
      }

      if (nameDictionaryRef) { // Simple destination dictionary.
        var value = nameDictionaryRef.get(destinationId);
        if (value) {
          dest = fetchDestination(value);
        }
      }
      if (nameTreeRef) {
        var nameTree = new NameTree(nameTreeRef, xref);
        dest = fetchDestination(nameTree.get(destinationId));
      }
      return dest;
    },

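    // Page labels (/PageLabels): `pageLabels` is the fault-tolerant getter;
    // `readPageLabels` expands the number tree into one label string per page,
    // honoring the numbering style (/S), label prefix (/P) and start value (/St).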
    get pageLabels() {
      var obj = null;
      try {
        obj = this.readPageLabels();
      } catch (ex) {
        if (ex instanceof MissingDataException) {
          throw ex;
        }
        warn('Unable to read page labels.');
      }
      return shadow(this, 'pageLabels', obj);
    },
    readPageLabels: function Catalog_readPageLabels() {
      var obj = this.catDict.getRaw('PageLabels');
      if (!obj) {
        return null;
      }
      var pageLabels = new Array(this.numPages);
      var style = null;
      var prefix = '';

      var numberTree = new NumberTree(obj, this.xref);
      var nums = numberTree.getAll();
      var currentLabel = '', currentIndex = 1;

      for (var i = 0, ii = this.numPages; i < ii; i++) {
        if (i in nums) {
          var labelDict = nums[i];
          if (!isDict(labelDict)) {
            throw new FormatError('The PageLabel is not a dictionary.');
          }

          var type = labelDict.get('Type');
          if (type && !isName(type, 'PageLabel')) {
            throw new FormatError('Invalid type in PageLabel dictionary.');
          }

          var s = labelDict.get('S');
          if (s && !isName(s)) {
            throw new FormatError('Invalid style in PageLabel dictionary.');
          }
          style = s ? s.name : null;

          var p = labelDict.get('P');
          if (p && !isString(p)) {
            throw new FormatError('Invalid prefix in PageLabel dictionary.');
          }
          prefix = p ? stringToPDFString(p) : '';

          var st = labelDict.get('St');
          if (st && !(Number.isInteger(st) && st >= 1)) {
            throw new FormatError('Invalid start in PageLabel dictionary.');
          }
          currentIndex = st || 1;
        }

        switch (style) {
          case 'D':
            currentLabel = currentIndex;
            break;
          case 'R':
          case 'r':
            currentLabel = Util.toRoman(currentIndex, style === 'r');
            break;
          case 'A':
          case 'a':
            var LIMIT = 26; // Use only the characters A--Z, or a--z.
            var A_UPPER_CASE = 0x41, A_LOWER_CASE = 0x61;

            var baseCharCode = (style === 'a' ? A_LOWER_CASE : A_UPPER_CASE);
            var letterIndex = currentIndex - 1;
            var character = String.fromCharCode(baseCharCode +
                                                (letterIndex % LIMIT));
            var charBuf = [];
            for (var j = 0, jj = (letterIndex / LIMIT) | 0; j <= jj; j++) {
              charBuf.push(character);
            }
            currentLabel = charBuf.join('');
            break;
          default:
            if (style) {
              throw new FormatError(
                `Invalid style "${style}" in PageLabel dictionary.`);
            }
        }
        pageLabels[i] = prefix + currentLabel;

        currentLabel = '';
        currentIndex++;
      }
      return pageLabels;
    },

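    // The /PageMode entry controls how the viewer should initially display the
    // document; anything other than the names allowed by the specification
    // falls back to the 'UseNone' default.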
    get pageMode() {
      let obj = this.catDict.get('PageMode');
      let pageMode = 'UseNone'; // Default value.

      if (isName(obj)) {
        switch (obj.name) {
          case 'UseNone':
          case 'UseOutlines':
          case 'UseThumbs':
          case 'FullScreen':
          case 'UseOC':
          case 'UseAttachments':
            pageMode = obj.name;
        }
      }
      return shadow(this, 'pageMode', pageMode);
    },

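    // File attachments, collected from the /EmbeddedFiles name tree and keyed
    // by their (PDF-string decoded) names.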
    get attachments() {
      var xref = this.xref;
      var attachments = null, nameTreeRef;
      var obj = this.catDict.get('Names');
      if (obj) {
        nameTreeRef = obj.getRaw('EmbeddedFiles');
      }

      if (nameTreeRef) {
        var nameTree = new NameTree(nameTreeRef, xref);
        var names = nameTree.getAll();
        for (var name in names) {
          var fs = new FileSpec(names[name], xref);
          if (!attachments) {
            attachments = Object.create(null);
          }
          attachments[stringToPDFString(name)] = fs.serializable;
        }
      }
      return shadow(this, 'attachments', attachments);
    },
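    // Document-level JavaScript, collected from the /JavaScript name tree; a
    // named 'Print' OpenAction is mapped to a synthesized 'print({});' entry.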
    get javaScript() {
      var xref = this.xref;
      var obj = this.catDict.get('Names');

      let javaScript = null;
      function appendIfJavaScriptDict(jsDict) {
        var type = jsDict.get('S');
        if (!isName(type, 'JavaScript')) {
          return;
        }
        var js = jsDict.get('JS');
        if (isStream(js)) {
          js = bytesToString(js.getBytes());
        } else if (!isString(js)) {
          return;
        }
        if (!javaScript) {
          javaScript = [];
        }
        javaScript.push(stringToPDFString(js));
      }
      if (obj && obj.has('JavaScript')) {
        var nameTree = new NameTree(obj.getRaw('JavaScript'), xref);
        var names = nameTree.getAll();
        for (var name in names) {
          // We don't really use the JavaScript right now. This code is
          // defensive so we don't cause errors on document load.
          var jsDict = names[name];
          if (isDict(jsDict)) {
            appendIfJavaScriptDict(jsDict);
          }
        }
      }

      // Append OpenAction actions to javaScript array
      var openactionDict = this.catDict.get('OpenAction');
      if (isDict(openactionDict, 'Action')) {
        var actionType = openactionDict.get('S');
        if (isName(actionType, 'Named')) {
          // The named Print action is not a part of the PDF 1.7 specification,
          // but is supported by many PDF readers/writers (including Adobe's).
          var action = openactionDict.get('N');
          if (isName(action, 'Print')) {
            if (!javaScript) {
              javaScript = [];
            }
            javaScript.push('print({});');
          }
        } else {
          appendIfJavaScriptDict(openactionDict);
        }
      }

      return shadow(this, 'javaScript', javaScript);
    },

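    // Clears the page-kids, font and built-in CMap caches; the font cache is
    // only cleared once all pending font translations have settled.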
    cleanup: function Catalog_cleanup() {
      this.pageKidsCountCache.clear();

      var promises = [];
      this.fontCache.forEach(function (promise) {
        promises.push(promise);
      });
      return Promise.all(promises).then((translatedFonts) => {
        for (var i = 0, ii = translatedFonts.length; i < ii; i++) {
          var font = translatedFonts[i].dict;
          delete font.translated;
        }
        this.fontCache.clear();
        this.builtInCMapCache = Object.create(null);
      });
    },

    getPage: function Catalog_getPage(pageIndex) {
      if (!(pageIndex in this.pagePromises)) {
        this.pagePromises[pageIndex] = this.getPageDict(pageIndex).then(
            ([dict, ref]) => {
          return this.pageFactory.createPage(pageIndex, dict, ref,
                                             this.fontCache,
                                             this.builtInCMapCache);
        });
      }
      return this.pagePromises[pageIndex];
    },

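    // Resolves the [Dict, Ref] pair for the given zero-based page index by
    // walking the /Pages tree iteratively (a stack instead of recursion), and
    // caches /Kids counts so repeated lookups in large documents stay cheap.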
    getPageDict: function Catalog_getPageDict(pageIndex) {
      var capability = createPromiseCapability();
      var nodesToVisit = [this.catDict.getRaw('Pages')];
      var count, currentPageIndex = 0;
      var xref = this.xref, pageKidsCountCache = this.pageKidsCountCache;

      function next() {
        while (nodesToVisit.length) {
          var currentNode = nodesToVisit.pop();

          if (isRef(currentNode)) {
            count = pageKidsCountCache.get(currentNode);
            // Skip nodes where the page can't be.
            if (count > 0 && currentPageIndex + count < pageIndex) {
              currentPageIndex += count;
              continue;
            }

            xref.fetchAsync(currentNode).then(function (obj) {
              if (isDict(obj, 'Page') || (isDict(obj) && !obj.has('Kids'))) {
                if (pageIndex === currentPageIndex) {
                  // Cache the Page reference, since it can *greatly* improve
                  // performance by reducing redundant lookups in long documents
                  // where all nodes are found at *one* level of the tree.
                  if (currentNode && !pageKidsCountCache.has(currentNode)) {
                    pageKidsCountCache.put(currentNode, 1);
                  }
                  capability.resolve([obj, currentNode]);
                } else {
                  currentPageIndex++;
                  next();
                }
                return;
              }
              nodesToVisit.push(obj);
              next();
            }, capability.reject);
            return;
          }

          // Must be a child page dictionary.
          if (!isDict(currentNode)) {
            capability.reject(new FormatError(
              'page dictionary kid reference points to wrong type of object'));
            return;
          }

          count = currentNode.get('Count');
          if (Number.isInteger(count) && count >= 0) {
            // Cache the Kids count, since it can reduce redundant lookups in
            // documents where all nodes are found at *one* level of the tree.
            var objId = currentNode.objId;
            if (objId && !pageKidsCountCache.has(objId)) {
              pageKidsCountCache.put(objId, count);
            }
            // Skip nodes where the page can't be.
            if (currentPageIndex + count <= pageIndex) {
              currentPageIndex += count;
              continue;
            }
          }

          var kids = currentNode.get('Kids');
          if (!Array.isArray(kids)) {
            // Prevent errors in corrupt PDF documents that violate the
            // specification by *inlining* Page dicts directly in the Kids
            // array, rather than using indirect objects (fixes issue9540.pdf).
            if (isName(currentNode.get('Type'), 'Page') ||
                (!currentNode.has('Type') && currentNode.has('Contents'))) {
              if (currentPageIndex === pageIndex) {
                capability.resolve([currentNode, null]);
                return;
              }
              currentPageIndex++;
              continue;
            }

            capability.reject(new FormatError(
              'page dictionary kids object is not an array'));
            return;
          }

          // Always check all `Kids` nodes, to avoid getting stuck in an empty
          // node further down in the tree (see issue5644.pdf, issue8088.pdf),
          // and to ensure that we actually find the correct `Page` dict.
          for (var last = kids.length - 1; last >= 0; last--) {
            nodesToVisit.push(kids[last]);
          }
        }
        capability.reject(new Error('Page index ' + pageIndex + ' not found.'));
      }
      next();
      return capability.promise;
    },

    getPageIndex: function Catalog_getPageIndex(pageRef) {
      // The page tree nodes have the count of all the leaves below them. To get
      // how many pages are before we just have to walk up the tree and keep
      // adding the count of siblings to the left of the node.
      var xref = this.xref;
      function pagesBeforeRef(kidRef) {
        var total = 0;
        var parentRef;
        return xref.fetchAsync(kidRef).then(function (node) {
          if (isRefsEqual(kidRef, pageRef) && !isDict(node, 'Page') &&
              !(isDict(node) && !node.has('Type') && node.has('Contents'))) {
            throw new FormatError(
              'The reference does not point to a /Page Dict.');
          }
          if (!node) {
            return null;
          }
          if (!isDict(node)) {
            throw new FormatError('node must be a Dict.');
          }
          parentRef = node.getRaw('Parent');
          return node.getAsync('Parent');
        }).then(function (parent) {
          if (!parent) {
            return null;
          }
          if (!isDict(parent)) {
            throw new FormatError('parent must be a Dict.');
          }
          return parent.getAsync('Kids');
        }).then(function (kids) {
          if (!kids) {
            return null;
          }
          var kidPromises = [];
          var found = false;
          for (var i = 0; i < kids.length; i++) {
            var kid = kids[i];
            if (!isRef(kid)) {
              throw new FormatError('kid must be a Ref.');
            }
            if (isRefsEqual(kid, kidRef)) {
              found = true;
              break;
            }
            kidPromises.push(xref.fetchAsync(kid).then(function (kid) {
              if (!isDict(kid)) {
                throw new FormatError('kid node must be a Dict.');
              }
              if (kid.has('Count')) {
                var count = kid.get('Count');
                total += count;
              } else { // page leaf node
                total++;
              }
            }));
          }
          if (!found) {
            throw new FormatError('kid ref not found in parents kids');
          }
          return Promise.all(kidPromises).then(function () {
            return [total, parentRef];
          });
        });
      }

      var total = 0;
      function next(ref) {
        return pagesBeforeRef(ref).then(function (args) {
          if (!args) {
            return total;
          }
          var count = args[0];
          var parentRef = args[1];
          total += count;
          return next(parentRef);
        });
      }

      return next(pageRef);
    },
  };

  /**
   * @typedef ParseDestDictionaryParameters
   * @property {Dict} destDict - The dictionary containing the destination.
   * @property {Object} resultObj - The object where the parsed destination
   *   properties will be placed.
   * @property {string} docBaseUrl - (optional) The document base URL that is
   *   used when attempting to recover valid absolute URLs from relative ones.
   */

  /**
   * Helper function used to parse the contents of destination dictionaries.
   * @param {ParseDestDictionaryParameters} params
   */
  Catalog.parseDestDictionary = function Catalog_parseDestDictionary(params) {
    // Lets URLs beginning with 'www.' default to using the 'http://' protocol.
    function addDefaultProtocolToUrl(url) {
      if (url.indexOf('www.') === 0) {
        return ('http://' + url);
      }
      return url;
    }
    // According to ISO 32000-1:2008, section 12.6.4.7, URIs should be encoded
    // in 7-bit ASCII. Some bad PDFs use UTF-8 encoding, see Bugzilla 1122280.
    function tryConvertUrlEncoding(url) {
      try {
        return stringToUTF8String(url);
      } catch (e) {
        return url;
      }
    }

    var destDict = params.destDict;
    if (!isDict(destDict)) {
      warn('parseDestDictionary: "destDict" must be a dictionary.');
      return;
    }
    var resultObj = params.resultObj;
    if (typeof resultObj !== 'object') {
      warn('parseDestDictionary: "resultObj" must be an object.');
      return;
    }
    var docBaseUrl = params.docBaseUrl || null;

    var action = destDict.get('A'), url, dest;
    if (!isDict(action) && destDict.has('Dest')) {
      // A /Dest entry should *only* contain a Name or an Array, but some bad
      // PDF generators ignore that and treat it as an /A entry.
      action = destDict.get('Dest');
    }

    if (isDict(action)) {
      let actionType = action.get('S');
      if (!isName(actionType)) {
        warn('parseDestDictionary: Invalid type in Action dictionary.');
        return;
      }
      let actionName = actionType.name;

      switch (actionName) {
        case 'URI':
          url = action.get('URI');
          if (isName(url)) {
            // Some bad PDFs do not put parentheses around relative URLs.
            url = '/' + url.name;
          } else if (isString(url)) {
            url = addDefaultProtocolToUrl(url);
          }
          // TODO: pdf spec mentions urls can be relative to a Base
          // entry in the dictionary.
          break;

        case 'GoTo':
          dest = action.get('D');
          break;

        case 'Launch':
          // We neither want, nor can, support arbitrary 'Launch' actions.
          // However, in practice they are mostly used for linking to other PDF
          // files, which we thus attempt to support (utilizing `docBaseUrl`).
          /* falls through */

        case 'GoToR':
          var urlDict = action.get('F');
          if (isDict(urlDict)) {
            // We assume that we found a FileSpec dictionary
            // and fetch the URL without checking any further.
            url = urlDict.get('F') || null;
          } else if (isString(urlDict)) {
            url = urlDict;
          }

          // NOTE: the destination is relative to the *remote* document.
          var remoteDest = action.get('D');
          if (remoteDest) {
            if (isName(remoteDest)) {
              remoteDest = remoteDest.name;
            }
            if (isString(url)) {
              let baseUrl = url.split('#')[0];
              if (isString(remoteDest)) {
                url = baseUrl + '#' + remoteDest;
              } else if (Array.isArray(remoteDest)) {
                url = baseUrl + '#' + JSON.stringify(remoteDest);
              }
            }
          }
          // The 'NewWindow' property, equal to `LinkTarget.BLANK`.
          var newWindow = action.get('NewWindow');
          if (isBool(newWindow)) {
            resultObj.newWindow = newWindow;
          }
          break;

        case 'Named':
          var namedAction = action.get('N');
          if (isName(namedAction)) {
            resultObj.action = namedAction.name;
          }
          break;

        case 'JavaScript':
          var jsAction = action.get('JS'), js;
          if (isStream(jsAction)) {
            js = bytesToString(jsAction.getBytes());
          } else if (isString(jsAction)) {
            js = jsAction;
          }

          if (js) {
            // Attempt to recover valid URLs from 'JS' entries with certain
            // white-listed formats, e.g.
            //  - window.open('http://example.com')
            //  - app.launchURL('http://example.com', true)
            var URL_OPEN_METHODS = [
              'app.launchURL',
              'window.open'
            ];
            var regex = new RegExp(
              '^\\s*(' + URL_OPEN_METHODS.join('|').split('.').join('\\.') +
              ')\\((?:\'|\")([^\'\"]*)(?:\'|\")(?:,\\s*(\\w+)\\)|\\))', 'i');

            var jsUrl = regex.exec(stringToPDFString(js));
            if (jsUrl && jsUrl[2]) {
              url = jsUrl[2];

              if (jsUrl[3] === 'true' && jsUrl[1] === 'app.launchURL') {
                resultObj.newWindow = true;
              }
              break;
            }
          }
          /* falls through */
        default:
          warn(`parseDestDictionary: Unsupported Action type "${actionName}".`);
          break;
      }
    } else if (destDict.has('Dest')) { // Simple destination.
      dest = destDict.get('Dest');
    }

    if (isString(url)) {
      url = tryConvertUrlEncoding(url);
      var absoluteUrl = createValidAbsoluteUrl(url, docBaseUrl);
      if (absoluteUrl) {
        resultObj.url = absoluteUrl.href;
      }
      resultObj.unsafeUrl = url;
    }
    if (dest) {
      if (isName(dest)) {
        dest = dest.name;
      }
      if (isString(dest) || Array.isArray(dest)) {
        resultObj.dest = dest;
      }
    }
  };

  return Catalog;
})();

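// XRef resolves indirect object references: it parses the cross-reference
// table(s) and/or cross-reference stream(s), handles encryption via the
// trailer's /Encrypt dictionary, and can fall back to indexing all objects in
// the file when the xref data is corrupt (recovery mode).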
var XRef = (function XRefClosure() {
  function XRef(stream, pdfManager) {
    this.stream = stream;
    this.pdfManager = pdfManager;
    this.entries = [];
    this.xrefstms = Object.create(null);
    // prepare the XRef cache
    this.cache = [];
    this.stats = {
      streamTypes: [],
      fontTypes: [],
    };
  }

  XRef.prototype = {
    setStartXRef: function XRef_setStartXRef(startXRef) {
      // Store the starting positions of xref tables as we process them
      // so we can recover from missing data errors
      this.startXRefQueue = [startXRef];
    },

    parse: function XRef_parse(recoveryMode) {
      var trailerDict;
      if (!recoveryMode) {
        trailerDict = this.readXRef();
      } else {
        warn('Indexing all PDF objects');
        trailerDict = this.indexObjects();
      }
      trailerDict.assignXref(this);
      this.trailer = trailerDict;
      var encrypt = trailerDict.get('Encrypt');
      if (isDict(encrypt)) {
        var ids = trailerDict.get('ID');
        var fileId = (ids && ids.length) ? ids[0] : '';
        // The 'Encrypt' dictionary itself should not be encrypted, and by
        // setting `suppressEncryption` we can prevent an infinite loop inside
        // of `XRef_fetchUncompressed` if the dictionary contains indirect
        // objects (fixes issue7665.pdf).
        encrypt.suppressEncryption = true;
        this.encrypt = new CipherTransformFactory(encrypt, fileId,
                                                  this.pdfManager.password);
      }

      // get the root dictionary (catalog) object
      if (!(this.root = trailerDict.get('Root'))) {
        throw new FormatError('Invalid root reference');
      }
    },

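    // Parses a classic cross-reference table. The `tableState` object records
    // the parser position between calls, so that a MissingDataException can
    // interrupt processing and it can later resume from the same entry.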
    processXRefTable: function XRef_processXRefTable(parser) {
      if (!('tableState' in this)) {
        // Stores state of the table as we process it so we can resume
        // from middle of table in case of missing data error
        this.tableState = {
          entryNum: 0,
          streamPos: parser.lexer.stream.pos,
          parserBuf1: parser.buf1,
|
|
|
parserBuf2: parser.buf2,
|
2013-02-07 08:19:29 +09:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
var obj = this.readXRefTable(parser);
|
|
|
|
|
|
|
|
// Sanity check
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!isCmd(obj, 'trailer')) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
'Invalid XRef table: could not find trailer dictionary');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
// Read trailer dictionary, e.g.
|
|
|
|
// trailer
|
|
|
|
// << /Size 22
|
|
|
|
// /Root 20R
|
|
|
|
// /Info 10R
|
|
|
|
// /ID [ <81b14aafa313db63dbd6f981e49f94f4> ]
|
|
|
|
// >>
|
|
|
|
// The parser goes through the entire stream << ... >> and provides
|
|
|
|
// a getter interface for the key-value table
|
|
|
|
var dict = parser.getObj();
|
2014-05-03 03:45:34 +09:00
|
|
|
|
|
|
|
// The pdflib PDF generator can generate a nested trailer dictionary
|
|
|
|
if (!isDict(dict) && dict.dict) {
|
|
|
|
dict = dict.dict;
|
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!isDict(dict)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
'Invalid XRef table: could not parse trailer dictionary');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
delete this.tableState;
|
|
|
|
|
|
|
|
return dict;
|
|
|
|
},
|
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
readXRefTable: function XRef_readXRefTable(parser) {
|
2012-01-31 23:01:04 +09:00
|
|
|
// Example of cross-reference table:
|
|
|
|
// xref
|
|
|
|
// 0 1 <-- subsection header (first obj #, obj count)
|
|
|
|
// 0000000000 65535 f <-- actual object (offset, generation #, f/n)
|
|
|
|
// 23 2 <-- subsection header ... and so on ...
|
2012-02-01 00:57:32 +09:00
|
|
|
// 0000025518 00002 n
|
2012-01-31 23:01:04 +09:00
|
|
|
// 0000025635 00000 n
|
|
|
|
// trailer
|
|
|
|
// ...
|
2012-02-01 00:57:32 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
var stream = parser.lexer.stream;
|
|
|
|
var tableState = this.tableState;
|
|
|
|
stream.pos = tableState.streamPos;
|
|
|
|
parser.buf1 = tableState.parserBuf1;
|
|
|
|
parser.buf2 = tableState.parserBuf2;
|
|
|
|
|
2012-01-31 23:01:04 +09:00
|
|
|
// Outer loop is over subsection headers
|
2011-10-25 08:55:23 +09:00
|
|
|
var obj;
|
2012-01-31 23:01:04 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
while (true) {
|
|
|
|
if (!('firstEntryNum' in tableState) || !('entryCount' in tableState)) {
|
|
|
|
if (isCmd(obj = parser.getObj(), 'trailer')) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
tableState.firstEntryNum = obj;
|
|
|
|
tableState.entryCount = parser.getObj();
|
|
|
|
}
|
|
|
|
|
|
|
|
var first = tableState.firstEntryNum;
|
|
|
|
var count = tableState.entryCount;
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(first) || !Number.isInteger(count)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
'Invalid XRef table: wrong types in subsection header');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2012-01-31 23:01:04 +09:00
|
|
|
// Inner loop is over objects themselves
|
2013-02-07 08:19:29 +09:00
|
|
|
for (var i = tableState.entryNum; i < count; i++) {
|
|
|
|
tableState.streamPos = stream.pos;
|
|
|
|
tableState.entryNum = i;
|
|
|
|
tableState.parserBuf1 = parser.buf1;
|
|
|
|
tableState.parserBuf2 = parser.buf2;
|
|
|
|
|
2011-10-25 08:55:23 +09:00
|
|
|
var entry = {};
|
2012-01-31 23:01:04 +09:00
|
|
|
entry.offset = parser.getObj();
|
|
|
|
entry.gen = parser.getObj();
|
|
|
|
var type = parser.getObj();
|
|
|
|
|
2014-03-21 04:28:22 +09:00
|
|
|
if (isCmd(type, 'f')) {
|
2011-10-25 08:55:23 +09:00
|
|
|
entry.free = true;
|
2014-03-21 04:28:22 +09:00
|
|
|
} else if (isCmd(type, 'n')) {
|
2012-01-31 23:01:04 +09:00
|
|
|
entry.uncompressed = true;
|
2014-03-21 04:28:22 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
|
2012-01-31 23:01:04 +09:00
|
|
|
// Validate entry obj
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(entry.offset) || !Number.isInteger(entry.gen) ||
|
2012-02-01 00:57:32 +09:00
|
|
|
!(entry.free || entry.uncompressed)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
`Invalid entry in XRef subsection: ${first}, ${count}`);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
|
2016-04-21 22:10:40 +09:00
|
|
|
// The first xref table entry, i.e. obj 0, should be free. Attempt
|
|
|
|
// to adjust an incorrect first obj # (fixes issues 3248 and 7229).
|
|
|
|
if (i === 0 && entry.free && first === 1) {
|
|
|
|
first = 0;
|
|
|
|
}
|
|
|
|
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!this.entries[i + first]) {
|
2012-01-31 23:57:12 +09:00
|
|
|
this.entries[i + first] = entry;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
|
|
|
|
tableState.entryNum = 0;
|
|
|
|
tableState.streamPos = stream.pos;
|
|
|
|
tableState.parserBuf1 = parser.buf1;
|
|
|
|
tableState.parserBuf2 = parser.buf2;
|
|
|
|
delete tableState.firstEntryNum;
|
|
|
|
delete tableState.entryCount;
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
|
2012-04-24 12:14:58 +09:00
|
|
|
// Sanity check: as per spec, first object must be free
|
2014-02-27 21:46:12 +09:00
|
|
|
if (this.entries[0] && !this.entries[0].free) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
'Invalid XRef table: unexpected first object');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
return obj;
|
|
|
|
},
|
2012-02-01 00:49:06 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
processXRefStream: function XRef_processXRefStream(stream) {
|
|
|
|
if (!('streamState' in this)) {
|
|
|
|
// Stores state of the stream as we process it so we can resume
|
|
|
|
// from the middle of the stream in case of a missing data error
|
2013-05-10 12:26:28 +09:00
|
|
|
var streamParameters = stream.dict;
|
2013-02-07 08:19:29 +09:00
|
|
|
var byteWidths = streamParameters.get('W');
|
|
|
|
var range = streamParameters.get('Index');
|
|
|
|
if (!range) {
|
|
|
|
range = [0, streamParameters.get('Size')];
|
|
|
|
}
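// For example (illustrative), /Index [0 22 100 5] describes two subsections:
// 22 entries starting at object 0 and 5 entries starting at object 100.
// Without /Index, the single range [0, /Size] covers all entries.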
|
2011-10-25 08:55:23 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
this.streamState = {
|
|
|
|
entryRanges: range,
|
2017-04-27 19:58:44 +09:00
|
|
|
byteWidths,
|
2013-02-07 08:19:29 +09:00
|
|
|
entryNum: 0,
|
2017-06-02 18:16:24 +09:00
|
|
|
streamPos: stream.pos,
|
2013-02-07 08:19:29 +09:00
|
|
|
};
|
|
|
|
}
|
|
|
|
this.readXRefStream(stream);
|
|
|
|
delete this.streamState;
|
|
|
|
|
2013-05-10 12:26:28 +09:00
|
|
|
return stream.dict;
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2013-02-07 08:19:29 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
readXRefStream: function XRef_readXRefStream(stream) {
|
2011-10-25 08:55:23 +09:00
|
|
|
var i, j;
|
2013-02-07 08:19:29 +09:00
|
|
|
var streamState = this.streamState;
|
|
|
|
stream.pos = streamState.streamPos;
|
|
|
|
|
|
|
|
var byteWidths = streamState.byteWidths;
|
|
|
|
var typeFieldWidth = byteWidths[0];
|
|
|
|
var offsetFieldWidth = byteWidths[1];
|
|
|
|
var generationFieldWidth = byteWidths[2];
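// For example (illustrative), /W [1 2 1] stores each entry as a 1-byte type
// field, a 2-byte offset (or object stream number) field and a 1-byte
// generation (or object stream index) field.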
|
|
|
|
|
|
|
|
var entryRanges = streamState.entryRanges;
|
|
|
|
while (entryRanges.length > 0) {
|
|
|
|
var first = entryRanges[0];
|
|
|
|
var n = entryRanges[1];
|
|
|
|
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(first) || !Number.isInteger(n)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
`Invalid XRef range fields: ${first}, ${n}`);
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(typeFieldWidth) ||
|
|
|
|
!Number.isInteger(offsetFieldWidth) ||
|
|
|
|
!Number.isInteger(generationFieldWidth)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
`Invalid XRef entry fields length: ${first}, ${n}`);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
for (i = streamState.entryNum; i < n; ++i) {
|
|
|
|
streamState.entryNum = i;
|
|
|
|
streamState.streamPos = stream.pos;
|
|
|
|
|
2011-10-25 08:55:23 +09:00
|
|
|
var type = 0, offset = 0, generation = 0;
|
2014-03-21 04:28:22 +09:00
|
|
|
for (j = 0; j < typeFieldWidth; ++j) {
|
2011-10-25 08:55:23 +09:00
|
|
|
type = (type << 8) | stream.getByte();
|
2014-03-21 04:28:22 +09:00
|
|
|
}
|
|
|
|
// if type field is absent, its default value is 1
|
2014-02-27 21:46:12 +09:00
|
|
|
if (typeFieldWidth === 0) {
|
2011-10-25 08:55:23 +09:00
|
|
|
type = 1;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
|
|
|
for (j = 0; j < offsetFieldWidth; ++j) {
|
2011-10-25 08:55:23 +09:00
|
|
|
offset = (offset << 8) | stream.getByte();
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
|
|
|
for (j = 0; j < generationFieldWidth; ++j) {
|
2011-10-25 08:55:23 +09:00
|
|
|
generation = (generation << 8) | stream.getByte();
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
var entry = {};
|
|
|
|
entry.offset = offset;
|
|
|
|
entry.gen = generation;
|
|
|
|
switch (type) {
|
|
|
|
case 0:
|
|
|
|
entry.free = true;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
entry.uncompressed = true;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
break;
|
|
|
|
default:
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(`Invalid XRef entry type: ${type}`);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!this.entries[first + i]) {
|
2011-10-25 08:55:23 +09:00
|
|
|
this.entries[first + i] = entry;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
|
|
|
|
streamState.entryNum = 0;
|
|
|
|
streamState.streamPos = stream.pos;
|
|
|
|
entryRanges.splice(0, 2);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
indexObjects: function XRef_indexObjects() {
|
2011-10-25 08:55:23 +09:00
|
|
|
// Simple scan through the PDF content to find objects,
|
|
|
|
// trailers and XRef streams.
|
2015-08-21 23:57:08 +09:00
|
|
|
var TAB = 0x9, LF = 0xA, CR = 0xD, SPACE = 0x20;
|
|
|
|
var PERCENT = 0x25, LT = 0x3C;
|
|
|
|
|
2011-10-25 08:55:23 +09:00
|
|
|
function readToken(data, offset) {
|
|
|
|
var token = '', ch = data[offset];
|
2015-08-21 23:57:08 +09:00
|
|
|
while (ch !== LF && ch !== CR && ch !== LT) {
|
2014-03-21 04:28:22 +09:00
|
|
|
if (++offset >= data.length) {
|
2011-10-25 08:55:23 +09:00
|
|
|
break;
|
2014-03-21 04:28:22 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
token += String.fromCharCode(ch);
|
|
|
|
ch = data[offset];
|
|
|
|
}
|
|
|
|
return token;
|
|
|
|
}
|
|
|
|
function skipUntil(data, offset, what) {
|
|
|
|
var length = what.length, dataLength = data.length;
|
|
|
|
var skipped = 0;
|
|
|
|
// finding byte sequence
|
|
|
|
while (offset < dataLength) {
|
|
|
|
var i = 0;
|
2014-08-02 04:45:39 +09:00
|
|
|
while (i < length && data[offset + i] === what[i]) {
|
2011-10-25 08:55:23 +09:00
|
|
|
++i;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2014-03-21 04:28:22 +09:00
|
|
|
if (i >= length) {
|
2011-10-25 08:55:23 +09:00
|
|
|
break; // sequence found
|
2014-03-21 04:28:22 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
offset++;
|
|
|
|
skipped++;
|
|
|
|
}
|
|
|
|
return skipped;
|
|
|
|
}
|
2015-10-02 19:46:58 +09:00
|
|
|
var objRegExp = /^(\d+)\s+(\d+)\s+obj\b/;
|
2017-12-09 00:37:12 +09:00
|
|
|
const endobjRegExp = /\bendobj[\b\s]$/;
|
|
|
|
const nestedObjRegExp = /\s+(\d+\s+\d+\s+obj[\b\s])$/;
|
|
|
|
const CHECK_CONTENT_LENGTH = 25;
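// For example (illustrative), objRegExp matches a token such as '12 0 obj',
// capturing the object number ('12') and generation ('0'); endobjRegExp and
// nestedObjRegExp are then used below to find where that object really ends.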
|
|
|
|
|
2011-10-25 08:55:23 +09:00
|
|
|
var trailerBytes = new Uint8Array([116, 114, 97, 105, 108, 101, 114]);
|
|
|
|
var startxrefBytes = new Uint8Array([115, 116, 97, 114, 116, 120, 114,
|
|
|
|
101, 102]);
|
2017-12-09 00:37:12 +09:00
|
|
|
const objBytes = new Uint8Array([111, 98, 106]);
|
2011-10-25 08:55:23 +09:00
|
|
|
var xrefBytes = new Uint8Array([47, 88, 82, 101, 102]);
|
|
|
|
|
2015-08-21 23:57:08 +09:00
|
|
|
// Clear out any existing entries, since they may be bogus.
|
|
|
|
this.entries.length = 0;
|
|
|
|
|
2011-10-25 08:55:23 +09:00
|
|
|
var stream = this.stream;
|
|
|
|
stream.pos = 0;
|
|
|
|
var buffer = stream.getBytes();
|
|
|
|
var position = stream.start, length = buffer.length;
|
|
|
|
var trailers = [], xrefStms = [];
|
|
|
|
while (position < length) {
|
|
|
|
var ch = buffer[position];
|
2015-08-21 23:57:08 +09:00
|
|
|
if (ch === TAB || ch === LF || ch === CR || ch === SPACE) {
|
2011-10-25 08:55:23 +09:00
|
|
|
++position;
|
|
|
|
continue;
|
|
|
|
}
|
2015-08-21 23:57:08 +09:00
|
|
|
if (ch === PERCENT) { // %-comment
|
2011-10-25 08:55:23 +09:00
|
|
|
do {
|
|
|
|
++position;
|
2013-08-24 02:57:11 +09:00
|
|
|
if (position >= length) {
|
|
|
|
break;
|
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
ch = buffer[position];
|
2015-08-21 23:57:08 +09:00
|
|
|
} while (ch !== LF && ch !== CR);
|
2011-10-25 08:55:23 +09:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
var token = readToken(buffer, position);
|
|
|
|
var m;
|
2015-08-21 23:57:08 +09:00
|
|
|
if (token.indexOf('xref') === 0 &&
|
|
|
|
(token.length === 4 || /\s/.test(token[4]))) {
|
2011-10-25 08:55:23 +09:00
|
|
|
position += skipUntil(buffer, position, trailerBytes);
|
|
|
|
trailers.push(position);
|
|
|
|
position += skipUntil(buffer, position, startxrefBytes);
|
2015-10-02 19:46:58 +09:00
|
|
|
} else if ((m = objRegExp.exec(token))) {
|
2015-04-04 15:15:31 +09:00
|
|
|
if (typeof this.entries[m[1]] === 'undefined') {
|
|
|
|
this.entries[m[1]] = {
|
2015-07-11 03:18:53 +09:00
|
|
|
offset: position - stream.start,
|
2015-04-04 15:15:31 +09:00
|
|
|
gen: m[2] | 0,
|
2017-06-02 18:16:24 +09:00
|
|
|
uncompressed: true,
|
2015-04-04 15:15:31 +09:00
|
|
|
};
|
|
|
|
}
|
2017-12-09 00:37:12 +09:00
|
|
|
let contentLength, startPos = position + token.length;
|
|
|
|
|
|
|
|
// Find the next "obj" string, rather than "endobj", to ensure that
|
|
|
|
// we won't skip over a new 'obj' operator in corrupt files where
|
|
|
|
// 'endobj' operators are missing (fixes issue9105_reduced.pdf).
|
|
|
|
while (startPos < buffer.length) {
|
|
|
|
let endPos = startPos + skipUntil(buffer, startPos, objBytes) + 4;
|
|
|
|
contentLength = endPos - position;
|
|
|
|
|
|
|
|
let checkPos = Math.max(endPos - CHECK_CONTENT_LENGTH, startPos);
|
|
|
|
let tokenStr = bytesToString(buffer.subarray(checkPos, endPos));
|
|
|
|
|
|
|
|
// Check if the current object ends with an 'endobj' operator.
|
|
|
|
if (endobjRegExp.test(tokenStr)) {
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
// Check if an "obj" occurrence is actually a new object,
|
|
|
|
// i.e. the current object is missing the 'endobj' operator.
|
|
|
|
let objToken = nestedObjRegExp.exec(tokenStr);
|
|
|
|
|
|
|
|
if (objToken && objToken[1]) {
|
|
|
|
warn('indexObjects: Found new "obj" inside of another "obj", ' +
|
|
|
|
'caused by missing "endobj" -- trying to recover.');
|
|
|
|
contentLength -= objToken[1].length;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
startPos += contentLength;
|
|
|
|
}
|
|
|
|
let content = buffer.subarray(position, position + contentLength);
|
2011-10-25 08:55:23 +09:00
|
|
|
|
|
|
|
// checking for an XRef stream candidate
|
|
|
|
// (it must contain '/XRef', and the next char must not be a letter)
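// ('/XRef' is 5 bytes long, and 64 is ASCII '@'; since all ASCII letters are
// 65 or above, a byte below 64 cannot be a letter.)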
|
|
|
|
var xrefTagOffset = skipUntil(content, 0, xrefBytes);
|
|
|
|
if (xrefTagOffset < contentLength &&
|
|
|
|
content[xrefTagOffset + 5] < 64) {
|
2015-07-11 03:18:53 +09:00
|
|
|
xrefStms.push(position - stream.start);
|
|
|
|
this.xrefstms[position - stream.start] = 1; // Avoid recursion
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
position += contentLength;
|
2015-10-01 21:46:03 +09:00
|
|
|
} else if (token.indexOf('trailer') === 0 &&
|
|
|
|
(token.length === 7 || /\s/.test(token[7]))) {
|
|
|
|
trailers.push(position);
|
|
|
|
position += skipUntil(buffer, position, startxrefBytes);
|
2014-02-27 21:46:12 +09:00
|
|
|
} else {
|
2011-10-25 08:55:23 +09:00
|
|
|
position += token.length + 1;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
// reading XRef streams
|
2014-04-08 06:42:54 +09:00
|
|
|
var i, ii;
|
|
|
|
for (i = 0, ii = xrefStms.length; i < ii; ++i) {
|
2013-02-07 08:19:29 +09:00
|
|
|
this.startXRefQueue.push(xrefStms[i]);
|
|
|
|
this.readXRef(/* recoveryMode */ true);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
// finding main trailer
|
|
|
|
var dict;
|
2014-04-08 06:42:54 +09:00
|
|
|
for (i = 0, ii = trailers.length; i < ii; ++i) {
|
2011-10-25 08:55:23 +09:00
|
|
|
stream.pos = trailers[i];
|
2016-02-25 01:56:28 +09:00
|
|
|
var parser = new Parser(new Lexer(stream), /* allowStreams = */ true,
|
|
|
|
/* xref = */ this, /* recoveryMode = */ true);
|
2011-10-25 08:55:23 +09:00
|
|
|
var obj = parser.getObj();
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!isCmd(obj, 'trailer')) {
|
2011-10-25 08:55:23 +09:00
|
|
|
continue;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
// read the trailer dictionary
|
2016-02-25 01:56:28 +09:00
|
|
|
dict = parser.getObj();
|
|
|
|
if (!isDict(dict)) {
|
2011-10-25 08:55:23 +09:00
|
|
|
continue;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
// taking the first one with 'ID'
|
2014-02-27 21:46:12 +09:00
|
|
|
if (dict.has('ID')) {
|
2011-10-25 08:55:23 +09:00
|
|
|
return dict;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
// no trailer with 'ID', taking the last one (if it exists)
|
2014-02-27 21:46:12 +09:00
|
|
|
if (dict) {
|
2011-10-25 08:55:23 +09:00
|
|
|
return dict;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
// nothing helps
|
2012-10-16 19:10:37 +09:00
|
|
|
throw new InvalidPDFException('Invalid PDF structure');
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2013-02-07 08:19:29 +09:00
|
|
|
|
|
|
|
readXRef: function XRef_readXRef(recoveryMode) {
|
2011-10-25 08:55:23 +09:00
|
|
|
var stream = this.stream;
|
2017-08-25 02:14:33 +09:00
|
|
|
// Keep track of already parsed XRef tables, to prevent an infinite loop
|
|
|
|
// when parsing corrupt PDF files where e.g. the /Prev entries create a
|
|
|
|
// circular dependency between tables (fixes bug1393476.pdf).
|
|
|
|
let startXRefParsedCache = Object.create(null);
|
2011-12-03 06:35:18 +09:00
|
|
|
|
2011-12-03 06:31:29 +09:00
|
|
|
try {
|
2013-02-07 08:19:29 +09:00
|
|
|
while (this.startXRefQueue.length) {
|
|
|
|
var startXRef = this.startXRefQueue[0];
|
2012-02-01 00:49:06 +09:00
|
|
|
|
2017-08-25 02:14:33 +09:00
|
|
|
if (startXRefParsedCache[startXRef]) {
|
|
|
|
warn('readXRef - skipping XRef table since it was already parsed.');
|
|
|
|
this.startXRefQueue.shift();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
startXRefParsedCache[startXRef] = true;
|
|
|
|
|
2014-03-05 06:16:54 +09:00
|
|
|
stream.pos = startXRef + stream.start;
|
2012-02-01 00:49:06 +09:00
|
|
|
|
2014-06-16 23:52:04 +09:00
|
|
|
var parser = new Parser(new Lexer(stream), true, this);
|
2013-02-07 08:19:29 +09:00
|
|
|
var obj = parser.getObj();
|
|
|
|
var dict;
|
|
|
|
|
|
|
|
// Get dictionary
|
|
|
|
if (isCmd(obj, 'xref')) {
|
|
|
|
// Parse end-of-file XRef
|
|
|
|
dict = this.processXRefTable(parser);
|
|
|
|
if (!this.topDict) {
|
|
|
|
this.topDict = dict;
|
2012-02-01 00:49:06 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
|
|
|
|
// Recursively get other XRefs 'XRefStm', if any
|
|
|
|
obj = dict.get('XRefStm');
|
2017-09-01 23:52:50 +09:00
|
|
|
if (Number.isInteger(obj)) {
|
2013-02-07 08:19:29 +09:00
|
|
|
var pos = obj;
|
|
|
|
// ignore previously loaded xref streams
|
|
|
|
// (possible infinite recursion)
|
|
|
|
if (!(pos in this.xrefstms)) {
|
|
|
|
this.xrefstms[pos] = 1;
|
|
|
|
this.startXRefQueue.push(pos);
|
|
|
|
}
|
|
|
|
}
|
2017-09-01 23:52:50 +09:00
|
|
|
} else if (Number.isInteger(obj)) {
|
2013-02-07 08:19:29 +09:00
|
|
|
// Parse in-stream XRef
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(parser.getObj()) ||
|
2013-02-07 08:19:29 +09:00
|
|
|
!isCmd(parser.getObj(), 'obj') ||
|
|
|
|
!isStream(obj = parser.getObj())) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('Invalid XRef stream');
|
2013-02-07 08:19:29 +09:00
|
|
|
}
|
|
|
|
dict = this.processXRefStream(obj);
|
|
|
|
if (!this.topDict) {
|
|
|
|
this.topDict = dict;
|
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!dict) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('Failed to read XRef stream');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2013-11-22 23:49:36 +09:00
|
|
|
} else {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('Invalid XRef stream header');
|
2012-02-01 00:49:06 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
|
|
|
|
// Recursively get previous dictionary, if any
|
|
|
|
obj = dict.get('Prev');
|
2017-09-01 23:52:50 +09:00
|
|
|
if (Number.isInteger(obj)) {
|
2013-02-07 08:19:29 +09:00
|
|
|
this.startXRefQueue.push(obj);
|
|
|
|
} else if (isRef(obj)) {
|
|
|
|
// The spec says Prev must not be a reference, i.e. "/Prev NNN"
|
|
|
|
// This is a fallback for non-compliant PDFs, i.e. "/Prev NNN 0 R"
|
|
|
|
this.startXRefQueue.push(obj.num);
|
2011-12-03 06:31:29 +09:00
|
|
|
}
|
2012-02-01 00:49:06 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
this.startXRefQueue.shift();
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2012-02-01 00:49:06 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
return this.topDict;
|
2011-12-03 06:35:18 +09:00
|
|
|
} catch (e) {
|
2013-02-07 08:19:29 +09:00
|
|
|
if (e instanceof MissingDataException) {
|
|
|
|
throw e;
|
|
|
|
}
|
2014-01-16 06:28:31 +09:00
|
|
|
info('(while reading XRef): ' + e);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2011-12-03 06:35:18 +09:00
|
|
|
|
2014-02-27 21:46:12 +09:00
|
|
|
if (recoveryMode) {
|
2012-04-24 12:14:58 +09:00
|
|
|
return;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2013-02-07 08:19:29 +09:00
|
|
|
throw new XRefParseException();
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2013-02-07 08:19:29 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
getEntry: function XRef_getEntry(i) {
|
2014-02-27 21:46:12 +09:00
|
|
|
var xrefEntry = this.entries[i];
|
2014-04-12 19:05:12 +09:00
|
|
|
if (xrefEntry && !xrefEntry.free && xrefEntry.offset) {
|
2014-02-27 21:46:12 +09:00
|
|
|
return xrefEntry;
|
|
|
|
}
|
|
|
|
return null;
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2016-09-23 21:10:27 +09:00
|
|
|
fetchIfRef: function XRef_fetchIfRef(obj, suppressEncryption) {
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!isRef(obj)) {
|
2011-10-25 08:55:23 +09:00
|
|
|
return obj;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2016-09-23 21:10:27 +09:00
|
|
|
return this.fetch(obj, suppressEncryption);
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
fetch: function XRef_fetch(ref, suppressEncryption) {
|
2017-07-20 21:04:54 +09:00
|
|
|
if (!isRef(ref)) {
|
|
|
|
throw new Error('ref object is not a reference');
|
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
var num = ref.num;
|
2013-04-09 07:14:56 +09:00
|
|
|
if (num in this.cache) {
|
2014-02-27 21:46:12 +09:00
|
|
|
var cacheEntry = this.cache[num];
|
2017-02-25 00:01:09 +09:00
|
|
|
// In documents with Object Streams, it's possible that cached `Dict`s
|
|
|
|
// have not been assigned an `objId` yet (see e.g. issue3115r.pdf).
|
2017-03-29 00:54:41 +09:00
|
|
|
if (cacheEntry instanceof Dict && !cacheEntry.objId) {
|
2017-02-25 00:01:09 +09:00
|
|
|
cacheEntry.objId = ref.toString();
|
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
return cacheEntry;
|
2013-04-09 07:14:56 +09:00
|
|
|
}
|
2012-01-09 05:03:00 +09:00
|
|
|
|
2014-02-27 21:46:12 +09:00
|
|
|
var xrefEntry = this.getEntry(num);
|
2012-01-09 05:03:00 +09:00
|
|
|
|
|
|
|
// the referenced entry can be free
|
2014-02-27 21:46:12 +09:00
|
|
|
if (xrefEntry === null) {
|
|
|
|
return (this.cache[num] = null);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (xrefEntry.uncompressed) {
|
2014-03-26 23:07:38 +09:00
|
|
|
xrefEntry = this.fetchUncompressed(ref, xrefEntry, suppressEncryption);
|
2014-02-27 21:46:12 +09:00
|
|
|
} else {
|
2014-03-26 23:07:38 +09:00
|
|
|
xrefEntry = this.fetchCompressed(xrefEntry, suppressEncryption);
|
|
|
|
}
|
2016-11-01 23:04:21 +09:00
|
|
|
if (isDict(xrefEntry)) {
|
2014-06-19 12:41:33 +09:00
|
|
|
xrefEntry.objId = ref.toString();
|
2014-06-10 18:29:25 +09:00
|
|
|
} else if (isStream(xrefEntry)) {
|
2014-06-19 12:41:33 +09:00
|
|
|
xrefEntry.dict.objId = ref.toString();
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2014-03-26 23:07:38 +09:00
|
|
|
return xrefEntry;
|
2014-02-27 21:46:12 +09:00
|
|
|
},
|
2011-10-25 08:55:23 +09:00
|
|
|
|
2014-03-21 04:28:22 +09:00
|
|
|
fetchUncompressed: function XRef_fetchUncompressed(ref, xrefEntry,
|
2014-02-27 21:46:12 +09:00
|
|
|
suppressEncryption) {
|
2011-10-25 08:55:23 +09:00
|
|
|
var gen = ref.gen;
|
2014-02-27 21:46:12 +09:00
|
|
|
var num = ref.num;
|
|
|
|
if (xrefEntry.gen !== gen) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('inconsistent generation in XRef');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2014-03-05 06:16:54 +09:00
|
|
|
var stream = this.stream.makeSubStream(xrefEntry.offset +
|
|
|
|
this.stream.start);
|
2014-02-27 21:46:12 +09:00
|
|
|
var parser = new Parser(new Lexer(stream), true, this);
|
|
|
|
var obj1 = parser.getObj();
|
|
|
|
var obj2 = parser.getObj();
|
|
|
|
var obj3 = parser.getObj();
|
2017-08-31 23:11:00 +09:00
|
|
|
|
|
|
|
if (!Number.isInteger(obj1)) {
|
|
|
|
obj1 = parseInt(obj1, 10);
|
|
|
|
}
|
|
|
|
if (!Number.isInteger(obj2)) {
|
|
|
|
obj2 = parseInt(obj2, 10);
|
|
|
|
}
|
|
|
|
if (obj1 !== num || obj2 !== gen || !isCmd(obj3)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('bad XRef entry');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2017-08-31 23:11:00 +09:00
|
|
|
if (obj3.cmd !== 'obj') {
|
2014-03-21 04:28:22 +09:00
|
|
|
// some bad PDFs use "obj1234" and really mean 1234
|
2014-02-27 21:46:12 +09:00
|
|
|
if (obj3.cmd.indexOf('obj') === 0) {
|
|
|
|
num = parseInt(obj3.cmd.substring(3), 10);
|
2017-08-31 23:11:00 +09:00
|
|
|
if (!Number.isNaN(num)) {
|
2014-02-27 21:46:12 +09:00
|
|
|
return num;
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
}
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('bad XRef entry');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
|
|
|
if (this.encrypt && !suppressEncryption) {
|
2014-06-19 08:30:27 +09:00
|
|
|
xrefEntry = parser.getObj(this.encrypt.createCipherTransform(num, gen));
|
2014-02-27 21:46:12 +09:00
|
|
|
} else {
|
|
|
|
xrefEntry = parser.getObj();
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!isStream(xrefEntry)) {
|
|
|
|
this.cache[num] = xrefEntry;
|
|
|
|
}
|
|
|
|
return xrefEntry;
|
|
|
|
},
|
2011-10-25 08:55:23 +09:00
|
|
|
|
2014-02-27 21:46:12 +09:00
|
|
|
fetchCompressed: function XRef_fetchCompressed(xrefEntry,
|
|
|
|
suppressEncryption) {
|
|
|
|
var tableOffset = xrefEntry.offset;
|
|
|
|
var stream = this.fetch(new Ref(tableOffset, 0));
|
|
|
|
if (!isStream(stream)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('bad ObjStm stream');
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2013-05-10 12:26:28 +09:00
|
|
|
var first = stream.dict.get('First');
|
|
|
|
var n = stream.dict.get('N');
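// For example (illustrative), an ObjStm with /N 3 /First 17 stores three
// 'objNum byteOffset' pairs before byte offset 17, followed by the three
// compressed objects themselves.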
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(first) || !Number.isInteger(n)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
'invalid first and n parameters for ObjStm stream');
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
var parser = new Parser(new Lexer(stream), false, this);
|
2012-11-02 22:26:45 +09:00
|
|
|
parser.allowStreams = true;
|
2014-02-27 21:46:12 +09:00
|
|
|
var i, entries = [], num, nums = [];
|
2011-10-25 08:55:23 +09:00
|
|
|
// read the object numbers to populate cache
|
|
|
|
for (i = 0; i < n; ++i) {
|
|
|
|
num = parser.getObj();
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(num)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
`invalid object number in the ObjStm stream: ${num}`);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
nums.push(num);
|
|
|
|
var offset = parser.getObj();
|
2017-09-01 23:52:50 +09:00
|
|
|
if (!Number.isInteger(offset)) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError(
|
|
|
|
`invalid object offset in the ObjStm stream: ${offset}`);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// read stream objects for cache
|
|
|
|
for (i = 0; i < n; ++i) {
|
|
|
|
entries.push(parser.getObj());
|
2016-05-09 01:28:18 +09:00
|
|
|
// The ObjStm should not contain 'endobj'. If it's present, skip over it
|
|
|
|
// to support corrupt PDFs (fixes issue 5241, bug 898610, bug 1037816).
|
|
|
|
if (isCmd(parser.buf1, 'endobj')) {
|
|
|
|
parser.shift();
|
|
|
|
}
|
2012-11-04 13:03:52 +09:00
|
|
|
num = nums[i];
|
|
|
|
var entry = this.entries[num];
|
|
|
|
if (entry && entry.offset === tableOffset && entry.gen === i) {
|
|
|
|
this.cache[num] = entries[i];
|
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
xrefEntry = entries[xrefEntry.gen];
|
|
|
|
if (xrefEntry === undefined) {
|
2017-06-29 05:51:31 +09:00
|
|
|
throw new FormatError('bad XRef entry for compressed object');
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2014-02-27 21:46:12 +09:00
|
|
|
return xrefEntry;
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2016-09-23 21:10:27 +09:00
|
|
|
fetchIfRefAsync: function XRef_fetchIfRefAsync(obj, suppressEncryption) {
|
2013-06-05 09:57:52 +09:00
|
|
|
if (!isRef(obj)) {
|
2014-05-01 22:27:31 +09:00
|
|
|
return Promise.resolve(obj);
|
2013-06-05 09:57:52 +09:00
|
|
|
}
|
2016-09-23 21:10:27 +09:00
|
|
|
return this.fetchAsync(obj, suppressEncryption);
|
2013-06-05 09:57:52 +09:00
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2013-06-05 09:57:52 +09:00
|
|
|
fetchAsync: function XRef_fetchAsync(ref, suppressEncryption) {
|
2014-08-06 11:22:12 +09:00
|
|
|
var streamManager = this.stream.manager;
|
|
|
|
var xref = this;
|
|
|
|
return new Promise(function tryFetch(resolve, reject) {
|
|
|
|
try {
|
|
|
|
resolve(xref.fetch(ref, suppressEncryption));
|
|
|
|
} catch (e) {
|
|
|
|
if (e instanceof MissingDataException) {
|
2015-10-21 07:45:55 +09:00
|
|
|
streamManager.requestRange(e.begin, e.end).then(function () {
|
2014-08-06 11:22:12 +09:00
|
|
|
tryFetch(resolve, reject);
|
2015-10-21 07:45:55 +09:00
|
|
|
}, reject);
|
2014-08-06 11:22:12 +09:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
reject(e);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
getCatalogObj: function XRef_getCatalogObj() {
|
2012-04-06 00:12:48 +09:00
|
|
|
return this.root;
|
2017-06-02 18:16:24 +09:00
|
|
|
},
|
2011-10-25 08:55:23 +09:00
|
|
|
};
|
|
|
|
|
2011-12-07 07:18:40 +09:00
|
|
|
return XRef;
|
2011-10-25 08:55:23 +09:00
|
|
|
})();
|
|
|
|
|
2013-03-01 08:29:07 +09:00
|
|
|
/**
|
2015-12-26 01:35:21 +09:00
|
|
|
* A NameTree/NumberTree is like a Dict but has some advantageous properties,
|
|
|
|
* see the specification (7.9.6 and 7.9.7) for additional details.
|
|
|
|
* TODO: implement all the Dict functions and make this more efficient.
|
2013-03-01 08:29:07 +09:00
|
|
|
*/
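// Illustrative usage sketch (the 'Dests' key and the variable names below
// are only examples, not taken from this file):
//   var nameTree = new NameTree(catalogDict.getRaw('Dests'), xref);
//   var one = nameTree.get('section1'); // binary search through the tree
//   var all = nameTree.getAll();        // flattened { key: value } object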
|
2015-12-26 01:35:21 +09:00
|
|
|
var NameOrNumberTree = (function NameOrNumberTreeClosure() {
|
|
|
|
function NameOrNumberTree(root, xref) {
|
2017-12-13 22:51:45 +09:00
|
|
|
unreachable('Cannot initialize NameOrNumberTree.');
|
2013-03-01 08:29:07 +09:00
|
|
|
}
|
|
|
|
|
2015-12-26 01:35:21 +09:00
|
|
|
NameOrNumberTree.prototype = {
|
|
|
|
getAll: function NameOrNumberTree_getAll() {
|
2016-01-28 02:04:13 +09:00
|
|
|
var dict = Object.create(null);
|
2013-03-01 08:29:07 +09:00
|
|
|
if (!this.root) {
|
|
|
|
return dict;
|
|
|
|
}
|
|
|
|
var xref = this.xref;
|
2015-12-26 01:35:21 +09:00
|
|
|
// Reading Name/Number tree.
|
2013-03-01 08:29:07 +09:00
|
|
|
var processed = new RefSet();
|
|
|
|
processed.put(this.root);
|
|
|
|
var queue = [this.root];
|
|
|
|
while (queue.length > 0) {
|
|
|
|
var i, n;
|
2013-03-02 23:00:17 +09:00
|
|
|
var obj = xref.fetchIfRef(queue.shift());
|
2013-03-19 22:36:12 +09:00
|
|
|
if (!isDict(obj)) {
|
|
|
|
continue;
|
|
|
|
}
|
2013-03-01 08:29:07 +09:00
|
|
|
if (obj.has('Kids')) {
|
|
|
|
var kids = obj.get('Kids');
|
|
|
|
for (i = 0, n = kids.length; i < n; i++) {
|
|
|
|
var kid = kids[i];
|
2017-07-20 21:04:54 +09:00
|
|
|
if (processed.has(kid)) {
|
|
|
|
throw new FormatError(`Duplicate entry in "${this._type}" tree.`);
|
|
|
|
}
|
2013-03-01 08:29:07 +09:00
|
|
|
queue.push(kid);
|
|
|
|
processed.put(kid);
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2015-12-26 01:35:21 +09:00
|
|
|
var entries = obj.get(this._type);
|
2017-09-02 03:27:13 +09:00
|
|
|
if (Array.isArray(entries)) {
|
2015-12-26 01:35:21 +09:00
|
|
|
for (i = 0, n = entries.length; i < n; i += 2) {
|
|
|
|
dict[xref.fetchIfRef(entries[i])] = xref.fetchIfRef(entries[i + 1]);
|
2013-03-01 08:29:07 +09:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return dict;
|
2014-10-06 00:34:49 +09:00
|
|
|
},
|
|
|
|
|
2015-12-26 01:35:21 +09:00
|
|
|
get: function NameOrNumberTree_get(key) {
|
2014-10-06 00:34:49 +09:00
|
|
|
if (!this.root) {
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
|
|
|
|
var xref = this.xref;
|
2015-12-26 01:35:21 +09:00
|
|
|
var kidsOrEntries = xref.fetchIfRef(this.root);
|
2014-10-06 00:34:49 +09:00
|
|
|
var loopCount = 0;
|
2015-12-26 01:35:21 +09:00
|
|
|
var MAX_LEVELS = 10;
|
2014-10-06 00:34:49 +09:00
|
|
|
var l, r, m;
|
|
|
|
|
|
|
|
// Perform a binary search to quickly find the entry that
|
2015-12-26 01:35:21 +09:00
|
|
|
// contains the key we are looking for.
|
|
|
|
while (kidsOrEntries.has('Kids')) {
|
|
|
|
if (++loopCount > MAX_LEVELS) {
|
|
|
|
warn('Search depth limit reached for "' + this._type + '" tree.');
|
2014-10-06 00:34:49 +09:00
|
|
|
return null;
|
|
|
|
}
|
2015-02-03 00:12:52 +09:00
|
|
|
|
2015-12-26 01:35:21 +09:00
|
|
|
var kids = kidsOrEntries.get('Kids');
|
2017-09-02 03:27:13 +09:00
|
|
|
if (!Array.isArray(kids)) {
|
2014-10-06 00:34:49 +09:00
|
|
|
return null;
|
|
|
|
}
|
|
|
|
|
|
|
|
l = 0;
|
|
|
|
r = kids.length - 1;
|
|
|
|
while (l <= r) {
|
|
|
|
m = (l + r) >> 1;
|
|
|
|
var kid = xref.fetchIfRef(kids[m]);
|
|
|
|
var limits = kid.get('Limits');
|
|
|
|
|
2015-12-26 01:35:21 +09:00
|
|
|
if (key < xref.fetchIfRef(limits[0])) {
|
2014-10-06 00:34:49 +09:00
|
|
|
r = m - 1;
|
2015-12-26 01:35:21 +09:00
|
|
|
} else if (key > xref.fetchIfRef(limits[1])) {
|
2014-10-06 00:34:49 +09:00
|
|
|
l = m + 1;
|
|
|
|
} else {
|
2015-12-26 01:35:21 +09:00
|
|
|
kidsOrEntries = xref.fetchIfRef(kids[m]);
|
2014-10-06 00:34:49 +09:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (l > r) {
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-26 01:35:21 +09:00
|
|
|
// If we get here, then we have found the right entry. Now go through the
|
|
|
|
// entries in the dictionary until we find the key we're looking for.
|
|
|
|
var entries = kidsOrEntries.get(this._type);
|
2017-09-02 03:27:13 +09:00
|
|
|
if (Array.isArray(entries)) {
|
2014-10-06 00:34:49 +09:00
|
|
|
// Perform a binary search to reduce the lookup time.
|
|
|
|
l = 0;
|
2015-12-26 01:35:21 +09:00
|
|
|
r = entries.length - 2;
|
2014-10-06 00:34:49 +09:00
|
|
|
while (l <= r) {
|
|
|
|
// Check only even indices (0, 2, 4, ...) because the
|
2015-12-26 01:35:21 +09:00
|
|
|
// odd indices contain the actual data.
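// ('& ~1' clears the lowest bit, rounding the midpoint down to an even
// index.)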
|
2014-10-06 00:34:49 +09:00
|
|
|
m = (l + r) & ~1;
|
2015-12-26 01:35:21 +09:00
|
|
|
var currentKey = xref.fetchIfRef(entries[m]);
|
|
|
|
if (key < currentKey) {
|
2014-10-06 00:34:49 +09:00
|
|
|
r = m - 2;
|
2015-12-26 01:35:21 +09:00
|
|
|
} else if (key > currentKey) {
|
2014-10-06 00:34:49 +09:00
|
|
|
l = m + 2;
|
|
|
|
} else {
|
2015-12-26 01:35:21 +09:00
|
|
|
return xref.fetchIfRef(entries[m + 1]);
|
2014-10-06 00:34:49 +09:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return null;
|
2017-06-02 18:16:24 +09:00
|
|
|
},
|
2013-03-01 08:29:07 +09:00
|
|
|
};
|
2015-12-26 01:35:21 +09:00
|
|
|
return NameOrNumberTree;
|
|
|
|
})();
|
|
|
|
|
|
|
|
var NameTree = (function NameTreeClosure() {
|
|
|
|
function NameTree(root, xref) {
|
|
|
|
this.root = root;
|
|
|
|
this.xref = xref;
|
|
|
|
this._type = 'Names';
|
|
|
|
}
|
|
|
|
|
|
|
|
Util.inherit(NameTree, NameOrNumberTree, {});
|
|
|
|
|
2013-03-01 08:29:07 +09:00
|
|
|
return NameTree;
|
|
|
|
})();
|
|
|
|
|
2015-12-26 01:35:21 +09:00
|
|
|
var NumberTree = (function NumberTreeClosure() {
|
|
|
|
function NumberTree(root, xref) {
|
|
|
|
this.root = root;
|
|
|
|
this.xref = xref;
|
|
|
|
this._type = 'Nums';
|
|
|
|
}
|
|
|
|
|
|
|
|
Util.inherit(NumberTree, NameOrNumberTree, {});
|
|
|
|
|
|
|
|
return NumberTree;
|
|
|
|
})();
|
|
|
|
|
2014-03-19 05:32:47 +09:00
|
|
|
/**
|
2015-02-03 00:12:52 +09:00
|
|
|
* "A PDF file can refer to the contents of another file by using a File
|
2014-03-19 05:32:47 +09:00
|
|
|
* Specification (PDF 1.1)", see the spec (7.11) for more details.
|
|
|
|
* NOTE: Only embedded files are supported (as part of the attachments support)
|
2015-02-03 00:12:52 +09:00
|
|
|
* TODO: support the 'URL' file system (with caching if !/V), portable
|
2014-03-19 05:32:47 +09:00
|
|
|
* collections attributes and related files (/RF)
|
|
|
|
*/
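// Illustrative usage sketch (fileSpecDict is a hypothetical /Filespec
// dictionary, e.g. from a FileAttachment annotation or the /EmbeddedFiles
// name tree):
//   var fileSpec = new FileSpec(fileSpecDict, xref);
//   fileSpec.filename;     // decoded name with normalized path separators
//   fileSpec.content;      // Uint8Array of the embedded data, or null
//   fileSpec.serializable; // plain { filename, content } object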
|
|
|
|
var FileSpec = (function FileSpecClosure() {
|
|
|
|
function FileSpec(root, xref) {
|
|
|
|
if (!root || !isDict(root)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
this.xref = xref;
|
|
|
|
this.root = root;
|
|
|
|
if (root.has('FS')) {
|
|
|
|
this.fs = root.get('FS');
|
|
|
|
}
|
|
|
|
this.description = root.has('Desc') ?
|
|
|
|
stringToPDFString(root.get('Desc')) :
|
|
|
|
'';
|
|
|
|
if (root.has('RF')) {
|
|
|
|
warn('Related file specifications are not supported');
|
|
|
|
}
|
|
|
|
this.contentAvailable = true;
|
|
|
|
if (!root.has('EF')) {
|
|
|
|
this.contentAvailable = false;
|
|
|
|
warn('Non-embedded file specifications are not supported');
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
function pickPlatformItem(dict) {
|
|
|
|
// Look for the filename in this order:
|
|
|
|
// UF, F, Unix, Mac, DOS
|
|
|
|
if (dict.has('UF')) {
|
|
|
|
return dict.get('UF');
|
|
|
|
} else if (dict.has('F')) {
|
|
|
|
return dict.get('F');
|
|
|
|
} else if (dict.has('Unix')) {
|
|
|
|
return dict.get('Unix');
|
|
|
|
} else if (dict.has('Mac')) {
|
|
|
|
return dict.get('Mac');
|
|
|
|
} else if (dict.has('DOS')) {
|
|
|
|
return dict.get('DOS');
|
|
|
|
}
|
2016-12-16 21:05:33 +09:00
|
|
|
return null;
|
2014-03-19 05:32:47 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
FileSpec.prototype = {
|
|
|
|
get filename() {
|
|
|
|
if (!this._filename && this.root) {
|
|
|
|
var filename = pickPlatformItem(this.root) || 'unnamed';
|
|
|
|
this._filename = stringToPDFString(filename).
|
|
|
|
replace(/\\\\/g, '\\').
|
|
|
|
replace(/\\\//g, '/').
|
|
|
|
replace(/\\/g, '/');
|
|
|
|
}
|
|
|
|
return this._filename;
|
|
|
|
},
|
|
|
|
get content() {
|
|
|
|
if (!this.contentAvailable) {
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
if (!this.contentRef && this.root) {
|
|
|
|
this.contentRef = pickPlatformItem(this.root.get('EF'));
|
|
|
|
}
|
|
|
|
var content = null;
|
|
|
|
if (this.contentRef) {
|
|
|
|
var xref = this.xref;
|
|
|
|
var fileObj = xref.fetchIfRef(this.contentRef);
|
|
|
|
if (fileObj && isStream(fileObj)) {
|
|
|
|
content = fileObj.getBytes();
|
|
|
|
} else {
|
|
|
|
warn('Embedded file specification points to non-existing/invalid ' +
|
|
|
|
'content');
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
warn('Embedded file specification does not have a content');
|
|
|
|
}
|
|
|
|
return content;
|
|
|
|
},
|
|
|
|
get serializable() {
|
|
|
|
return {
|
|
|
|
filename: this.filename,
|
2017-06-02 18:16:24 +09:00
|
|
|
content: this.content,
|
2014-03-19 05:32:47 +09:00
|
|
|
};
|
2017-06-02 18:16:24 +09:00
|
|
|
    },
  };

  return FileSpec;
})();

/**
 * A helper for loading missing data in `Dict` graphs. It traverses the graph
 * depth first and queues up any objects that have missing data. Once it has
 * traversed as many objects as are currently available, it attempts to bundle
 * the missing data requests and then resumes from the nodes that weren't
 * ready.
 *
 * NOTE: It provides protection from circular references by keeping track of
 * loaded references. However, you must be careful not to load any graphs
 * that have references to the catalog or other pages, since that will cause
 * the entire PDF document object graph to be traversed.
 *
 * See the usage example following this closure.
 */
let ObjectLoader = (function() {
  function mayHaveChildren(value) {
    return isRef(value) || isDict(value) || Array.isArray(value) ||
           isStream(value);
  }

  function addChildren(node, nodesToVisit) {
    if (isDict(node) || isStream(node)) {
      let dict = isDict(node) ? node : node.dict;
      let dictKeys = dict.getKeys();
      for (let i = 0, ii = dictKeys.length; i < ii; i++) {
        let rawValue = dict.getRaw(dictKeys[i]);
        if (mayHaveChildren(rawValue)) {
          nodesToVisit.push(rawValue);
        }
      }
    } else if (Array.isArray(node)) {
      for (let i = 0, ii = node.length; i < ii; i++) {
        let value = node[i];
        if (mayHaveChildren(value)) {
          nodesToVisit.push(value);
        }
      }
    }
  }

  function ObjectLoader(dict, keys, xref) {
    this.dict = dict;
    this.keys = keys;
    this.xref = xref;
    this.refSet = null;
    this.capability = null;
  }

  ObjectLoader.prototype = {
    load() {
      this.capability = createPromiseCapability();
      // Don't walk the graph if all the data is already loaded.
      if (!(this.xref.stream instanceof ChunkedStream) ||
          this.xref.stream.getMissingChunks().length === 0) {
        this.capability.resolve();
        return this.capability.promise;
      }

      let { keys, dict, } = this;
      this.refSet = new RefSet();
      // Setup the initial nodes to visit.
      let nodesToVisit = [];
      for (let i = 0, ii = keys.length; i < ii; i++) {
        let rawValue = dict.getRaw(keys[i]);
        // Skip nodes that are guaranteed to be empty.
        if (rawValue !== undefined) {
          nodesToVisit.push(rawValue);
        }
      }

      this._walk(nodesToVisit);
      return this.capability.promise;
    },
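
    // Walks the object graph depth first, starting from `nodesToVisit`.
    // References that throw `MissingDataException` when fetched, and streams
    // with missing chunks, are queued in `nodesToRevisit` together with the
    // byte ranges they require; once those ranges have been requested, the
    // walk resumes from the queued nodes.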
    _walk(nodesToVisit) {
      let nodesToRevisit = [];
      let pendingRequests = [];
      // DFS walk of the object graph.
      while (nodesToVisit.length) {
        let currentNode = nodesToVisit.pop();

        // Only references or chunked streams can cause missing data
        // exceptions.
        if (isRef(currentNode)) {
          // Skip nodes that have already been visited.
          if (this.refSet.has(currentNode)) {
            continue;
          }
          try {
            this.refSet.put(currentNode);
            currentNode = this.xref.fetch(currentNode);
          } catch (ex) {
            if (!(ex instanceof MissingDataException)) {
              throw ex;
            }
            nodesToRevisit.push(currentNode);
            pendingRequests.push({ begin: ex.begin, end: ex.end, });
          }
        }
        if (currentNode && currentNode.getBaseStreams) {
          let baseStreams = currentNode.getBaseStreams();
          let foundMissingData = false;
          for (let i = 0, ii = baseStreams.length; i < ii; i++) {
            let stream = baseStreams[i];
            if (stream.getMissingChunks && stream.getMissingChunks().length) {
              foundMissingData = true;
              pendingRequests.push({ begin: stream.start, end: stream.end, });
            }
          }
          if (foundMissingData) {
            nodesToRevisit.push(currentNode);
          }
        }

        addChildren(currentNode, nodesToVisit);
      }

      if (pendingRequests.length) {
        this.xref.stream.manager.requestRanges(pendingRequests).then(() => {
          for (let i = 0, ii = nodesToRevisit.length; i < ii; i++) {
            let node = nodesToRevisit[i];
            // Remove any reference nodes from the current `RefSet` so they
            // aren't skipped when we revisit them.
            if (isRef(node)) {
              this.refSet.remove(node);
            }
          }
          this._walk(nodesToRevisit);
        }, this.capability.reject);
        return;
      }
      // Everything is loaded.
      this.refSet = null;
      this.capability.resolve();
    },
  };

  return ObjectLoader;
})();
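
// Illustrative usage sketch, assuming `pageDict` is a `Dict` and `xref` an
// `XRef` obtained elsewhere in the document code; the key names below are
// examples only. Once the returned promise resolves, all data reachable from
// those keys has been fetched:
//
//   let loader = new ObjectLoader(pageDict, ['Resources', 'Contents'], xref);
//   loader.load().then(function() {
//     // All objects reachable from 'Resources' and 'Contents' are now
//     // available, so e.g. `pageDict.get('Resources')` can be used
//     // synchronously without risking a `MissingDataException`.
//   });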

export {
  Catalog,
  ObjectLoader,
  XRef,
  FileSpec,
};