2012-09-01 07:48:21 +09:00
|
|
|
/* Copyright 2012 Mozilla Foundation
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
2011-10-26 10:18:22 +09:00
|
|
|
|
|
|
|
'use strict';
|
|
|
|
|
2015-11-22 01:32:47 +09:00
|
|
|
(function (root, factory) {
|
|
|
|
if (typeof define === 'function' && define.amd) {
|
|
|
|
define('pdfjs/core/obj', ['exports', 'pdfjs/shared/util',
|
|
|
|
'pdfjs/core/primitives', 'pdfjs/core/crypto', 'pdfjs/core/parser',
|
2016-01-31 23:35:57 +09:00
|
|
|
'pdfjs/core/chunked_stream', 'pdfjs/core/colorspace'], factory);
|
2015-11-22 01:32:47 +09:00
|
|
|
} else if (typeof exports !== 'undefined') {
|
|
|
|
factory(exports, require('../shared/util.js'), require('./primitives.js'),
|
|
|
|
require('./crypto.js'), require('./parser.js'),
|
2016-01-31 23:35:57 +09:00
|
|
|
require('./chunked_stream.js'), require('./colorspace.js'));
|
2015-11-22 01:32:47 +09:00
|
|
|
} else {
|
|
|
|
factory((root.pdfjsCoreObj = {}), root.pdfjsSharedUtil,
|
|
|
|
root.pdfjsCorePrimitives, root.pdfjsCoreCrypto, root.pdfjsCoreParser,
|
2016-01-31 23:35:57 +09:00
|
|
|
root.pdfjsCoreChunkedStream, root.pdfjsCoreColorSpace);
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
2015-11-22 01:32:47 +09:00
|
|
|
}(this, function (exports, sharedUtil, corePrimitives, coreCrypto, coreParser,
|
2016-01-31 23:35:57 +09:00
|
|
|
coreChunkedStream, coreColorSpace) {
|
2015-11-22 01:32:47 +09:00
|
|
|
|
|
|
|
// Unpack the UMD factory parameters into short local aliases, so the rest
// of this file can refer to the shared/core helpers by their bare names.
var InvalidPDFException = sharedUtil.InvalidPDFException;
var MissingDataException = sharedUtil.MissingDataException;
var XRefParseException = sharedUtil.XRefParseException;
var assert = sharedUtil.assert;
var bytesToString = sharedUtil.bytesToString;
var createPromiseCapability = sharedUtil.createPromiseCapability;
var error = sharedUtil.error;
var info = sharedUtil.info;
var isArray = sharedUtil.isArray;
var isInt = sharedUtil.isInt;
var isString = sharedUtil.isString;
var shadow = sharedUtil.shadow;
var stringToPDFString = sharedUtil.stringToPDFString;
var stringToUTF8String = sharedUtil.stringToUTF8String;
var warn = sharedUtil.warn;
var isValidUrl = sharedUtil.isValidUrl;
var Util = sharedUtil.Util;
var Ref = corePrimitives.Ref;
var RefSet = corePrimitives.RefSet;
var RefSetCache = corePrimitives.RefSetCache;
var isName = corePrimitives.isName;
var isCmd = corePrimitives.isCmd;
var isDict = corePrimitives.isDict;
var isRef = corePrimitives.isRef;
var isStream = corePrimitives.isStream;
var CipherTransformFactory = coreCrypto.CipherTransformFactory;
var Lexer = coreParser.Lexer;
var Parser = coreParser.Parser;
var ChunkedStream = coreChunkedStream.ChunkedStream;
var ColorSpace = coreColorSpace.ColorSpace;
|
2013-06-26 02:33:53 +09:00
|
|
|
|
2011-12-07 07:18:40 +09:00
|
|
|
var Catalog = (function CatalogClosure() {
|
2015-11-22 01:32:47 +09:00
|
|
|
// Constructor for the document Catalog, the root of the PDF object
// hierarchy (page tree, outline, destinations, etc.).
// @param {Object} pdfManager - owner used e.g. for data-loading requests.
// @param {XRef} xref - cross-reference table; must already expose the
//   catalog dictionary via getCatalogObj().
// @param {Object} pageFactory - factory whose createPage() builds Page
//   objects (see getPage below).
// Throws (via assert) if the catalog object is not a dictionary.
function Catalog(pdfManager, xref, pageFactory) {
  this.pdfManager = pdfManager;
  this.xref = xref;
  this.catDict = xref.getCatalogObj();
  // Cache of font-translation promises, shared with every created page.
  this.fontCache = new RefSetCache();
  assert(isDict(this.catDict),
    'catalog object is not a dictionary');

  // TODO refactor to move getPage() to the PDFDocument.
  this.pageFactory = pageFactory;
  // Sparse array: pagePromises[i] caches the promise for page index i.
  this.pagePromises = [];
}
|
|
|
|
|
2011-12-07 07:18:40 +09:00
|
|
|
Catalog.prototype = {
|
2012-03-25 03:59:51 +09:00
|
|
|
// Lazily reads the document's XMP metadata stream (/Metadata in the
// catalog) and returns it as a UTF-8 decoded string, or undefined/null
// when absent or invalid. The result is cached on the instance via
// shadow(), so the work happens at most once.
get metadata() {
  var streamRef = this.catDict.getRaw('Metadata');
  if (!isRef(streamRef)) {
    // No (indirect) metadata stream; cache and return null.
    return shadow(this, 'metadata', null);
  }

  // Whether the metadata stream itself is encrypted depends on the
  // document's encryption dictionary (EncryptMetadata flag).
  var encryptMetadata = (!this.xref.encrypt ? false :
                         this.xref.encrypt.encryptMetadata);

  // The second argument suppresses decryption when the metadata stream
  // is stored unencrypted in an otherwise encrypted document.
  var stream = this.xref.fetch(streamRef, !encryptMetadata);
  var metadata;
  if (stream && isDict(stream.dict)) {
    var type = stream.dict.get('Type');
    var subtype = stream.dict.get('Subtype');

    if (isName(type) && isName(subtype) &&
        type.name === 'Metadata' && subtype.name === 'XML') {
      // XXX: This should examine the charset the XML document defines,
      // however since there are currently no real means to decode
      // arbitrary charsets, let's just hope that the author of the PDF
      // was reasonable enough to stick with the XML default charset,
      // which is UTF-8.
      try {
        metadata = stringToUTF8String(bytesToString(stream.getBytes()));
      } catch (e) {
        // Malformed UTF-8 (or a stream read error): treat as no metadata.
        info('Skipping invalid metadata.');
      }
    }
  }

  return shadow(this, 'metadata', metadata);
},
|
2011-10-25 08:55:23 +09:00
|
|
|
get toplevelPagesDict() {
|
|
|
|
var pagesObj = this.catDict.get('Pages');
|
2014-04-13 23:02:56 +09:00
|
|
|
assert(isDict(pagesObj), 'invalid top-level pages dictionary');
|
2011-10-25 08:55:23 +09:00
|
|
|
// shadow the prototype getter
|
2012-04-05 03:43:04 +09:00
|
|
|
return shadow(this, 'toplevelPagesDict', pagesObj);
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
|
|
|
get documentOutline() {
|
2013-06-22 05:42:55 +09:00
|
|
|
var obj = null;
|
|
|
|
try {
|
|
|
|
obj = this.readDocumentOutline();
|
|
|
|
} catch (ex) {
|
|
|
|
if (ex instanceof MissingDataException) {
|
|
|
|
throw ex;
|
|
|
|
}
|
|
|
|
warn('Unable to read document outline');
|
|
|
|
}
|
|
|
|
return shadow(this, 'documentOutline', obj);
|
|
|
|
},
|
|
|
|
// Walks the outline (bookmark) tree rooted at /Outlines and builds a
// plain-object tree of items: { dest, url, title, color, count, bold,
// italic, items }. Returns the array of top-level items, or null when
// there is no outline. Traversal is iterative (worklist) and guarded by
// a RefSet so reference cycles in broken documents cannot loop forever.
readDocumentOutline: function Catalog_readDocumentOutline() {
  var obj = this.catDict.get('Outlines');
  if (!isDict(obj)) {
    return null;
  }
  // The first child of the outline root.
  obj = obj.getRaw('First');
  if (!isRef(obj)) {
    return null;
  }
  var root = { items: [] };
  var queue = [{obj: obj, parent: root}];
  // To avoid recursion, keep track of the already processed items.
  var processed = new RefSet();
  processed.put(obj);
  // blackColor is the default (0,0,0) outline color, shared by items
  // that don't specify a valid /C entry.
  var xref = this.xref, blackColor = new Uint8Array(3);

  while (queue.length > 0) {
    var i = queue.shift();
    var outlineDict = xref.fetchIfRef(i.obj);
    if (outlineDict === null) {
      continue;
    }
    assert(outlineDict.has('Title'), 'Invalid outline item');

    // Prefer an explicit GoTo/URI action (/A); fall back to /Dest.
    var actionDict = outlineDict.get('A'), dest = null, url = null;
    if (actionDict) {
      var destEntry = actionDict.get('D');
      if (destEntry) {
        dest = destEntry;
      } else {
        var uriEntry = actionDict.get('URI');
        if (isString(uriEntry) && isValidUrl(uriEntry, false)) {
          url = uriEntry;
        }
      }
    } else if (outlineDict.has('Dest')) {
      dest = outlineDict.getRaw('Dest');
      if (isName(dest)) {
        dest = dest.name;
      }
    }
    var title = outlineDict.get('Title');
    // /F flags: bit 1 = italic, bit 2 = bold (see usage below).
    var flags = outlineDict.get('F') || 0;

    var color = outlineDict.getArray('C'), rgbColor = blackColor;
    // We only need to parse the color when it's valid, and non-default.
    if (isArray(color) && color.length === 3 &&
        (color[0] !== 0 || color[1] !== 0 || color[2] !== 0)) {
      rgbColor = ColorSpace.singletons.rgb.getRgb(color, 0);
    }
    var outlineItem = {
      dest: dest,
      url: url,
      title: stringToPDFString(title),
      color: rgbColor,
      count: outlineDict.get('Count'),
      bold: !!(flags & 2),
      italic: !!(flags & 1),
      items: []
    };
    i.parent.items.push(outlineItem);
    // Descend into the first child, and continue with the next sibling,
    // skipping anything we've already queued (cycle protection).
    obj = outlineDict.getRaw('First');
    if (isRef(obj) && !processed.has(obj)) {
      queue.push({obj: obj, parent: outlineItem});
      processed.put(obj);
    }
    obj = outlineDict.getRaw('Next');
    if (isRef(obj) && !processed.has(obj)) {
      queue.push({obj: obj, parent: i.parent});
      processed.put(obj);
    }
  }
  return (root.items.length > 0 ? root.items : null);
},
|
|
|
|
get numPages() {
|
|
|
|
var obj = this.toplevelPagesDict.get('Count');
|
2014-04-13 23:02:56 +09:00
|
|
|
assert(
|
2011-10-25 08:55:23 +09:00
|
|
|
isInt(obj),
|
|
|
|
'page count in top level pages object is not an integer'
|
|
|
|
);
|
|
|
|
// shadow the prototype getter
|
|
|
|
return shadow(this, 'num', obj);
|
|
|
|
},
|
|
|
|
// Collects ALL named destinations of the document into a plain map from
// destination name to destination array. Destinations may live either in
// a name tree (/Names -> /Dests, PDF 1.2+) or in a plain dictionary
// (/Dests directly in the catalog, PDF 1.1). Cached via shadow().
get destinations() {
  // A destination entry may itself be a dictionary wrapping the actual
  // destination array under /D.
  function fetchDestination(dest) {
    return isDict(dest) ? dest.get('D') : dest;
  }

  var xref = this.xref;
  var dests = {}, nameTreeRef, nameDictionaryRef;
  var obj = this.catDict.get('Names');
  if (obj && obj.has('Dests')) {
    nameTreeRef = obj.getRaw('Dests');
  } else if (this.catDict.has('Dests')) {
    nameDictionaryRef = this.catDict.get('Dests');
  }

  if (nameDictionaryRef) {
    // reading simple destination dictionary
    obj = nameDictionaryRef;
    obj.forEach(function catalogForEach(key, value) {
      if (!value) {
        return;
      }
      dests[key] = fetchDestination(value);
    });
  }
  if (nameTreeRef) {
    var nameTree = new NameTree(nameTreeRef, xref);
    var names = nameTree.getAll();
    for (var name in names) {
      dests[name] = fetchDestination(names[name]);
    }
  }
  return shadow(this, 'destinations', dests);
},
|
2014-10-05 22:56:40 +09:00
|
|
|
getDestination: function Catalog_getDestination(destinationId) {
|
|
|
|
function fetchDestination(dest) {
|
|
|
|
return isDict(dest) ? dest.get('D') : dest;
|
|
|
|
}
|
|
|
|
|
|
|
|
var xref = this.xref;
|
2015-07-08 04:48:57 +09:00
|
|
|
var dest = null, nameTreeRef, nameDictionaryRef;
|
2014-10-05 22:56:40 +09:00
|
|
|
var obj = this.catDict.get('Names');
|
|
|
|
if (obj && obj.has('Dests')) {
|
|
|
|
nameTreeRef = obj.getRaw('Dests');
|
|
|
|
} else if (this.catDict.has('Dests')) {
|
|
|
|
nameDictionaryRef = this.catDict.get('Dests');
|
|
|
|
}
|
|
|
|
|
2015-07-08 22:31:06 +09:00
|
|
|
if (nameDictionaryRef) { // Simple destination dictionary.
|
|
|
|
var value = nameDictionaryRef.get(destinationId);
|
|
|
|
if (value) {
|
|
|
|
dest = fetchDestination(value);
|
|
|
|
}
|
2014-10-05 22:56:40 +09:00
|
|
|
}
|
|
|
|
if (nameTreeRef) {
|
|
|
|
var nameTree = new NameTree(nameTreeRef, xref);
|
2014-10-06 00:34:49 +09:00
|
|
|
dest = fetchDestination(nameTree.get(destinationId));
|
2014-10-05 22:56:40 +09:00
|
|
|
}
|
|
|
|
return dest;
|
|
|
|
},
|
2015-12-26 05:57:08 +09:00
|
|
|
|
|
|
|
get pageLabels() {
|
|
|
|
var obj = null;
|
|
|
|
try {
|
|
|
|
obj = this.readPageLabels();
|
|
|
|
} catch (ex) {
|
|
|
|
if (ex instanceof MissingDataException) {
|
|
|
|
throw ex;
|
|
|
|
}
|
|
|
|
warn('Unable to read page labels.');
|
|
|
|
}
|
|
|
|
return shadow(this, 'pageLabels', obj);
|
|
|
|
},
|
|
|
|
// Builds one label string per page from the /PageLabels number tree.
// Each tree entry is a PageLabel dictionary giving a numbering style
// (/S: D decimal, R/r roman, A/a alphabetic), an optional prefix (/P)
// and an optional start value (/St); the style stays in effect for all
// following pages until the next entry. Returns the label array, or
// null when the document has no /PageLabels. Invalid entries abort via
// assert (caught and warned about by the pageLabels getter).
readPageLabels: function Catalog_readPageLabels() {
  var obj = this.catDict.getRaw('PageLabels');
  if (!obj) {
    return null;
  }
  var pageLabels = new Array(this.numPages);
  // Numbering state; persists across iterations until a new PageLabel
  // dictionary overrides it.
  var style = null;
  var prefix = '';
  var start = 1;

  var numberTree = new NumberTree(obj, this.xref);
  var nums = numberTree.getAll();
  var currentLabel = '', currentIndex = 1;

  for (var i = 0, ii = this.numPages; i < ii; i++) {
    if (i in nums) {
      // A new labelling range starts at this page index.
      var labelDict = nums[i];
      assert(isDict(labelDict), 'The PageLabel is not a dictionary.');

      var type = labelDict.get('Type');
      assert(!type || (isName(type) && type.name === 'PageLabel'),
             'Invalid type in PageLabel dictionary.');

      var s = labelDict.get('S');
      assert(!s || isName(s), 'Invalid style in PageLabel dictionary.');
      style = (s ? s.name : null);

      prefix = labelDict.get('P') || '';
      assert(isString(prefix), 'Invalid prefix in PageLabel dictionary.');

      start = labelDict.get('St') || 1;
      assert(isInt(start), 'Invalid start in PageLabel dictionary.');
      currentIndex = start;
    }

    switch (style) {
      case 'D':
        // Decimal arabic numerals.
        currentLabel = currentIndex;
        break;
      case 'R':
      case 'r':
        // Upper/lowercase roman numerals.
        currentLabel = Util.toRoman(currentIndex, style === 'r');
        break;
      case 'A':
      case 'a':
        // Alphabetic: A..Z, then AA..ZZ, then AAA..., per spec.
        var LIMIT = 26; // Use only the characters A--Z, or a--z.
        var A_UPPER_CASE = 0x41, A_LOWER_CASE = 0x61;

        var baseCharCode = (style === 'a' ? A_LOWER_CASE : A_UPPER_CASE);
        var letterIndex = currentIndex - 1;
        var character = String.fromCharCode(baseCharCode +
                                            (letterIndex % LIMIT));
        var charBuf = [];
        for (var j = 0, jj = (letterIndex / LIMIT) | 0; j <= jj; j++) {
          charBuf.push(character);
        }
        currentLabel = charBuf.join('');
        break;
      default:
        // style === null means "prefix only" and is valid; any other
        // value is rejected.
        assert(!style,
               'Invalid style "' + style + '" in PageLabel dictionary.');
    }
    pageLabels[i] = prefix + currentLabel;

    currentLabel = '';
    currentIndex++;
  }
  return pageLabels;
},
|
|
|
|
|
2014-03-19 05:32:47 +09:00
|
|
|
// Collects the document's file attachments from the /Names ->
// /EmbeddedFiles name tree. Returns a null-prototype map from the
// (PDF-decoded) attachment name to the serializable FileSpec data, or
// null when the document has no attachments. Cached via shadow().
get attachments() {
  var xref = this.xref;
  var attachments = null, nameTreeRef;
  var obj = this.catDict.get('Names');
  if (obj) {
    nameTreeRef = obj.getRaw('EmbeddedFiles');
  }

  if (nameTreeRef) {
    var nameTree = new NameTree(nameTreeRef, xref);
    var names = nameTree.getAll();
    for (var name in names) {
      var fs = new FileSpec(names[name], xref);
      if (!attachments) {
        // Null prototype avoids collisions with inherited Object keys.
        attachments = Object.create(null);
      }
      attachments[stringToPDFString(name)] = fs.serializable;
    }
  }
  return shadow(this, 'attachments', attachments);
},
|
2013-03-01 08:29:07 +09:00
|
|
|
// Gathers the document-level JavaScript snippets: entries of the
// /Names -> /JavaScript name tree plus any JavaScript (or the
// non-standard named Print action) in the catalog's /OpenAction.
// Returns an array of script strings; cached via shadow().
get javaScript() {
  var xref = this.xref;
  var obj = this.catDict.get('Names');

  var javaScript = [];
  // Pushes jsDict's /JS onto javaScript when it is a JavaScript action
  // whose script is a string or a stream; silently skips anything else.
  function appendIfJavaScriptDict(jsDict) {
    var type = jsDict.get('S');
    if (!isName(type) || type.name !== 'JavaScript') {
      return;
    }
    var js = jsDict.get('JS');
    if (isStream(js)) {
      js = bytesToString(js.getBytes());
    } else if (!isString(js)) {
      return;
    }
    javaScript.push(stringToPDFString(js));
  }
  if (obj && obj.has('JavaScript')) {
    var nameTree = new NameTree(obj.getRaw('JavaScript'), xref);
    var names = nameTree.getAll();
    for (var name in names) {
      // We don't really use the JavaScript right now. This code is
      // defensive so we don't cause errors on document load.
      var jsDict = names[name];
      if (isDict(jsDict)) {
        appendIfJavaScriptDict(jsDict);
      }
    }
  }

  // Append OpenAction actions to javaScript array
  var openactionDict = this.catDict.get('OpenAction');
  if (isDict(openactionDict, 'Action')) {
    var actionType = openactionDict.get('S');
    if (isName(actionType) && actionType.name === 'Named') {
      // The named Print action is not a part of the PDF 1.7 specification,
      // but is supported by many PDF readers/writers (including Adobe's).
      var action = openactionDict.get('N');
      if (isName(action) && action.name === 'Print') {
        javaScript.push('print({});');
      }
    } else {
      appendIfJavaScriptDict(openactionDict);
    }
  }

  return shadow(this, 'javaScript', javaScript);
},
|
2013-02-07 08:19:29 +09:00
|
|
|
|
2013-11-15 06:43:38 +09:00
|
|
|
cleanup: function Catalog_cleanup() {
|
2014-05-10 10:21:15 +09:00
|
|
|
var promises = [];
|
|
|
|
this.fontCache.forEach(function (promise) {
|
|
|
|
promises.push(promise);
|
2013-11-15 06:43:38 +09:00
|
|
|
});
|
2014-05-21 11:57:04 +09:00
|
|
|
return Promise.all(promises).then(function (translatedFonts) {
|
|
|
|
for (var i = 0, ii = translatedFonts.length; i < ii; i++) {
|
|
|
|
var font = translatedFonts[i].dict;
|
|
|
|
delete font.translated;
|
2014-05-10 10:21:15 +09:00
|
|
|
}
|
|
|
|
this.fontCache.clear();
|
|
|
|
}.bind(this));
|
2013-11-15 06:43:38 +09:00
|
|
|
},
|
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
getPage: function Catalog_getPage(pageIndex) {
|
|
|
|
if (!(pageIndex in this.pagePromises)) {
|
2013-11-14 08:27:46 +09:00
|
|
|
this.pagePromises[pageIndex] = this.getPageDict(pageIndex).then(
|
|
|
|
function (a) {
|
|
|
|
var dict = a[0];
|
|
|
|
var ref = a[1];
|
2015-11-22 01:32:47 +09:00
|
|
|
return this.pageFactory.createPage(pageIndex, dict, ref,
|
|
|
|
this.fontCache);
|
2013-11-14 08:27:46 +09:00
|
|
|
}.bind(this)
|
|
|
|
);
|
2013-02-07 08:19:29 +09:00
|
|
|
}
|
|
|
|
return this.pagePromises[pageIndex];
|
|
|
|
},
|
|
|
|
|
2013-11-14 08:27:46 +09:00
|
|
|
// Resolves the page tree to find the dictionary of the page with the
// given zero-based index. Returns a promise for a [pageDict, pageRef]
// pair (pageRef being the Ref that was fetched, or undefined for an
// inline node). The traversal is iterative; whenever a node is an
// unfetched Ref it switches to an async fetch and re-enters next().
getPageDict: function Catalog_getPageDict(pageIndex) {
  var capability = createPromiseCapability();
  // Depth-first worklist, seeded with the (raw) top-level /Pages entry.
  var nodesToVisit = [this.catDict.getRaw('Pages')];
  // Number of leaf pages already accounted for to the left of the
  // current position.
  var currentPageIndex = 0;
  var xref = this.xref;
  var checkAllKids = false;

  function next() {
    while (nodesToVisit.length) {
      var currentNode = nodesToVisit.pop();

      if (isRef(currentNode)) {
        // Fetch the referenced node, then resume the loop from the
        // callback; the synchronous loop must stop here.
        xref.fetchAsync(currentNode).then(function (obj) {
          // A leaf page: either an explicit /Type /Page, or a dict
          // without /Kids (tolerates missing /Type).
          if (isDict(obj, 'Page') || (isDict(obj) && !obj.has('Kids'))) {
            if (pageIndex === currentPageIndex) {
              capability.resolve([obj, currentNode]);
            } else {
              currentPageIndex++;
              next();
            }
            return;
          }
          // An intermediate pages node: descend into it.
          nodesToVisit.push(obj);
          next();
        }, capability.reject);
        return;
      }

      // Must be a child page dictionary.
      assert(
        isDict(currentNode),
        'page dictionary kid reference points to wrong type of object'
      );
      var count = currentNode.get('Count');
      // If the current node doesn't have any children, avoid getting stuck
      // in an empty node further down in the tree (see issue5644.pdf).
      if (count === 0) {
        checkAllKids = true;
      }
      // Skip nodes where the page can't be.
      if (currentPageIndex + count <= pageIndex) {
        currentPageIndex += count;
        continue;
      }

      var kids = currentNode.get('Kids');
      assert(isArray(kids), 'page dictionary kids object is not an array');
      if (!checkAllKids && count === kids.length) {
        // Nodes that don't have the page have been skipped and this is the
        // bottom of the tree which means the page requested must be a
        // descendant of this pages node. Ideally we would just resolve the
        // promise with the page ref here, but there is the case where more
        // pages nodes could link to single a page (see issue 3666 pdf). To
        // handle this push it back on the queue so if it is a pages node it
        // will be descended into.
        nodesToVisit = [kids[pageIndex - currentPageIndex]];
        currentPageIndex = pageIndex;
        continue;
      } else {
        // Push the kids in reverse so they are popped in document order.
        for (var last = kids.length - 1; last >= 0; last--) {
          nodesToVisit.push(kids[last]);
        }
      }
    }
    // Worklist exhausted without hitting the requested index.
    capability.reject('Page index ' + pageIndex + ' not found.');
  }
  next();
  return capability.promise;
},
|
|
|
|
|
|
|
|
// Computes the zero-based page index of the page referenced by `ref`,
// asynchronously. Returns a promise resolving to that index.
getPageIndex: function Catalog_getPageIndex(ref) {
  // The page tree nodes have the count of all the leaves below them. To get
  // how many pages are before we just have to walk up the tree and keep
  // adding the count of siblings to the left of the node.
  var xref = this.xref;
  // Resolves to [pagesToTheLeft, parentRef] for the node `kidRef`,
  // or null once the tree root has been passed.
  function pagesBeforeRef(kidRef) {
    var total = 0;
    var parentRef;
    return xref.fetchAsync(kidRef).then(function (node) {
      if (!node) {
        return null;
      }
      parentRef = node.getRaw('Parent');
      return node.getAsync('Parent');
    }).then(function (parent) {
      if (!parent) {
        return null;
      }
      return parent.getAsync('Kids');
    }).then(function (kids) {
      if (!kids) {
        return null;
      }
      var kidPromises = [];
      var found = false;
      // Sum the page counts of the siblings preceding kidRef.
      for (var i = 0; i < kids.length; i++) {
        var kid = kids[i];
        assert(isRef(kid), 'kids must be a ref');
        if (kid.num === kidRef.num) {
          found = true;
          break;
        }
        kidPromises.push(xref.fetchAsync(kid).then(function (kid) {
          if (kid.has('Count')) {
            // An intermediate node: contributes its whole subtree count.
            var count = kid.get('Count');
            total += count;
          } else { // page leaf node
            total++;
          }
        }));
      }
      if (!found) {
        error('kid ref not found in parents kids');
      }
      return Promise.all(kidPromises).then(function () {
        return [total, parentRef];
      });
    });
  }

  // Walk upward from `ref`, accumulating the left-sibling page counts
  // at each level until pagesBeforeRef reports the root was passed.
  var total = 0;
  function next(ref) {
    return pagesBeforeRef(ref).then(function (args) {
      if (!args) {
        return total;
      }
      var count = args[0];
      var parentRef = args[1];
      total += count;
      return next(parentRef);
    });
  }

  return next(ref);
}
|
|
|
|
};
|
|
|
|
|
2011-12-07 07:18:40 +09:00
|
|
|
return Catalog;
|
2011-10-25 08:55:23 +09:00
|
|
|
})();
|
|
|
|
|
2011-12-07 07:18:40 +09:00
|
|
|
var XRef = (function XRefClosure() {
|
2013-02-07 08:19:29 +09:00
|
|
|
// Constructor for the cross-reference table reader.
// @param {Stream} stream - the raw PDF data stream to parse.
// @param {String} password - user-supplied password, handed to the
//   CipherTransformFactory when the document turns out to be encrypted.
function XRef(stream, password) {
  this.stream = stream;
  // One entry per object number, filled in while parsing xref sections.
  this.entries = [];
  // Positions of xref streams already seen; null prototype so arbitrary
  // numeric keys can't collide with inherited Object members.
  this.xrefstms = Object.create(null);
  // prepare the XRef cache
  this.cache = [];
  this.password = password;
  // Parsing statistics (stream/font types encountered).
  this.stats = { streamTypes: [], fontTypes: [] };
}
|
|
|
|
|
2011-12-07 07:18:40 +09:00
|
|
|
XRef.prototype = {
|
2013-02-07 08:19:29 +09:00
|
|
|
// Seeds the queue of xref-table start offsets with the position found
// after `startxref`; parsing may later push further positions onto it.
setStartXRef: function XRef_setStartXRef(startXRef) {
  // Store the starting positions of xref tables as we process them
  // so we can recover from missing data errors
  this.startXRefQueue = [startXRef];
},
|
|
|
|
|
|
|
|
// Parses the cross-reference data and trailer. In recovery mode the
// whole file is scanned for objects instead of trusting the xref table.
// Side effects: sets this.trailer, this.root, and (for encrypted
// documents) this.encrypt. Calls error() when /Root is missing.
parse: function XRef_parse(recoveryMode) {
  var trailerDict;
  if (!recoveryMode) {
    trailerDict = this.readXRef();
  } else {
    warn('Indexing all PDF objects');
    trailerDict = this.indexObjects();
  }
  trailerDict.assignXref(this);
  this.trailer = trailerDict;
  var encrypt = trailerDict.get('Encrypt');
  if (encrypt) {
    var ids = trailerDict.get('ID');
    // The first /ID entry seeds the decryption key; fall back to ''.
    var fileId = (ids && ids.length) ? ids[0] : '';
    this.encrypt = new CipherTransformFactory(encrypt, fileId,
                                              this.password);
  }

  // get the root dictionary (catalog) object
  if (!(this.root = trailerDict.get('Root'))) {
    error('Invalid root reference');
  }
},
|
|
|
|
|
|
|
|
// Reads one classic (non-stream) xref table plus its trailer dictionary
// and returns that dictionary. Parsing state is kept in this.tableState
// so that a MissingDataException thrown mid-table can be retried and the
// parse resumed where it left off; the state is deleted on success.
processXRefTable: function XRef_processXRefTable(parser) {
  if (!('tableState' in this)) {
    // Stores state of the table as we process it so we can resume
    // from middle of table in case of missing data error
    this.tableState = {
      entryNum: 0,
      streamPos: parser.lexer.stream.pos,
      parserBuf1: parser.buf1,
      parserBuf2: parser.buf2
    };
  }

  var obj = this.readXRefTable(parser);

  // Sanity check: readXRefTable must have stopped at the 'trailer'
  // keyword.
  if (!isCmd(obj, 'trailer')) {
    error('Invalid XRef table: could not find trailer dictionary');
  }
  // Read trailer dictionary, e.g.
  // trailer
  //   << /Size 22
  //      /Root 20R
  //      /Info 10R
  //      /ID [ <81b14aafa313db63dbd6f981e49f94f4> ]
  //   >>
  // The parser goes through the entire stream << ... >> and provides
  // a getter interface for the key-value table
  var dict = parser.getObj();

  // The pdflib PDF generator can generate a nested trailer dictionary
  if (!isDict(dict) && dict.dict) {
    dict = dict.dict;
  }
  if (!isDict(dict)) {
    error('Invalid XRef table: could not parse trailer dictionary');
  }
  delete this.tableState;

  return dict;
},
|
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
// Core loop for reading a classic xref table into this.entries. Resumes
// from this.tableState (set up by processXRefTable) so a retry after a
// MissingDataException continues at the last safely-parsed position.
// Returns the token that terminated the table (expected: cmd 'trailer').
readXRefTable: function XRef_readXRefTable(parser) {
  // Example of cross-reference table:
  // xref
  // 0 1                    <-- subsection header (first obj #, obj count)
  // 0000000000 65535 f     <-- actual object (offset, generation #, f/n)
  // 23 2                   <-- subsection header ... and so on ...
  // 0000025518 00002 n
  // 0000025635 00000 n
  // trailer
  // ...

  // Restore the stream/parser position recorded in the table state.
  var stream = parser.lexer.stream;
  var tableState = this.tableState;
  stream.pos = tableState.streamPos;
  parser.buf1 = tableState.parserBuf1;
  parser.buf2 = tableState.parserBuf2;

  // Outer loop is over subsection headers
  var obj;

  while (true) {
    if (!('firstEntryNum' in tableState) || !('entryCount' in tableState)) {
      if (isCmd(obj = parser.getObj(), 'trailer')) {
        break;
      }
      tableState.firstEntryNum = obj;
      tableState.entryCount = parser.getObj();
    }

    var first = tableState.firstEntryNum;
    var count = tableState.entryCount;
    if (!isInt(first) || !isInt(count)) {
      error('Invalid XRef table: wrong types in subsection header');
    }
    // Inner loop is over objects themselves
    for (var i = tableState.entryNum; i < count; i++) {
      // Checkpoint before each entry so a retry re-reads only this one.
      tableState.streamPos = stream.pos;
      tableState.entryNum = i;
      tableState.parserBuf1 = parser.buf1;
      tableState.parserBuf2 = parser.buf2;

      var entry = {};
      entry.offset = parser.getObj();
      entry.gen = parser.getObj();
      var type = parser.getObj();

      if (isCmd(type, 'f')) {
        entry.free = true;
      } else if (isCmd(type, 'n')) {
        entry.uncompressed = true;
      }

      // Validate entry obj
      if (!isInt(entry.offset) || !isInt(entry.gen) ||
          !(entry.free || entry.uncompressed)) {
        error('Invalid entry in XRef subsection: ' + first + ', ' + count);
      }

      // The first xref table entry, i.e. obj 0, should be free. Attempting
      // to adjust an incorrect first obj # (fixes issue 3248 and 7229).
      if (i === 0 && entry.free && first === 1) {
        first = 0;
      }

      // Earlier xref sections take precedence; don't overwrite.
      if (!this.entries[i + first]) {
        this.entries[i + first] = entry;
      }
    }

    // Subsection finished: reset the per-entry checkpoint and forget
    // the header so the next iteration reads a new one.
    tableState.entryNum = 0;
    tableState.streamPos = stream.pos;
    tableState.parserBuf1 = parser.buf1;
    tableState.parserBuf2 = parser.buf2;
    delete tableState.firstEntryNum;
    delete tableState.entryCount;
  }

  // Sanity check: as per spec, first object must be free
  if (this.entries[0] && !this.entries[0].free) {
    error('Invalid XRef table: unexpected first object');
  }
  return obj;
},
|
2012-02-01 00:49:06 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
processXRefStream: function XRef_processXRefStream(stream) {
|
|
|
|
if (!('streamState' in this)) {
|
|
|
|
// Stores state of the stream as we process it so we can resume
|
|
|
|
// from middle of stream in case of missing data error
|
2013-05-10 12:26:28 +09:00
|
|
|
var streamParameters = stream.dict;
|
2013-02-07 08:19:29 +09:00
|
|
|
var byteWidths = streamParameters.get('W');
|
|
|
|
var range = streamParameters.get('Index');
|
|
|
|
if (!range) {
|
|
|
|
range = [0, streamParameters.get('Size')];
|
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
|
2013-02-07 08:19:29 +09:00
|
|
|
this.streamState = {
|
|
|
|
entryRanges: range,
|
|
|
|
byteWidths: byteWidths,
|
|
|
|
entryNum: 0,
|
|
|
|
streamPos: stream.pos
|
|
|
|
};
|
|
|
|
}
|
|
|
|
this.readXRefStream(stream);
|
|
|
|
delete this.streamState;
|
|
|
|
|
2013-05-10 12:26:28 +09:00
|
|
|
return stream.dict;
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2013-02-07 08:19:29 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
    readXRefStream: function XRef_readXRefStream(stream) {
      // Decodes the binary entries of a cross-reference stream (spec 7.5.8)
      // into `this.entries`. State is checkpointed into `this.streamState`
      // before each entry so that a MissingDataException can interrupt the
      // loop and a later retry resumes exactly where it left off.
      var i, j;
      var streamState = this.streamState;
      stream.pos = streamState.streamPos;

      // /W gives the byte width of each of the three entry fields.
      var byteWidths = streamState.byteWidths;
      var typeFieldWidth = byteWidths[0];
      var offsetFieldWidth = byteWidths[1];
      var generationFieldWidth = byteWidths[2];

      // /Index pairs: [first object number, entry count, first, count, ...].
      var entryRanges = streamState.entryRanges;
      while (entryRanges.length > 0) {
        var first = entryRanges[0];
        var n = entryRanges[1];

        if (!isInt(first) || !isInt(n)) {
          error('Invalid XRef range fields: ' + first + ', ' + n);
        }
        if (!isInt(typeFieldWidth) || !isInt(offsetFieldWidth) ||
            !isInt(generationFieldWidth)) {
          error('Invalid XRef entry fields length: ' + first + ', ' + n);
        }
        for (i = streamState.entryNum; i < n; ++i) {
          // Checkpoint before reading, so a retry re-reads this entry.
          streamState.entryNum = i;
          streamState.streamPos = stream.pos;

          // Each field is a big-endian integer of the configured width.
          var type = 0, offset = 0, generation = 0;
          for (j = 0; j < typeFieldWidth; ++j) {
            type = (type << 8) | stream.getByte();
          }
          // if type field is absent, its default value is 1
          if (typeFieldWidth === 0) {
            type = 1;
          }
          for (j = 0; j < offsetFieldWidth; ++j) {
            offset = (offset << 8) | stream.getByte();
          }
          for (j = 0; j < generationFieldWidth; ++j) {
            generation = (generation << 8) | stream.getByte();
          }
          var entry = {};
          entry.offset = offset;
          entry.gen = generation;
          switch (type) {
            case 0:
              // Type 0: free entry.
              entry.free = true;
              break;
            case 1:
              // Type 1: object stored uncompressed at `offset`.
              entry.uncompressed = true;
              break;
            case 2:
              // Type 2: compressed object; `offset` is the ObjStm number
              // and `gen` the index inside it. No flag needed.
              break;
            default:
              error('Invalid XRef entry type: ' + type);
          }
          // Earlier xref sections take precedence; never overwrite.
          if (!this.entries[first + i]) {
            this.entries[first + i] = entry;
          }
        }

        // Subsection done: reset the entry counter and consume the range.
        streamState.entryNum = 0;
        streamState.streamPos = stream.pos;
        entryRanges.splice(0, 2);
      }
    },
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
    indexObjects: function XRef_indexObjects() {
      // Simple scan through the PDF content to find objects,
      // trailers and XRef streams. Used as a recovery mechanism when the
      // regular xref data is broken; rebuilds `this.entries` from scratch
      // and returns the best trailer dictionary it can find.
      var TAB = 0x9, LF = 0xA, CR = 0xD, SPACE = 0x20;
      var PERCENT = 0x25, LT = 0x3C;

      // Reads characters starting at `offset` until EOL or '<' (the start
      // of a dictionary), without advancing the caller's position.
      function readToken(data, offset) {
        var token = '', ch = data[offset];
        while (ch !== LF && ch !== CR && ch !== LT) {
          if (++offset >= data.length) {
            break;
          }
          token += String.fromCharCode(ch);
          ch = data[offset];
        }
        return token;
      }
      // Returns the number of bytes skipped before `what` is found in
      // `data` (or before the end of `data` if it never occurs).
      function skipUntil(data, offset, what) {
        var length = what.length, dataLength = data.length;
        var skipped = 0;
        // finding byte sequence
        while (offset < dataLength) {
          var i = 0;
          while (i < length && data[offset + i] === what[i]) {
            ++i;
          }
          if (i >= length) {
            break; // sequence found
          }
          offset++;
          skipped++;
        }
        return skipped;
      }
      var objRegExp = /^(\d+)\s+(\d+)\s+obj\b/;
      // Byte sequences for 'trailer', 'startxref', 'endobj' and '/XRef'.
      var trailerBytes = new Uint8Array([116, 114, 97, 105, 108, 101, 114]);
      var startxrefBytes = new Uint8Array([115, 116, 97, 114, 116, 120, 114,
                                          101, 102]);
      var endobjBytes = new Uint8Array([101, 110, 100, 111, 98, 106]);
      var xrefBytes = new Uint8Array([47, 88, 82, 101, 102]);

      // Clear out any existing entries, since they may be bogus.
      this.entries.length = 0;

      var stream = this.stream;
      stream.pos = 0;
      var buffer = stream.getBytes();
      var position = stream.start, length = buffer.length;
      var trailers = [], xrefStms = [];
      while (position < length) {
        var ch = buffer[position];
        if (ch === TAB || ch === LF || ch === CR || ch === SPACE) {
          ++position;
          continue;
        }
        if (ch === PERCENT) { // %-comment
          do {
            ++position;
            if (position >= length) {
              break;
            }
            ch = buffer[position];
          } while (ch !== LF && ch !== CR);
          continue;
        }
        var token = readToken(buffer, position);
        var m;
        if (token.indexOf('xref') === 0 &&
            (token.length === 4 || /\s/.test(token[4]))) {
          // Classic xref table: remember its trailer position, then jump
          // past the matching 'startxref'.
          position += skipUntil(buffer, position, trailerBytes);
          trailers.push(position);
          position += skipUntil(buffer, position, startxrefBytes);
        } else if ((m = objRegExp.exec(token))) {
          // 'N G obj' header: index it if this object number is new.
          if (typeof this.entries[m[1]] === 'undefined') {
            this.entries[m[1]] = {
              offset: position - stream.start,
              gen: m[2] | 0,
              uncompressed: true
            };
          }
          var contentLength = skipUntil(buffer, position, endobjBytes) + 7;
          var content = buffer.subarray(position, position + contentLength);

          // checking XRef stream suspect
          // (it shall have '/XRef' and next char is not a letter)
          var xrefTagOffset = skipUntil(content, 0, xrefBytes);
          if (xrefTagOffset < contentLength &&
              content[xrefTagOffset + 5] < 64) {
            xrefStms.push(position - stream.start);
            this.xrefstms[position - stream.start] = 1; // Avoid recursion
          }

          position += contentLength;
        } else if (token.indexOf('trailer') === 0 &&
                   (token.length === 7 || /\s/.test(token[7]))) {
          trailers.push(position);
          position += skipUntil(buffer, position, startxrefBytes);
        } else {
          // Unrecognized token: skip it (+1 for its terminator).
          position += token.length + 1;
        }
      }
      // reading XRef streams
      var i, ii;
      for (i = 0, ii = xrefStms.length; i < ii; ++i) {
        this.startXRefQueue.push(xrefStms[i]);
        this.readXRef(/* recoveryMode */ true);
      }
      // finding main trailer
      var dict;
      for (i = 0, ii = trailers.length; i < ii; ++i) {
        stream.pos = trailers[i];
        var parser = new Parser(new Lexer(stream), true, this);
        var obj = parser.getObj();
        if (!isCmd(obj, 'trailer')) {
          continue;
        }
        // read the trailer dictionary
        if (!isDict(dict = parser.getObj())) {
          continue;
        }
        // taking the first one with 'ID'
        if (dict.has('ID')) {
          return dict;
        }
      }
      // no trailer with 'ID', taking last one (if exists)
      if (dict) {
        return dict;
      }
      // nothing helps
      // calling error() would reject worker with an UnknownErrorException.
      throw new InvalidPDFException('Invalid PDF structure');
    },
|
2013-02-07 08:19:29 +09:00
|
|
|
|
|
|
|
    readXRef: function XRef_readXRef(recoveryMode) {
      // Drains `this.startXRefQueue`, parsing each xref section (table or
      // stream) it points at, and following /Prev and /XRefStm links by
      // pushing them onto the queue. Returns the first (top) trailer dict.
      // MissingDataException is rethrown so callers can retry; any other
      // parse error triggers XRefParseException unless in recovery mode.
      var stream = this.stream;

      try {
        while (this.startXRefQueue.length) {
          var startXRef = this.startXRefQueue[0];

          stream.pos = startXRef + stream.start;

          var parser = new Parser(new Lexer(stream), true, this);
          var obj = parser.getObj();
          var dict;

          // Get dictionary
          if (isCmd(obj, 'xref')) {
            // Parse end-of-file XRef
            dict = this.processXRefTable(parser);
            if (!this.topDict) {
              this.topDict = dict;
            }

            // Recursively get other XRefs 'XRefStm', if any
            obj = dict.get('XRefStm');
            if (isInt(obj)) {
              var pos = obj;
              // ignore previously loaded xref streams
              // (possible infinite recursion)
              if (!(pos in this.xrefstms)) {
                this.xrefstms[pos] = 1;
                this.startXRefQueue.push(pos);
              }
            }
          } else if (isInt(obj)) {
            // Parse in-stream XRef: expect 'num gen obj <stream>'.
            if (!isInt(parser.getObj()) ||
                !isCmd(parser.getObj(), 'obj') ||
                !isStream(obj = parser.getObj())) {
              error('Invalid XRef stream');
            }
            dict = this.processXRefStream(obj);
            if (!this.topDict) {
              this.topDict = dict;
            }
            if (!dict) {
              error('Failed to read XRef stream');
            }
          } else {
            error('Invalid XRef stream header');
          }

          // Recursively get previous dictionary, if any
          obj = dict.get('Prev');
          if (isInt(obj)) {
            this.startXRefQueue.push(obj);
          } else if (isRef(obj)) {
            // The spec says Prev must not be a reference, i.e. "/Prev NNN"
            // This is a fallback for non-compliant PDFs, i.e. "/Prev NNN 0 R"
            this.startXRefQueue.push(obj.num);
          }

          // Only dequeue after a fully successful parse, so that a retry
          // after missing data starts from the same position.
          this.startXRefQueue.shift();
        }

        return this.topDict;
      } catch (e) {
        if (e instanceof MissingDataException) {
          throw e;
        }
        info('(while reading XRef): ' + e);
      }

      if (recoveryMode) {
        return;
      }
      throw new XRefParseException();
    },
|
2013-02-07 08:19:29 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
getEntry: function XRef_getEntry(i) {
|
2014-02-27 21:46:12 +09:00
|
|
|
var xrefEntry = this.entries[i];
|
2014-04-12 19:05:12 +09:00
|
|
|
if (xrefEntry && !xrefEntry.free && xrefEntry.offset) {
|
2014-02-27 21:46:12 +09:00
|
|
|
return xrefEntry;
|
|
|
|
}
|
|
|
|
return null;
|
2011-10-25 08:55:23 +09:00
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
fetchIfRef: function XRef_fetchIfRef(obj) {
|
2014-02-27 21:46:12 +09:00
|
|
|
if (!isRef(obj)) {
|
2011-10-25 08:55:23 +09:00
|
|
|
return obj;
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2011-10-25 08:55:23 +09:00
|
|
|
return this.fetch(obj);
|
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
fetch: function XRef_fetch(ref, suppressEncryption) {
|
2014-04-13 23:02:56 +09:00
|
|
|
assert(isRef(ref), 'ref object is not a reference');
|
2011-10-25 08:55:23 +09:00
|
|
|
var num = ref.num;
|
2013-04-09 07:14:56 +09:00
|
|
|
if (num in this.cache) {
|
2014-02-27 21:46:12 +09:00
|
|
|
var cacheEntry = this.cache[num];
|
|
|
|
return cacheEntry;
|
2013-04-09 07:14:56 +09:00
|
|
|
}
|
2012-01-09 05:03:00 +09:00
|
|
|
|
2014-02-27 21:46:12 +09:00
|
|
|
var xrefEntry = this.getEntry(num);
|
2012-01-09 05:03:00 +09:00
|
|
|
|
|
|
|
// the referenced entry can be free
|
2014-02-27 21:46:12 +09:00
|
|
|
if (xrefEntry === null) {
|
|
|
|
return (this.cache[num] = null);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (xrefEntry.uncompressed) {
|
2014-03-26 23:07:38 +09:00
|
|
|
xrefEntry = this.fetchUncompressed(ref, xrefEntry, suppressEncryption);
|
2014-02-27 21:46:12 +09:00
|
|
|
} else {
|
2014-03-26 23:07:38 +09:00
|
|
|
xrefEntry = this.fetchCompressed(xrefEntry, suppressEncryption);
|
|
|
|
}
|
2014-06-10 18:29:25 +09:00
|
|
|
if (isDict(xrefEntry)){
|
2014-06-19 12:41:33 +09:00
|
|
|
xrefEntry.objId = ref.toString();
|
2014-06-10 18:29:25 +09:00
|
|
|
} else if (isStream(xrefEntry)) {
|
2014-06-19 12:41:33 +09:00
|
|
|
xrefEntry.dict.objId = ref.toString();
|
2014-02-27 21:46:12 +09:00
|
|
|
}
|
2014-03-26 23:07:38 +09:00
|
|
|
return xrefEntry;
|
2014-02-27 21:46:12 +09:00
|
|
|
},
|
2011-10-25 08:55:23 +09:00
|
|
|
|
2014-03-21 04:28:22 +09:00
|
|
|
    fetchUncompressed: function XRef_fetchUncompressed(ref, xrefEntry,
                                                       suppressEncryption) {
      // Parses an uncompressed object at `xrefEntry.offset`, validating its
      // 'num gen obj' header against `ref`. Non-stream results are cached.
      var gen = ref.gen;
      var num = ref.num;
      if (xrefEntry.gen !== gen) {
        error('inconsistent generation in XRef');
      }
      var stream = this.stream.makeSubStream(xrefEntry.offset +
                                             this.stream.start);
      var parser = new Parser(new Lexer(stream), true, this);
      // Expected header tokens: object number, generation, 'obj' command.
      var obj1 = parser.getObj();
      var obj2 = parser.getObj();
      var obj3 = parser.getObj();
      if (!isInt(obj1) || parseInt(obj1, 10) !== num ||
          !isInt(obj2) || parseInt(obj2, 10) !== gen ||
          !isCmd(obj3)) {
        error('bad XRef entry');
      }
      if (!isCmd(obj3, 'obj')) {
        // some bad PDFs use "obj1234" and really mean 1234
        if (obj3.cmd.indexOf('obj') === 0) {
          num = parseInt(obj3.cmd.substring(3), 10);
          if (!isNaN(num)) {
            // Note: returns the parsed number itself, not a PDF object.
            return num;
          }
        }
        error('bad XRef entry');
      }
      if (this.encrypt && !suppressEncryption) {
        // Decrypt with a cipher keyed on this object's number/generation.
        xrefEntry = parser.getObj(this.encrypt.createCipherTransform(num, gen));
      } else {
        xrefEntry = parser.getObj();
      }
      // Streams are not cached (they hold stream state); everything else is.
      if (!isStream(xrefEntry)) {
        this.cache[num] = xrefEntry;
      }
      return xrefEntry;
    },
|
2011-10-25 08:55:23 +09:00
|
|
|
|
2014-02-27 21:46:12 +09:00
|
|
|
    fetchCompressed: function XRef_fetchCompressed(xrefEntry,
                                                   suppressEncryption) {
      // Fetches a compressed object from an object stream (ObjStm). For a
      // type-2 entry, `offset` is the ObjStm's object number and `gen` is
      // the index of the target object inside that stream (spec 7.5.7).
      var tableOffset = xrefEntry.offset;
      var stream = this.fetch(new Ref(tableOffset, 0));
      if (!isStream(stream)) {
        error('bad ObjStm stream');
      }
      // /First: byte offset of the first object; /N: number of objects.
      var first = stream.dict.get('First');
      var n = stream.dict.get('N');
      if (!isInt(first) || !isInt(n)) {
        error('invalid first and n parameters for ObjStm stream');
      }
      var parser = new Parser(new Lexer(stream), false, this);
      parser.allowStreams = true;
      var i, entries = [], num, nums = [];
      // read the object numbers to populate cache
      for (i = 0; i < n; ++i) {
        num = parser.getObj();
        if (!isInt(num)) {
          error('invalid object number in the ObjStm stream: ' + num);
        }
        nums.push(num);
        // Offsets are read to advance the parser but are otherwise unused;
        // the objects are parsed sequentially below.
        var offset = parser.getObj();
        if (!isInt(offset)) {
          error('invalid object offset in the ObjStm stream: ' + offset);
        }
      }
      // read stream objects for cache
      for (i = 0; i < n; ++i) {
        entries.push(parser.getObj());
        // The ObjStm should not contain 'endobj'. If it's present, skip over it
        // to support corrupt PDFs (fixes issue 5241, bug 898610, bug 1037816).
        if (isCmd(parser.buf1, 'endobj')) {
          parser.shift();
        }
        num = nums[i];
        var entry = this.entries[num];
        // Only cache objects whose xref entry actually points into this
        // ObjStm at this index, so stale/overridden entries are ignored.
        if (entry && entry.offset === tableOffset && entry.gen === i) {
          this.cache[num] = entries[i];
        }
      }
      xrefEntry = entries[xrefEntry.gen];
      if (xrefEntry === undefined) {
        error('bad XRef entry for compressed object');
      }
      return xrefEntry;
    },
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2013-06-05 09:57:52 +09:00
|
|
|
fetchIfRefAsync: function XRef_fetchIfRefAsync(obj) {
|
|
|
|
if (!isRef(obj)) {
|
2014-05-01 22:27:31 +09:00
|
|
|
return Promise.resolve(obj);
|
2013-06-05 09:57:52 +09:00
|
|
|
}
|
|
|
|
return this.fetchAsync(obj);
|
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2013-06-05 09:57:52 +09:00
|
|
|
fetchAsync: function XRef_fetchAsync(ref, suppressEncryption) {
|
2014-08-06 11:22:12 +09:00
|
|
|
var streamManager = this.stream.manager;
|
|
|
|
var xref = this;
|
|
|
|
return new Promise(function tryFetch(resolve, reject) {
|
|
|
|
try {
|
|
|
|
resolve(xref.fetch(ref, suppressEncryption));
|
|
|
|
} catch (e) {
|
|
|
|
if (e instanceof MissingDataException) {
|
2015-10-21 07:45:55 +09:00
|
|
|
streamManager.requestRange(e.begin, e.end).then(function () {
|
2014-08-06 11:22:12 +09:00
|
|
|
tryFetch(resolve, reject);
|
2015-10-21 07:45:55 +09:00
|
|
|
}, reject);
|
2014-08-06 11:22:12 +09:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
reject(e);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
},
|
2014-02-27 21:46:12 +09:00
|
|
|
|
2012-04-05 05:43:26 +09:00
|
|
|
getCatalogObj: function XRef_getCatalogObj() {
|
2012-04-06 00:12:48 +09:00
|
|
|
return this.root;
|
2011-10-25 08:55:23 +09:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2011-12-07 07:18:40 +09:00
|
|
|
return XRef;
|
2011-10-25 08:55:23 +09:00
|
|
|
})();
|
|
|
|
|
2013-03-01 08:29:07 +09:00
|
|
|
/**
|
2015-12-26 01:35:21 +09:00
|
|
|
* A NameTree/NumberTree is like a Dict but has some advantageous properties,
|
|
|
|
* see the specification (7.9.6 and 7.9.7) for additional details.
|
|
|
|
* TODO: implement all the Dict functions and make this more efficient.
|
2013-03-01 08:29:07 +09:00
|
|
|
*/
|
2015-12-26 01:35:21 +09:00
|
|
|
var NameOrNumberTree = (function NameOrNumberTreeClosure() {
  // Abstract base for NameTree ('/Names') and NumberTree ('/Nums');
  // subclasses set `this.root`, `this.xref` and `this._type`.
  function NameOrNumberTree(root, xref) {
    throw new Error('Cannot initialize NameOrNumberTree.');
  }

  NameOrNumberTree.prototype = {
    // Flattens the entire tree into a plain { key: value } object by a
    // breadth-first traversal of the /Kids nodes.
    getAll: function NameOrNumberTree_getAll() {
      var dict = Object.create(null);
      if (!this.root) {
        return dict;
      }
      var xref = this.xref;
      // Reading Name/Number tree.
      var processed = new RefSet();
      processed.put(this.root);
      var queue = [this.root];
      while (queue.length > 0) {
        var i, n;
        var obj = xref.fetchIfRef(queue.shift());
        if (!isDict(obj)) {
          continue;
        }
        if (obj.has('Kids')) {
          var kids = obj.get('Kids');
          for (i = 0, n = kids.length; i < n; i++) {
            var kid = kids[i];
            // A node appearing twice would make the traversal loop forever.
            assert(!processed.has(kid),
                   'Duplicate entry in "' + this._type + '" tree.');
            queue.push(kid);
            processed.put(kid);
          }
          continue;
        }
        // Leaf node: entries alternate key, value, key, value, ...
        var entries = obj.get(this._type);
        if (isArray(entries)) {
          for (i = 0, n = entries.length; i < n; i += 2) {
            dict[xref.fetchIfRef(entries[i])] = xref.fetchIfRef(entries[i + 1]);
          }
        }
      }
      return dict;
    },

    // Looks up a single key, descending the tree via the /Limits ranges of
    // the kid nodes. Returns the value, or null when the key is absent.
    get: function NameOrNumberTree_get(key) {
      if (!this.root) {
        return null;
      }

      var xref = this.xref;
      var kidsOrEntries = xref.fetchIfRef(this.root);
      var loopCount = 0;
      // Guard against malformed (cyclic or excessively deep) trees.
      var MAX_LEVELS = 10;
      var l, r, m;

      // Perform a binary search to quickly find the entry that
      // contains the key we are looking for.
      while (kidsOrEntries.has('Kids')) {
        if (++loopCount > MAX_LEVELS) {
          warn('Search depth limit reached for "' + this._type + '" tree.');
          return null;
        }

        var kids = kidsOrEntries.get('Kids');
        if (!isArray(kids)) {
          return null;
        }

        l = 0;
        r = kids.length - 1;
        while (l <= r) {
          m = (l + r) >> 1;
          var kid = xref.fetchIfRef(kids[m]);
          // /Limits = [smallest key, largest key] covered by this kid.
          var limits = kid.get('Limits');

          if (key < xref.fetchIfRef(limits[0])) {
            r = m - 1;
          } else if (key > xref.fetchIfRef(limits[1])) {
            l = m + 1;
          } else {
            // Key is within this kid's range: descend into it.
            kidsOrEntries = xref.fetchIfRef(kids[m]);
            break;
          }
        }
        if (l > r) {
          return null;
        }
      }

      // If we get here, then we have found the right entry. Now go through the
      // entries in the dictionary until we find the key we're looking for.
      var entries = kidsOrEntries.get(this._type);
      if (isArray(entries)) {
        // Perform a binary search to reduce the lookup time.
        l = 0;
        r = entries.length - 2;
        while (l <= r) {
          // Check only even indices (0, 2, 4, ...) because the
          // odd indices contain the actual data.
          m = (l + r) & ~1;
          var currentKey = xref.fetchIfRef(entries[m]);
          if (key < currentKey) {
            r = m - 2;
          } else if (key > currentKey) {
            l = m + 2;
          } else {
            return xref.fetchIfRef(entries[m + 1]);
          }
        }
      }
      return null;
    }
  };
  return NameOrNumberTree;
})();
|
|
|
|
|
|
|
|
var NameTree = (function NameTreeClosure() {
  /**
   * A name tree (see spec 7.9.6): maps string keys to objects via the
   * '/Names' arrays of its leaf nodes.
   */
  function NameTree(root, xref) {
    this.root = root;
    this.xref = xref;
    this._type = 'Names';
  }

  // All traversal/lookup logic is shared with NumberTree via the
  // NameOrNumberTree prototype.
  Util.inherit(NameTree, NameOrNumberTree, {});

  return NameTree;
})();
|
|
|
|
|
2015-12-26 01:35:21 +09:00
|
|
|
var NumberTree = (function NumberTreeClosure() {
  /**
   * A number tree (see spec 7.9.7): maps integer keys to objects via the
   * '/Nums' arrays of its leaf nodes.
   */
  function NumberTree(root, xref) {
    this.root = root;
    this.xref = xref;
    this._type = 'Nums';
  }

  // All traversal/lookup logic is shared with NameTree via the
  // NameOrNumberTree prototype.
  Util.inherit(NumberTree, NameOrNumberTree, {});

  return NumberTree;
})();
|
|
|
|
|
2014-03-19 05:32:47 +09:00
|
|
|
/**
|
2015-02-03 00:12:52 +09:00
|
|
|
* "A PDF file can refer to the contents of another file by using a File
|
2014-03-19 05:32:47 +09:00
|
|
|
* Specification (PDF 1.1)", see the spec (7.11) for more details.
|
|
|
|
* NOTE: Only embedded files are supported (as part of the attachments support)
|
2015-02-03 00:12:52 +09:00
|
|
|
* TODO: support the 'URL' file system (with caching if !/V), portable
|
2014-03-19 05:32:47 +09:00
|
|
|
* collections attributes and related files (/RF)
|
|
|
|
*/
|
|
|
|
var FileSpec = (function FileSpecClosure() {
  /**
   * Wraps a file specification dictionary (see spec 7.11). Only embedded
   * files (/EF) are supported; related files (/RF) are ignored.
   */
  function FileSpec(root, xref) {
    if (!root || !isDict(root)) {
      return;
    }
    this.xref = xref;
    this.root = root;
    if (root.has('FS')) {
      this.fs = root.get('FS');
    }
    this.description = root.has('Desc') ?
      stringToPDFString(root.get('Desc')) :
      '';
    if (root.has('RF')) {
      warn('Related file specifications are not supported');
    }
    this.contentAvailable = true;
    if (!root.has('EF')) {
      this.contentAvailable = false;
      warn('Non-embedded file specifications are not supported');
    }
  }

  // Returns the first filename-like value present in `dict`, checked in
  // priority order: UF, F, Unix, Mac, DOS. Returns null when none exist.
  function pickPlatformItem(dict) {
    var keys = ['UF', 'F', 'Unix', 'Mac', 'DOS'];
    for (var i = 0; i < keys.length; i++) {
      if (dict.has(keys[i])) {
        return dict.get(keys[i]);
      }
    }
    return null;
  }

  FileSpec.prototype = {
    get filename() {
      // Lazily computed; escaped backslashes are unescaped and remaining
      // backslashes normalized to forward slashes.
      if (!this._filename && this.root) {
        var rawName = pickPlatformItem(this.root) || 'unnamed';
        this._filename = stringToPDFString(rawName).
          replace(/\\\\/g, '\\').
          replace(/\\\//g, '/').
          replace(/\\/g, '/');
      }
      return this._filename;
    },
    get content() {
      // The embedded file's bytes, or null when unavailable.
      if (!this.contentAvailable) {
        return null;
      }
      if (!this.contentRef && this.root) {
        this.contentRef = pickPlatformItem(this.root.get('EF'));
      }
      if (!this.contentRef) {
        warn('Embedded file specification does not have a content');
        return null;
      }
      var fileObj = this.xref.fetchIfRef(this.contentRef);
      if (fileObj && isStream(fileObj)) {
        return fileObj.getBytes();
      }
      warn('Embedded file specification points to non-existing/invalid ' +
           'content');
      return null;
    },
    get serializable() {
      // Structured-clone-friendly snapshot for transfer to the main thread.
      return {
        filename: this.filename,
        content: this.content
      };
    }
  };
  return FileSpec;
})();
|
|
|
|
|
2013-06-05 09:57:52 +09:00
|
|
|
/**
|
|
|
|
* A helper for loading missing data in object graphs. It traverses the graph
|
|
|
|
 * depth first and queues up any objects that have missing data. Once it
 * has traversed as many objects as are available, it attempts to bundle the
|
|
|
|
* missing data requests and then resume from the nodes that weren't ready.
|
|
|
|
*
|
|
|
|
 * NOTE: It provides protection from circular references by keeping track
 * of loaded references. However, you must be careful not to load any graphs
|
|
|
|
* that have references to the catalog or other pages since that will cause the
|
|
|
|
* entire PDF document object graph to be traversed.
|
|
|
|
*/
|
|
|
|
var ObjectLoader = (function() {
  // A node can only have children when it is a reference, a dictionary,
  // an array, or a stream.
  function mayHaveChildren(value) {
    return isRef(value) || isDict(value) || isArray(value) || isStream(value);
  }

  // Appends every candidate child of |node| to |nodesToVisit|.
  function addChildren(node, nodesToVisit) {
    var value;
    if (isDict(node) || isStream(node)) {
      // Dictionaries carry their map directly; streams expose it via dict.
      var map = isDict(node) ? node.map : node.dict.map;
      for (var key in map) {
        value = map[key];
        if (mayHaveChildren(value)) {
          nodesToVisit.push(value);
        }
      }
    } else if (isArray(node)) {
      for (var i = 0, ii = node.length; i < ii; i++) {
        value = node[i];
        if (mayHaveChildren(value)) {
          nodesToVisit.push(value);
        }
      }
    }
  }

  /**
   * @param {Object} obj - The object whose properties are traversed.
   * @param {Array} keys - The property names of |obj| to start loading from.
   * @param {XRef} xref - Used to resolve indirect references.
   */
  function ObjectLoader(obj, keys, xref) {
    this.obj = obj;
    this.keys = keys;
    this.xref = xref;
    this.refSet = null;
    this.capability = null;
  }

  ObjectLoader.prototype = {
    /**
     * Walks the graphs rooted at the requested properties until all
     * reachable data is loaded.
     * @returns {Promise} Resolved once no data is missing.
     */
    load: function ObjectLoader_load() {
      this.capability = createPromiseCapability();

      // Don't walk the graph if all the data is already loaded.
      if (!(this.xref.stream instanceof ChunkedStream) ||
          this.xref.stream.getMissingChunks().length === 0) {
        this.capability.resolve();
        return this.capability.promise;
      }

      this.refSet = new RefSet();
      // Seed the traversal with the requested properties.
      var nodesToVisit = [];
      for (var i = 0; i < this.keys.length; i++) {
        nodesToVisit.push(this.obj[this.keys[i]]);
      }

      this._walk(nodesToVisit);
      return this.capability.promise;
    },

    _walk: function ObjectLoader_walk(nodesToVisit) {
      var nodesToRevisit = [];
      var pendingRequests = [];
      // DFS walk of the object graph.
      while (nodesToVisit.length) {
        var currentNode = nodesToVisit.pop();

        // Only references or chunked streams can cause missing data
        // exceptions.
        if (isRef(currentNode)) {
          // Skip nodes that have already been visited.
          if (this.refSet.has(currentNode)) {
            continue;
          }
          try {
            this.refSet.put(currentNode);
            currentNode = this.xref.fetch(currentNode);
          } catch (ex) {
            if (!(ex instanceof MissingDataException)) {
              throw ex;
            }
            // |currentNode| is still the reference here, since the fetch
            // threw before the assignment above took place.
            nodesToRevisit.push(currentNode);
            pendingRequests.push({ begin: ex.begin, end: ex.end });
          }
        }
        if (currentNode && currentNode.getBaseStreams) {
          var foundMissingData = false;
          var baseStreams = currentNode.getBaseStreams();
          for (var i = 0; i < baseStreams.length; i++) {
            var stream = baseStreams[i];
            if (stream.getMissingChunks && stream.getMissingChunks().length) {
              foundMissingData = true;
              pendingRequests.push({ begin: stream.start, end: stream.end });
            }
          }
          if (foundMissingData) {
            nodesToRevisit.push(currentNode);
          }
        }

        addChildren(currentNode, nodesToVisit);
      }

      if (pendingRequests.length) {
        // Bundle all missing ranges into one request, then resume the walk
        // from the nodes that were not ready.
        this.xref.stream.manager.requestRanges(pendingRequests).then(
            function pendingRequestCallback() {
          for (var i = 0; i < nodesToRevisit.length; i++) {
            var node = nodesToRevisit[i];
            // Remove any reference nodes from the current refset so they
            // aren't skipped when we revisit them.
            if (isRef(node)) {
              this.refSet.remove(node);
            }
          }
          this._walk(nodesToRevisit);
        }.bind(this), this.capability.reject);
        return;
      }
      // Everything is loaded.
      this.refSet = null;
      this.capability.resolve();
    }
  };

  return ObjectLoader;
})();
|
2015-11-22 01:32:47 +09:00
|
|
|
|
|
|
|
// Public API of this module, attached to the object supplied by the UMD
// wrapper (AMD, CommonJS, or the pdfjsCoreObj global).
exports.Catalog = Catalog;
exports.ObjectLoader = ObjectLoader;
exports.XRef = XRef;
exports.FileSpec = FileSpec;
|
2015-11-22 01:32:47 +09:00
|
|
|
}));
|