Cache the normalized unicode-value on the Glyph-instance

Currently, during text extraction, we normalize and (when necessary) reverse the unicode values anew for every glyph encountered. That work is redundant, since the result never changes for a given glyph, hence this patch moves it into the `Glyph` instance and makes it *lazily* initialized.
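In other words, the first read of the property does the normalization work and caches the result directly on the instance. A minimal, self-contained sketch of that pattern (not the actual PDF.js code, which follows in the diffs below; `normalize("NFKC")` merely stands in for the real normalization table):

```js
class Glyph {
  constructor(unicode) {
    this.unicode = unicode;
  }

  get normalizedUnicode() {
    // Stand-in for the real table lookup + RTL handling.
    const value = this.unicode.normalize("NFKC");
    // Define an own data-property that shadows this prototype getter,
    // so every later read is a plain property access.
    Object.defineProperty(this, "normalizedUnicode", { value });
    return value;
  }
}

const glyph = new Glyph("\uFB01"); // U+FB01, LATIN SMALL LIGATURE FI
console.log(glyph.normalizedUnicode); // "fi" -- computed and cached
console.log(glyph.normalizedUnicode); // "fi" -- plain property read
```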

Taking the `tracemonkey.pdf` document as an example: when extracting its text content there are a total of 69236 characters but only 595 unique `Glyph` instances, which means a 99.1 percent cache hit-rate. Generally speaking, the longer the PDF document, the more beneficial this caching should be.
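The hit-rate arithmetic, for the record: each unique `Glyph` instance misses the cache exactly once, and every repeated occurrence afterwards is a hit:

```js
const totalChars = 69236; // characters extracted from tracemonkey.pdf
const uniqueGlyphs = 595; // unique Glyph instances, i.e. one miss each
const hitRate = (totalChars - uniqueGlyphs) / totalChars;
console.log(`${(hitRate * 100).toFixed(1)}%`); // "99.1%"
```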

*Please note:* The old code is fast enough that it unfortunately seems difficult to measure a (clear) performance improvement with this patch, so I completely understand if it's deemed an unnecessary change.
Author: Jonas Jenwald
Date:   2022-11-03 10:20:18 +01:00
Parent: eda51d1dcc
Commit: c33b8d7692

3 changed files with 24 additions and 11 deletions

File: src/core/evaluator.js

@@ -51,11 +51,6 @@ import {
   getStdFontMap,
   getSymbolsFonts,
 } from "./standard_fonts.js";
-import {
-  getNormalizedUnicodes,
-  getUnicodeForGlyph,
-  reverseIfRtl,
-} from "./unicode.js";
 import { getTilingPatternIR, Pattern } from "./pattern.js";
 import { getXfaFontDict, getXfaFontName } from "./xfa_fonts.js";
 import { IdentityToUnicodeMap, ToUnicodeMap } from "./to_unicode_map.js";
@@ -75,6 +70,7 @@ import { DecodeStream } from "./decode_stream.js";
 import { getGlyphsUnicode } from "./glyphlist.js";
 import { getLookupTableFactory } from "./core_utils.js";
 import { getMetrics } from "./metrics.js";
+import { getUnicodeForGlyph } from "./unicode.js";
 import { MurmurHash3_64 } from "../shared/murmurhash3.js";
 import { OperatorList } from "./operator_list.js";
 import { PDFImage } from "./image.js";
@@ -2293,7 +2289,6 @@ class PartialEvaluator {
     if (includeMarkedContent) {
       markedContentData = markedContentData || { level: 0 };
     }
-    const NormalizedUnicodes = getNormalizedUnicodes();
 
     const textContent = {
       items: [],
@@ -2839,9 +2834,7 @@ class PartialEvaluator {
         textChunk.prevTransform = getCurrentTextTransform();
       }
 
-      let glyphUnicode = glyph.unicode;
-      glyphUnicode = NormalizedUnicodes[glyphUnicode] || glyphUnicode;
-      glyphUnicode = reverseIfRtl(glyphUnicode);
+      const glyphUnicode = glyph.normalizedUnicode;
       if (saveLastChar(glyphUnicode)) {
         // The two last chars are a non-whitespace followed by a whitespace
         // and then this non-whitespace, so we insert a whitespace here.

File: src/core/fonts.js

@@ -35,9 +35,11 @@ import {
 } from "./fonts_utils.js";
 import {
   getCharUnicodeCategory,
+  getNormalizedUnicodes,
   getUnicodeForGlyph,
   getUnicodeRangeFor,
   mapSpecialUnicodeValues,
+  reverseIfRtl,
 } from "./unicode.js";
 import { getDingbatsGlyphsUnicode, getGlyphsUnicode } from "./glyphlist.js";
 import {
@@ -218,6 +220,24 @@ class Glyph {
     this.isZeroWidthDiacritic = category.isZeroWidthDiacritic;
     this.isInvisibleFormatMark = category.isInvisibleFormatMark;
   }
+
+  /**
+   * This property, which is only used by `PartialEvaluator.getTextContent`,
+   * is purposely made non-serializable.
+   * @type {string}
+   */
+  get normalizedUnicode() {
+    return shadow(
+      this,
+      "normalizedUnicode",
+      reverseIfRtl(Glyph._NormalizedUnicodes[this.unicode] || this.unicode),
+      /* nonSerializable = */ true
+    );
+  }
+
+  static get _NormalizedUnicodes() {
+    return shadow(this, "_NormalizedUnicodes", getNormalizedUnicodes());
+  }
 }
 
 function int16(b0, b1) {
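Note that `shadow` is used at two levels in the hunk above: the static `_NormalizedUnicodes` getter builds the normalization table once per worker and caches it on the `Glyph` class itself, while the instance getter caches each glyph's final string. A rough, runnable sketch of this two-level caching (the inlined `shadow` and the one-entry stand-in table are simplifications, and `reverseIfRtl` is omitted):

```js
function shadow(obj, prop, value, nonSerializable = false) {
  // Replaces the (prototype or class) accessor with a frozen own property.
  Object.defineProperty(obj, prop, {
    value,
    enumerable: !nonSerializable,
    configurable: true,
    writable: false,
  });
  return value;
}

let tableBuilds = 0;
function getNormalizedUnicodes() {
  tableBuilds++; // the real builder allocates a large lookup table
  return { "\uFB01": "fi" };
}

class Glyph {
  constructor(unicode) {
    this.unicode = unicode;
  }

  get normalizedUnicode() {
    return shadow(
      this,
      "normalizedUnicode",
      Glyph._NormalizedUnicodes[this.unicode] || this.unicode,
      /* nonSerializable = */ true
    );
  }

  static get _NormalizedUnicodes() {
    // Cached on the class itself: built once, shared by every instance.
    return shadow(this, "_NormalizedUnicodes", getNormalizedUnicodes());
  }
}

const a = new Glyph("\uFB01");
const b = new Glyph("\uFB01");
console.log(a.normalizedUnicode, b.normalizedUnicode); // "fi fi"
console.log(tableBuilds); // 1 -- the table was only built once
```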

File: src/shared/util.js

@@ -498,7 +498,7 @@ function createValidAbsoluteUrl(url, baseUrl = null, options = null) {
   return null;
 }
 
-function shadow(obj, prop, value) {
+function shadow(obj, prop, value, nonSerializable = false) {
   if (
     typeof PDFJSDev === "undefined" ||
     PDFJSDev.test("!PRODUCTION || TESTING")
@@ -510,7 +510,7 @@ function shadow(obj, prop, value) {
   }
   Object.defineProperty(obj, prop, {
     value,
-    enumerable: true,
+    enumerable: !nonSerializable,
     configurable: true,
     writable: false,
   });
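As for why `nonSerializable` boils down to `enumerable: false`: enumeration-based copies of a plain object, including the structured-clone algorithm behind `postMessage`, as well as `JSON.stringify` and `Object.keys`, skip non-enumerable own properties, so the cached value never leaks out of the worker. A small illustration (not PDF.js code; `structuredClone` needs Node 17+ or a modern browser):

```js
const glyph = { unicode: "\uFB01" };

// What `shadow(..., /* nonSerializable = */ true)` effectively does:
Object.defineProperty(glyph, "normalizedUnicode", {
  value: "fi",
  enumerable: false,
  configurable: true,
  writable: false,
});

console.log(glyph.normalizedUnicode); // "fi" -- still readable in place
console.log(Object.keys(glyph)); // ["unicode"]
console.log(JSON.stringify(glyph)); // {"unicode":"ﬁ"}
console.log("normalizedUnicode" in structuredClone(glyph)); // false
```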