Convert src/core/type1_font.js to use standard classes

Jonas Jenwald 2021-05-02 16:54:45 +02:00
parent 4bd69556ab
commit f64b7922b3

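The conversion drops the `Type1FontClosure` IIFE and the `Type1Font.prototype` object literal in favour of a standard class declaration; the method bodies themselves are largely unchanged. A minimal before/after sketch of the pattern (bodies elided, not the actual code):

// Before: a constructor function wrapped in a closure, with the methods
// assigned to the prototype object.
const Type1Font = (function Type1FontClosure() {
  // eslint-disable-next-line no-shadow
  function Type1Font(name, file, properties) {
    // ...
  }

  Type1Font.prototype = {
    get numGlyphs() {
      return this.charstrings.length + 1;
    },
    getCharset: function Type1Font_getCharset() {
      // ...
    },
  };

  return Type1Font;
})();

// After: a standard class with the same members.
class Type1Font {
  constructor(name, file, properties) {
    // ...
  }

  get numGlyphs() {
    return this.charstrings.length + 1;
  }

  getCharset() {
    // ...
  }
}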

@@ -30,129 +30,130 @@ import { Stream } from "./stream.js";
import { Type1Parser } from "./type1_parser.js";
import { warn } from "../shared/util.js";

function findBlock(streamBytes, signature, startIndex) {
  const streamBytesLength = streamBytes.length;
  const signatureLength = signature.length;
  const scanLength = streamBytesLength - signatureLength;

  let i = startIndex,
    found = false;
  while (i < scanLength) {
    let j = 0;
    while (j < signatureLength && streamBytes[i + j] === signature[j]) {
      j++;
    }
    if (j >= signatureLength) {
      // `signature` found, skip over whitespace.
      i += j;
      while (i < streamBytesLength && isWhiteSpace(streamBytes[i])) {
        i++;
      }
      found = true;
      break;
    }
    i++;
  }
  return {
    found,
    length: i,
  };
}
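As a rough illustration of this helper's contract, run in the module scope above (the input bytes are hypothetical, not real font data): `findBlock` reports whether the signature occurs, and the offset just past the signature and any trailing whitespace.

// Hypothetical example: scan "abc eexec  XY" (two spaces after "eexec")
// for the "eexec" signature.
const bytes = new TextEncoder().encode("abc eexec  XY");
const EEXEC_SIGNATURE = [0x65, 0x65, 0x78, 0x65, 0x63]; // "eexec"
const block = findBlock(bytes, EEXEC_SIGNATURE, 0);
// block.found === true and block.length === 11: the offset just past
// "eexec" and the two trailing spaces, i.e. where the binary data starts.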
function getHeaderBlock(stream, suggestedLength) {
  const EEXEC_SIGNATURE = [0x65, 0x65, 0x78, 0x65, 0x63];

  const streamStartPos = stream.pos; // Save the initial stream position.
  let headerBytes, headerBytesLength, block;
  try {
    headerBytes = stream.getBytes(suggestedLength);
    headerBytesLength = headerBytes.length;
  } catch (ex) {
    if (ex instanceof MissingDataException) {
      throw ex;
    }
    // Ignore errors if the `suggestedLength` is huge enough that a Uint8Array
    // cannot hold the result of `getBytes`, and fallback to simply checking
    // the entire stream (fixes issue3928.pdf).
  }

  if (headerBytesLength === suggestedLength) {
    // Most of the time `suggestedLength` is correct, so to speed things up we
    // initially only check the last few bytes to see if the header was found.
    // Otherwise we (potentially) check the entire stream to prevent errors in
    // `Type1Parser` (fixes issue5686.pdf).
    block = findBlock(
      headerBytes,
      EEXEC_SIGNATURE,
      suggestedLength - 2 * EEXEC_SIGNATURE.length
    );

    if (block.found && block.length === suggestedLength) {
      return {
        stream: new Stream(headerBytes),
        length: suggestedLength,
      };
    }
  }
  warn('Invalid "Length1" property in Type1 font -- trying to recover.');
  stream.pos = streamStartPos; // Reset the stream position.

  const SCAN_BLOCK_LENGTH = 2048;
  let actualLength;
  while (true) {
    const scanBytes = stream.peekBytes(SCAN_BLOCK_LENGTH);
    block = findBlock(scanBytes, EEXEC_SIGNATURE, 0);

    if (block.length === 0) {
      break;
    }
    stream.pos += block.length; // Update the stream position.

    if (block.found) {
      actualLength = stream.pos - streamStartPos;
      break;
    }
  }
  stream.pos = streamStartPos; // Reset the stream position.

  if (actualLength) {
    return {
      stream: new Stream(stream.getBytes(actualLength)),
      length: actualLength,
    };
  }
  warn('Unable to recover "Length1" property in Type1 font -- using as is.');
  return {
    stream: new Stream(stream.getBytes(suggestedLength)),
    length: suggestedLength,
  };
}
function getEexecBlock(stream, suggestedLength) {
  // We should ideally parse the eexec block to ensure that `suggestedLength`
  // is correct, so we don't truncate the block data if it's too small.
  // However, this would also require checking if the fixed-content portion
  // exists (using the 'Length3' property), and ensuring that it's valid.
  //
  // Given that `suggestedLength` almost always is correct, all the validation
  // would require a great deal of unnecessary parsing for most fonts.
  // To save time, we always fetch the entire stream instead, which also avoid
  // issues if `suggestedLength` is huge (see comment in `getHeaderBlock`).
  //
  // NOTE: This means that the function can include the fixed-content portion
  // in the returned eexec block. In practice this does *not* seem to matter,
  // since `Type1Parser_extractFontProgram` will skip over any non-commands.
  const eexecBytes = stream.getBytes();
  return {
    stream: new Stream(eexecBytes),
    length: eexecBytes.length,
  };
}
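The calling code lives elsewhere in this file, outside the hunks shown here, so the sketch below is hedged and its variable names (`file`, `length1`, `length2`) are illustrative: an embedded Type1 font is split at the "eexec" marker using the font dictionary's Length1/Length2 hints, falling back to a scan when Length1 is wrong.

// Hedged sketch, not the actual calling code. `file` is a Stream over the
// embedded font data. getHeaderBlock() validates (or recovers) Length1 and
// leaves the stream positioned at the eexec-encrypted portion, which
// getEexecBlock() then reads in full.
const headerBlock = getHeaderBlock(file, length1);
const eexecBlock = getEexecBlock(file, length2);
// headerBlock.stream and eexecBlock.stream are subsequently parsed by
// Type1Parser.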
/**
 * Type1Font is also a CIDFontType0.
 */
class Type1Font {
  constructor(name, file, properties) {
    // Some bad generators embed pfb file as is, we have to strip 6-byte header.
    // Also, length1 and length2 might be off by 6 bytes as well.
    // http://www.math.ubc.ca/~cass/piscript/type1.pdf
@@ -215,222 +216,209 @@ const Type1Font = (function Type1FontClosure() {
    this.seacs = this.getSeacs(data.charstrings);
  }

  get numGlyphs() {
    return this.charstrings.length + 1;
  }

  getCharset() {
    const charset = [".notdef"];
    const charstrings = this.charstrings;
    for (let glyphId = 0; glyphId < charstrings.length; glyphId++) {
      charset.push(charstrings[glyphId].glyphName);
    }
    return charset;
  }

  getGlyphMapping(properties) {
    const charstrings = this.charstrings;

    if (properties.composite) {
      const charCodeToGlyphId = Object.create(null);
      // Map CIDs directly to GIDs.
      for (
        let glyphId = 0, charstringsLen = charstrings.length;
        glyphId < charstringsLen;
        glyphId++
      ) {
        const charCode = properties.cMap.charCodeOf(glyphId);
        // Add 1 because glyph 0 is duplicated.
        charCodeToGlyphId[charCode] = glyphId + 1;
      }
      return charCodeToGlyphId;
    }

    const glyphNames = [".notdef"];
    let builtInEncoding, glyphId;
    for (glyphId = 0; glyphId < charstrings.length; glyphId++) {
      glyphNames.push(charstrings[glyphId].glyphName);
    }
    const encoding = properties.builtInEncoding;
    if (encoding) {
      builtInEncoding = Object.create(null);
      for (const charCode in encoding) {
        glyphId = glyphNames.indexOf(encoding[charCode]);
        if (glyphId >= 0) {
          builtInEncoding[charCode] = glyphId;
        }
      }
    }

    return type1FontGlyphMapping(properties, builtInEncoding, glyphNames);
  }

  hasGlyphId(id) {
    if (id < 0 || id >= this.numGlyphs) {
      return false;
    }
    if (id === 0) {
      // notdef is always defined.
      return true;
    }
    const glyph = this.charstrings[id - 1];
    return glyph.charstring.length > 0;
  }

  getSeacs(charstrings) {
    const seacMap = [];
    for (let i = 0, ii = charstrings.length; i < ii; i++) {
      const charstring = charstrings[i];
      if (charstring.seac) {
        // Offset by 1 for .notdef
        seacMap[i + 1] = charstring.seac;
      }
    }
    return seacMap;
  }

  getType2Charstrings(type1Charstrings) {
    const type2Charstrings = [];
    for (let i = 0, ii = type1Charstrings.length; i < ii; i++) {
      type2Charstrings.push(type1Charstrings[i].charstring);
    }
    return type2Charstrings;
  }

  getType2Subrs(type1Subrs) {
    let bias = 0;
    const count = type1Subrs.length;
    if (count < 1133) {
      bias = 107;
    } else if (count < 33769) {
      bias = 1131;
    } else {
      bias = 32768;
    }

    // Add a bunch of empty subrs to deal with the Type2 bias
    const type2Subrs = [];
    let i;
    for (i = 0; i < bias; i++) {
      type2Subrs.push([0x0b]);
    }

    for (i = 0; i < count; i++) {
      type2Subrs.push(type1Subrs[i]);
    }

    return type2Subrs;
  }
  wrap(name, glyphs, charstrings, subrs, properties) {
    const cff = new CFF();
    cff.header = new CFFHeader(1, 0, 4, 4);

    cff.names = [name];

    const topDict = new CFFTopDict();
    // CFF strings IDs 0...390 are predefined names, so refering
    // to entries in our own String INDEX starts at SID 391.
    topDict.setByName("version", 391);
    topDict.setByName("Notice", 392);
    topDict.setByName("FullName", 393);
    topDict.setByName("FamilyName", 394);
    topDict.setByName("Weight", 395);
    topDict.setByName("Encoding", null); // placeholder
    topDict.setByName("FontMatrix", properties.fontMatrix);
    topDict.setByName("FontBBox", properties.bbox);
    topDict.setByName("charset", null); // placeholder
    topDict.setByName("CharStrings", null); // placeholder
    topDict.setByName("Private", null); // placeholder
    cff.topDict = topDict;

    const strings = new CFFStrings();
    strings.add("Version 0.11"); // Version
    strings.add("See original notice"); // Notice
    strings.add(name); // FullName
    strings.add(name); // FamilyName
    strings.add("Medium"); // Weight
    cff.strings = strings;

    cff.globalSubrIndex = new CFFIndex();

    const count = glyphs.length;
    const charsetArray = [".notdef"];
    let i, ii;
    for (i = 0; i < count; i++) {
      const glyphName = charstrings[i].glyphName;
      const index = CFFStandardStrings.indexOf(glyphName);
      if (index === -1) {
        strings.add(glyphName);
      }
      charsetArray.push(glyphName);
    }
    cff.charset = new CFFCharset(false, 0, charsetArray);

    const charStringsIndex = new CFFIndex();
    charStringsIndex.add([0x8b, 0x0e]); // .notdef
    for (i = 0; i < count; i++) {
      charStringsIndex.add(glyphs[i]);
    }
    cff.charStrings = charStringsIndex;

    const privateDict = new CFFPrivateDict();
    privateDict.setByName("Subrs", null); // placeholder
    const fields = [
      "BlueValues",
      "OtherBlues",
      "FamilyBlues",
      "FamilyOtherBlues",
      "StemSnapH",
      "StemSnapV",
      "BlueShift",
      "BlueFuzz",
      "BlueScale",
      "LanguageGroup",
      "ExpansionFactor",
      "ForceBold",
      "StdHW",
      "StdVW",
    ];
    for (i = 0, ii = fields.length; i < ii; i++) {
      const field = fields[i];
      if (!(field in properties.privateData)) {
        continue;
      }
      const value = properties.privateData[field];
      if (Array.isArray(value)) {
        // All of the private dictionary array data in CFF must be stored as
        // "delta-encoded" numbers.
        for (let j = value.length - 1; j > 0; j--) {
          value[j] -= value[j - 1]; // ... difference from previous value
        }
      }
      privateDict.setByName(field, value);
    }
    cff.topDict.privateDict = privateDict;

    const subrIndex = new CFFIndex();
    for (i = 0, ii = subrs.length; i < ii; i++) {
      subrIndex.add(subrs[i]);
    }
    privateDict.subrsIndex = subrIndex;

    const compiler = new CFFCompiler(cff);
    return compiler.compile();
  }
}
export { Type1Font };
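Two details in the methods above are easy to miss. First, getType2Subrs() prepends dummy subroutines because Type2 charstrings resolve a subroutine call as operand + bias, where the bias depends on the total subroutine count; padding the INDEX with `bias` entries keeps the original Type1 subroutine numbers valid. A small worked example (hypothetical count, not from a real font):

// Hypothetical example: 10 Type1 subrs.
const count = 10;
const bias = count < 1133 ? 107 : count < 33769 ? 1131 : 32768; // -> 107
// After getType2Subrs(): indices 0..106 hold dummy [0x0b] ("return") subrs
// and the original subr i sits at index 107 + i, so a charstring that still
// calls subr i by its original number resolves correctly once the Type2
// interpreter adds its own bias.

Second, CFF private-dict array operands (BlueValues, StemSnapH, and so on) must be delta-encoded, each value stored as the difference from the previous one; the in-place loop in wrap() runs backwards so every original value is still available when its delta is computed. A self-contained illustration with hypothetical BlueValues:

// Hypothetical BlueValues array and its delta-encoded form.
const value = [-17, 0, 487, 500, 697, 710];
for (let j = value.length - 1; j > 0; j--) {
  value[j] -= value[j - 1]; // difference from the previous value
}
// value is now [-17, 17, 487, 13, 197, 13]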