Merge pull request #4181 from nnethercote/compact-images
Reduce memory consumption of simple black and white images.
Commit c5a804c43a
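For a sense of scale: a 1 bpp black-and-white page kept in packed form is about 32x smaller than the same page expanded to RGBA. A rough, standalone illustration (not part of this commit; the scan dimensions below are assumptions chosen for the example):

    // Rough illustration only -- not part of this commit. Assumes a 300 dpi
    // A4 scan; any 1 bpp page gives the same ~32x ratio.
    var width = 2480, height = 3508;
    var packed1bppBytes = Math.ceil(width / 8) * height;   // rows padded to whole bytes
    var rgba32bppBytes = width * height * 4;                // 4 bytes per pixel after expansion
    console.log(packed1bppBytes);   // 1087480  (~1.1 MB)
    console.log(rgba32bppBytes);    // 34799360 (~34.8 MB)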
@@ -166,7 +166,7 @@ var PartialEvaluator = (function PartialEvaluatorClosure() {
             (w + h) < SMALL_IMAGE_DIMENSIONS) {
           var imageObj = new PDFImage(this.xref, resources, image,
                                       inline, null, null);
-          var imgData = imageObj.getImageData();
+          var imgData = imageObj.createImageData();
           operatorList.addOp(OPS.paintInlineImageXObject, [imgData]);
           return;
         }
@@ -189,7 +189,7 @@ var PartialEvaluator = (function PartialEvaluatorClosure() {


         PDFImage.buildImage(function(imageObj) {
-            var imgData = imageObj.getImageData();
+            var imgData = imageObj.createImageData();
             self.handler.send('obj', [objId, self.pageIndex, 'Image', imgData],
                               null, [imgData.data.buffer]);
           }, self.handler, self.xref, resources, image, inline);
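For orientation (this sketch is not part of the diff): after this change, imageObj.createImageData() returns a plain object whose data field is a Uint8Array in one of two layouts, tagged by kind. The field names come from the diff; the values below are made up for illustration:

    // Illustrative sketch of the two shapes createImageData() can return.
    var grayImgData = {
      width: 2550, height: 3300,
      kind: 'grayscale_1bpp',
      data: new Uint8Array(Math.ceil(2550 / 8) * 3300),  // packed 1 bpp rows, copied from imgArray
      origLength: Math.ceil(2550 / 8) * 3300             // length of the original stream data
    };
    var rgbaImgData = {
      width: 400, height: 300,
      kind: 'rgba_32bpp',
      data: new Uint8Array(400 * 300 * 4)                // 4 bytes (R, G, B, A) per pixel
    };
    // In both cases imgData.data.buffer is an ArrayBuffer, so it can be listed
    // as a transferable in handler.send(), as the hunk above does.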
@@ -1318,7 +1318,8 @@ var PartialEvaluator = (function PartialEvaluatorClosure() {
           // replacing queue items
           squash(fnArray, j, count * 4, OPS.paintInlineImageXObjectGroup);
           argsArray.splice(j, count * 4,
-            [{width: imgWidth, height: imgHeight, data: imgData}, map]);
+            [{width: imgWidth, height: imgHeight, kind: 'rgba_32bpp',
+              data: imgData}, map]);
           i = j;
           ii = argsArray.length;
         }
@@ -267,10 +267,10 @@ var PDFImage = (function PDFImageClosure() {
       var bpc = this.bpc;

       // This image doesn't require any extra work.
-      if (bpc === 8)
+      if (bpc === 8) {
         return buffer;
+      }

       var bufferLength = buffer.length;
       var width = this.width;
       var height = this.height;
       var numComps = this.numComps;
@@ -417,7 +417,14 @@ var PDFImage = (function PDFImageClosure() {
         buffer[i + 2] = clamp((buffer[i + 2] - matteRgb[2]) * k + matteRgb[2]);
       }
     },
-    fillRgbaBuffer: function PDFImage_fillRgbaBuffer(buffer, width, height) {
+    createImageData: function PDFImage_createImageData() {
+      var drawWidth = this.drawWidth;
+      var drawHeight = this.drawHeight;
+      var imgData = {    // other fields are filled in below
+        width: drawWidth,
+        height: drawHeight,
+      };
+
       var numComps = this.numComps;
       var originalWidth = this.width;
       var originalHeight = this.height;
@@ -429,21 +436,46 @@ var PDFImage = (function PDFImageClosure() {

       // imgArray can be incomplete (e.g. after CCITT fax encoding)
       var actualHeight = 0 | (imgArray.length / rowBytes *
-                              height / originalHeight);
+                              drawHeight / originalHeight);
+
+      // If it is a 1-bit-per-pixel grayscale (i.e. black-and-white) image
+      // without any complications, we pass a same-sized copy to the main
+      // thread rather than expanding by 32x to RGBA form. This saves *lots* of
+      // memory for many scanned documents. It's also much faster.
+      if (this.colorSpace.name === 'DeviceGray' && bpc === 1 &&
+          !this.smask && !this.mask && !this.needsDecode &&
+          drawWidth === originalWidth && drawHeight === originalHeight) {
+        imgData.kind = 'grayscale_1bpp';
+
+        // We must make a copy of imgArray, otherwise it'll be neutered upon
+        // transfer which will break any code that subsequently reuses it.
+        var newArray = new Uint8Array(imgArray.length);
+        newArray.set(imgArray);
+        imgData.data = newArray;
+        imgData.origLength = imgArray.length;
+        return imgData;
+      }

       var comps = this.getComponents(imgArray);
+
+      var rgbaBuf = new Uint8Array(drawWidth * drawHeight * 4);

       // Handle opacity here since color key masking needs to be performed on
       // undecoded values.
-      this.fillOpacity(buffer, width, height, actualHeight, comps);
+      this.fillOpacity(rgbaBuf, drawWidth, drawHeight, actualHeight, comps);

       if (this.needsDecode) {
         this.decodeBuffer(comps);
       }

-      this.colorSpace.fillRgb(buffer, originalWidth, originalHeight, width,
-                              height, actualHeight, bpc, comps);
+      this.colorSpace.fillRgb(rgbaBuf, originalWidth, originalHeight, drawWidth,
+                              drawHeight, actualHeight, bpc, comps);

-      this.undoPreblend(buffer, width, actualHeight);
+      this.undoPreblend(rgbaBuf, drawWidth, actualHeight);
+
+      imgData.kind = 'rgba_32bpp';
+      imgData.data = rgbaBuf;
+      return imgData;
     },
     fillGrayBuffer: function PDFImage_fillGrayBuffer(buffer) {
       var numComps = this.numComps;
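The "neutered upon transfer" comment above refers to transferable ArrayBuffers: passing a buffer in the transfer list of postMessage detaches it on the sending side. A standalone sketch of the effect being guarded against (plain Worker API, not pdf.js code; 'worker.js' is a hypothetical script name):

    // Standalone sketch, not pdf.js code. 'worker.js' is a hypothetical file.
    var bytes = new Uint8Array([1, 2, 3, 4]);
    var worker = new Worker('worker.js');
    // Listing bytes.buffer as a transferable moves it to the worker and
    // detaches ("neuters") it here: its byteLength becomes 0.
    worker.postMessage({ data: bytes }, [bytes.buffer]);
    console.log(bytes.buffer.byteLength);   // 0 -- the sender can no longer read it
    // That is why createImageData() copies imgArray into a fresh Uint8Array
    // before the data is sent to the main thread.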
@@ -468,18 +500,6 @@ var PDFImage = (function PDFImageClosure() {
       for (var i = 0; i < length; ++i)
         buffer[i] = (scale * comps[i]) | 0;
     },
-    getImageData: function PDFImage_getImageData() {
-      var drawWidth = this.drawWidth;
-      var drawHeight = this.drawHeight;
-      var imgData = {
-        width: drawWidth,
-        height: drawHeight,
-        data: new Uint8Array(drawWidth * drawHeight * 4)
-      };
-      var pixels = imgData.data;
-      this.fillRgbaBuffer(pixels, drawWidth, drawHeight);
-      return imgData;
-    },
     getImageBytes: function PDFImage_getImageBytes(length) {
       this.image.reset();
       return this.image.getBytes(length);
@@ -437,45 +437,81 @@ var CanvasGraphics = (function CanvasGraphicsClosure() {
     // of putImageData(). (E.g. in Firefox we make two short-lived copies of
     // the data passed to putImageData()). |n| shouldn't be too small, however,
     // because too many putImageData() calls will slow things down.
     //
+    // Note: as written, if the last chunk is partial, the putImageData() call
+    // will (conceptually) put pixels past the bounds of the canvas. But
+    // that's ok; any such pixels are ignored.

-    var rowsInFullChunks = 16;
-    var fullChunks = (imgData.height / rowsInFullChunks) | 0;
-    var rowsInLastChunk = imgData.height - fullChunks * rowsInFullChunks;
-    var elemsInFullChunks = imgData.width * rowsInFullChunks * 4;
-    var elemsInLastChunk = imgData.width * rowsInLastChunk * 4;
+    var fullChunkHeight = 16;
+    var fracChunks = imgData.height / fullChunkHeight;
+    var fullChunks = Math.floor(fracChunks);
+    var totalChunks = Math.ceil(fracChunks);
+    var partialChunkHeight = imgData.height - fullChunks * fullChunkHeight;

-    var chunkImgData = ctx.createImageData(imgData.width, rowsInFullChunks);
+    var chunkImgData = ctx.createImageData(imgData.width, fullChunkHeight);
     var srcPos = 0;
     var src = imgData.data;
     var dst = chunkImgData.data;
-    var haveSetAndSubarray = 'set' in dst && 'subarray' in src;

-    // Do all the full-size chunks.
-    for (var i = 0; i < fullChunks; i++) {
-      if (haveSetAndSubarray) {
-        dst.set(src.subarray(srcPos, srcPos + elemsInFullChunks));
-        srcPos += elemsInFullChunks;
-      } else {
-        for (var j = 0; j < elemsInFullChunks; j++) {
-          chunkImgData.data[j] = imgData.data[srcPos++];
-        }
-      }
-      ctx.putImageData(chunkImgData, 0, i * rowsInFullChunks);
-    }
-
-    // Do the final, partial chunk, if required.
-    if (rowsInLastChunk !== 0) {
-      if (haveSetAndSubarray) {
-        dst.set(src.subarray(srcPos, srcPos + elemsInLastChunk));
-        srcPos += elemsInLastChunk;
-      } else {
-        for (var j = 0; j < elemsInLastChunk; j++) {
-          chunkImgData.data[j] = imgData.data[srcPos++];
-        }
-      }
-      // This (conceptually) puts pixels past the bounds of the canvas. But
-      // that's ok; any such pixels are ignored.
-      ctx.putImageData(chunkImgData, 0, fullChunks * rowsInFullChunks);
-    }
+    // There are multiple forms in which the pixel data can be passed, and
+    // imgData.kind tells us which one this is.
+
+    if (imgData.kind === 'grayscale_1bpp') {
+      // Grayscale, 1 bit per pixel (i.e. black-and-white).
+      var srcData = imgData.data;
+      var destData = chunkImgData.data;
+      var alpha = 255;
+      var origLength = imgData.origLength;
+      for (var i = 0; i < totalChunks; i++) {
+        var thisChunkHeight =
+          (i < fullChunks) ? fullChunkHeight : partialChunkHeight;
+        var destPos = 0;
+        for (var j = 0; j < thisChunkHeight; j++) {
+          var mask = 0;
+          var srcByte = 0;
+          for (var k = 0; k < imgData.width; k++) {
+            if (srcPos >= origLength) {
+              // We ran out of input. Make all remaining pixels transparent.
+              alpha = 0;
+            }
+            if (mask === 0) {
+              srcByte = srcData[srcPos++];
+              mask = 128;
+            }
+
+            var c = (+!!(srcByte & mask)) * 255;
+            destData[destPos++] = c;
+            destData[destPos++] = c;
+            destData[destPos++] = c;
+            destData[destPos++] = alpha;
+
+            mask >>= 1;
+          }
+        }
+        ctx.putImageData(chunkImgData, 0, i * fullChunkHeight);
+      }
+
+    } else if (imgData.kind === 'rgba_32bpp') {
+      // RGBA, 32-bits per pixel.
+      var haveSetAndSubarray = 'set' in dst && 'subarray' in src;
+
+      for (var i = 0; i < totalChunks; i++) {
+        var thisChunkHeight =
+          (i < fullChunks) ? fullChunkHeight : partialChunkHeight;
+        var elemsInThisChunk = imgData.width * thisChunkHeight * 4;
+        if (haveSetAndSubarray) {
+          dst.set(src.subarray(srcPos, srcPos + elemsInThisChunk));
+          srcPos += elemsInThisChunk;
+        } else {
+          for (var j = 0; j < elemsInThisChunk; j++) {
+            chunkImgData.data[j] = imgData.data[srcPos++];
+          }
+        }
+        ctx.putImageData(chunkImgData, 0, i * fullChunkHeight);
+      }
+
+    } else {
+      error('bad image kind: ' + imgData.kind);
+    }
   }
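Two details of the new canvas path, worked through with illustrative numbers (neither snippet is part of the commit): first the chunking arithmetic that replaces the old fullChunks/rowsInLastChunk bookkeeping, then how one packed byte of a 'grayscale_1bpp' image expands into eight RGBA pixels.

    // Chunking arithmetic for an image 1000 pixels tall (illustrative values).
    var height = 1000;
    var fullChunkHeight = 16;
    var fracChunks = height / fullChunkHeight;                       // 62.5
    var fullChunks = Math.floor(fracChunks);                         // 62 full 16-row chunks
    var totalChunks = Math.ceil(fracChunks);                         // 63 putImageData() calls
    var partialChunkHeight = height - fullChunks * fullChunkHeight;  // final chunk covers 8 rows

    // Expanding one packed byte of 1 bpp data into eight RGBA pixels.
    var srcByte = 0xA5;                       // binary 10100101
    var pixels = [];
    for (var mask = 128; mask !== 0; mask >>= 1) {
      var c = (+!!(srcByte & mask)) * 255;    // 255 (white) for a set bit, 0 (black) otherwise
      pixels.push(c, c, c, 255);              // replicate into R, G, B; fully opaque alpha
    }
    // pixels.length === 32: white, black, white, black, black, white, black, white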