pdf.js/test/unit/stream_spec.js
Nicholas Nethercote b3024db677 Estimate the size of decoded streams in advance.
When decoding a stream, the decode buffer is often grown multiple times, its
byte size increasing like so: 512, 1024, 2048, etc. This patch estimates the
minimum size in advance (using the length of the encoded stream), often
allowing the smaller sizes to be skipped. It also renames numerous |length|
variables as |maybeLength| to make it clear that they can be |null|.

I measured this change on eight documents. It reduces the cumulative size of
decode buffer allocations by 0--32%, with 10--20% being typical, and it
reduces peak RSS by 10--20 MiB for several of those documents.
2014-03-13 02:06:58 -07:00
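
The commit message describes the two allocation strategies compactly; the
following is a minimal sketch of the idea, not the actual pdf.js DecodeStream
code (the function names are hypothetical, though |maybeLength| matches the
naming the patch introduces):

// Without an estimate, the buffer doubles from a small default, and each
// growth step allocates a fresh array and copies the old contents into it:
function grownBuffer(oldBuffer, requested) {
  var size = oldBuffer.byteLength || 512;
  while (size < requested) {
    size *= 2; // 512, 1024, 2048, ...
  }
  var newBuffer = new Uint8Array(size);
  newBuffer.set(oldBuffer);
  return newBuffer;
}

// With a minimum-size estimate derived from the encoded length (when it is
// known, i.e. maybeLength is not null), the first allocation can start
// large enough to skip the early doublings entirely:
function initialBufferSize(maybeLength) {
  var DEFAULT_SIZE = 512;
  if (maybeLength === null) {
    return DEFAULT_SIZE; // length unknown; fall back to growth by doubling
  }
  // Assumption for illustration: decoded data is roughly twice the encoded
  // length. Even a deliberate underestimate skips most small allocations.
  return Math.max(DEFAULT_SIZE, maybeLength * 2);
}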

/* -*- Mode: Java; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set shiftwidth=2 tabstop=2 autoindent cindent expandtab: */
/* globals expect, it, describe, beforeEach, Stream, PredictorStream, Dict */
'use strict';

describe('stream', function() {
  beforeEach(function() {
    this.addMatchers({
      toMatchTypedArray: function(expected) {
        var actual = this.actual;
        if (actual.length !== expected.length) {
          return false;
        }
        for (var i = 0, ii = expected.length; i < ii; i++) {
          var a = actual[i], b = expected[i];
          if (a !== b) {
            return false;
          }
        }
        return true;
      }
    });
  });

  describe('PredictorStream', function() {
    it('should decode simple predictor data', function() {
      var dict = new Dict();
      dict.set('Predictor', 12);
      dict.set('Colors', 1);
      dict.set('BitsPerComponent', 8);
      dict.set('Columns', 2);

      var input = new Stream(new Uint8Array([2, 100, 3, 2, 1, 255, 2, 1, 255]),
                             0, 9, dict);
      var predictor = new PredictorStream(input, /* length = */ 9, dict);
      var result = predictor.getBytes(6);

      expect(result).toMatchTypedArray(
        new Uint8Array([100, 3, 101, 2, 102, 1])
      );
    });
  });
});
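
For context on the expected bytes: Predictor 12 selects the PNG "Up" filter,
where each row begins with a filter-type byte (2 = Up) and every data byte is
stored as a delta from the byte directly above it in the previous row, modulo
256. A standalone sketch of that decoding (an illustration, not the actual
internals of PredictorStream):

function decodeUpRows(encoded, columns) {
  var rowLength = columns + 1; // +1 for the per-row filter-type byte
  var rows = encoded.length / rowLength;
  var out = new Uint8Array(rows * columns);
  var prev = new Uint8Array(columns); // previous row, initially all zeros
  for (var r = 0; r < rows; r++) {
    for (var c = 0; c < columns; c++) {
      var decoded = (encoded[r * rowLength + 1 + c] + prev[c]) & 0xFF;
      out[r * columns + c] = decoded;
      prev[c] = decoded;
    }
  }
  return out;
}

Applied to the test's input, decodeUpRows(new Uint8Array([2, 100, 3, 2, 1,
255, 2, 1, 255]), 2) yields [100, 3, 101, 2, 102, 1]: row one is [100, 3]
unchanged (the implicit previous row is all zeros), row two is
[100 + 1, (3 + 255) mod 256] = [101, 2], and row three is
[101 + 1, (2 + 255) mod 256] = [102, 1], matching the expectation above.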