// The implementation of the [HTTP/2 Header Compression][http2-compression] spec is separated from // the 'integration' part which handles HEADERS and PUSH_PROMISE frames. The compression itself is // implemented in the first part of the file, and consists of three classes: `HeaderTable`, // `HeaderSetDecompressor` and `HeaderSetCompressor`. The two latter classes are // [Transform Stream][node-transform] subclasses that operate in [object mode][node-objectmode]. // These transform chunks of binary data into `[name, value]` pairs and vice versa, and store their // state in `HeaderTable` instances. // // The 'integration' part is also implemented by two [Transform Stream][node-transform] subclasses // that operate in [object mode][node-objectmode]: the `Compressor` and the `Decompressor`. These // provide a layer between the [framer](framer.html) and the // [connection handling component](connection.html). // // [node-transform]: https://nodejs.org/api/stream.html#stream_class_stream_transform // [node-objectmode]: https://nodejs.org/api/stream.html#stream_new_stream_readable_options // [http2-compression]: https://tools.ietf.org/html/rfc7541
// Module dependencies. The original line read `varassert = require('assert')`,
// which (missing a space) assigned to an implicit global named `varassert` and
// left `assert` undefined.
var TransformStream = require('stream').Transform;
var assert = require('assert');
var util = require('util');
// Header compression // ==================
// The HeaderTable class // ---------------------
// The [Header Table] associates headers with index values. It is basically an ordered
// list of `[name, value]` pairs, so it is realized as an augmented `Array` instance.
// In this implementation the Header Table and the [Static Table] are handled as one
// single table.
// [Header Table]: https://tools.ietf.org/html/rfc7541#section-2.3.2
// [Static Table]: https://tools.ietf.org/html/rfc7541#section-2.3.1
function HeaderTable(log, limit) {
  // Start from a fresh copy of the static table; dynamic entries get inserted after it.
  var table = HeaderTable.staticTable.map(entryFromPair);

  table._log = log;
  table._limit = limit || DEFAULT_HEADER_TABLE_LIMIT;
  table._staticLength = table.length;
  table._size = 0;

  // The returned value is a plain Array, so the "methods" are copied over explicitly
  // instead of being inherited through the prototype chain.
  table._enforceLimit = HeaderTable.prototype._enforceLimit;
  table.add = HeaderTable.prototype.add;
  table.setSizeLimit = HeaderTable.prototype.setSizeLimit;

  return table;
}
// Turn a `[name, value]` pair into a table entry: a copy of the pair that carries its
// own size (as defined by the spec) in the `_size` property.
function entryFromPair(pair) {
  var copy = pair.slice();
  copy._size = size(copy);
  return copy;
}
// The encoder decides how to update the header table and as such can control how much memory is // used by the header table. To limit the memory requirements on the decoder side, the header table // size is bounded. // // * The default header table size limit is 4096 bytes. // * The size of an entry is defined as follows: the size of an entry is the sum of its name's // length in bytes, of its value's length in bytes and of 32 bytes. // * The size of a header table is the sum of the size of its entries. var DEFAULT_HEADER_TABLE_LIMIT = 4096;
// The `add(entry)` method can be used to [manage the header table][tablemgmt]: // [tablemgmt]: https://tools.ietf.org/html/rfc7541#section-4 // // * it pushes the new `entry` at the beginning of the table // * before doing such a modification, it has to be ensured that the header table size will stay // lower than or equal to the header table size limit. To achieve this, entries are evicted from // the end of the header table until the size of the header table is less than or equal to // `(this._limit - entry.size)`, or until the table is empty. // // <---------- Index Address Space ----------> // <-- Static Table --> <-- Header Table --> // +---+-----------+---+ +---+-----------+---+ // | 0 | ... | k | |k+1| ... | n | // +---+-----------+---+ +---+-----------+---+ // ^ | // | V // Insertion Point Drop Point
// Evict entries from the end of the table until the table size is at most `limit`
// (or the table is empty). The evicted entries are returned in their original order.
HeaderTable.prototype._enforceLimit = function _enforceLimit(limit) {
  var evicted = [];
  while (this._size > 0 && this._size > limit) {
    var entry = this.pop();
    this._size -= entry._size;
    evicted.push(entry);
  }
  // `pop()` removed them back-to-front; restore front-to-back order.
  return evicted.reverse();
};
// `add(entry)` inserts a new entry at the beginning of the dynamic part of the table
// (right after the static entries), evicting old entries first so the table stays
// within `this._limit`. Returns the entries that were evicted.
// NOTE(review): the tail of this method (and its closing brace) was missing from this
// copy and has been reconstructed — confirm against the upstream source.
HeaderTable.prototype.add = function(entry) {
  var limit = this._limit - entry._size;
  var droppedEntries = this._enforceLimit(limit);

  // Only insert if the entry itself fits even after eviction emptied enough room;
  // an entry larger than the whole limit simply clears the table (per RFC 7541 §4.4).
  if (this._size <= limit) {
    this.splice(this._staticLength, 0, entry);
    this._size += entry._size;
  }

  return droppedEntries;
};
// The table size limit can be changed externally. In this case, the same eviction algorithm is used
// The table size limit can be changed externally. Record the new limit, then run the
// usual eviction algorithm so the table fits the new bound.
HeaderTable.prototype.setSizeLimit = function setSizeLimit(limit) {
  this._limit = limit;
  this._enforceLimit(limit);
};
// The table is generated by feeding the table from the spec to the following sed command: // // sed -re "s/\s*\| [0-9]+\s*\| ([^ ]*)/ [ '\1'/g" -e "s/\|\s([^ ]*)/, '\1'/g" -e 's/ \|/],/g'
// The HeaderSetDecompressor class // -------------------------------
// A `HeaderSetDecompressor` instance is a transform stream that can be used to *decompress a // single header set*. Its input is a stream of binary data chunks and its output is a stream of // `[name, value]` pairs. // // Currently, it is not a proper streaming decompressor implementation, since it buffers its input // until the end of the stream, and then processes the whole header block at once.
// `_transform` is the implementation of the [corresponding virtual function][_transform] of the // TransformStream class. It collects the data chunks for later processing. // [_transform]: https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
// `_transform` is the implementation of the [corresponding virtual function][_transform]
// of the TransformStream class. It only buffers the incoming chunk; the actual decoding
// of the whole header block happens later, in `_flush`.
// [_transform]: https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
HeaderSetDecompressor.prototype._transform = function _transform(chunk, encoding, callback) { this._chunks.push(chunk);
callback();
};
// The *JavaScript object representation* of a header representation: // // { // name: String || Integer, // string literal or index // value: String || Integer, // string literal or index // index: Boolean // with or without indexing // } // // *Important:* to ease the indexing of the header table, indexes start at 0 instead of 1. // // Examples: // // Indexed: // { name: 2 , value: 2 , index: false } // Literal: // { name: 2 , value: 'X', index: false } // without indexing // { name: 2 , value: 'Y', index: true } // with indexing // { name: 'A', value: 'Z', index: true } // with indexing, literal name
// Executes a single header representation against the decoder state: it may update the
// dynamic table size, emit an indexed header, or emit (and possibly index) a literal
// header. (The original source had `elseif`, a JavaScript syntax error — fixed below.)
HeaderSetDecompressor.prototype._execute = function _execute(rep) {
  this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
                  'Executing header representation');

  var entry, pair;

  // * A _dynamic table size update_ changes the maximum size of the header table.
  if (rep.contextUpdate) {
    this._table.setSizeLimit(rep.newMaxSize);
  }

  // * An _indexed representation_ entails the following actions:
  //   * The header field corresponding to the referenced entry is emitted
  else if (typeof rep.value === 'number') {
    var index = rep.value;
    entry = this._table[index];

    pair = entry.slice();
    this.push(pair);
  }

  // * A _literal representation_ that is _not added_ to the header table entails the
  //   following action:
  //   * The header is emitted.
  // * A _literal representation_ that is _added_ to the header table entails the
  //   following further actions:
  //   * The header is added to the header table.
  //   * The header is emitted.
  else {
    if (typeof rep.name === 'number') {
      // Name given by table index; only the name of the referenced entry is reused.
      pair = [this._table[rep.name][0], rep.value];
    } else {
      pair = [rep.name, rep.value];
    }

    if (rep.index) {
      entry = entryFromPair(pair);
      this._table.add(entry);
    }

    this.push(pair);
  }
};
// `_flush` is the implementation of the [corresponding virtual function][_flush] of the // TransformStream class. The whole decompressing process is done in `_flush`. It gets called when // the input stream is over. // [_flush]: https://nodejs.org/api/stream.html#stream_transform_flush_callback
// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
// TransformStream class. The whole decompression happens here, once the input stream is
// over: the buffered chunks are concatenated and the representations are executed one
// after the other.
// [_flush]: https://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetDecompressor.prototype._flush = function _flush(callback) {
  var block = concat(this._chunks);

  // Parse and execute every header representation found in the block. The parser
  // tracks its position via the `cursor` property on the buffer.
  block.cursor = 0;
  while (block.cursor < block.length) {
    var representation = HeaderSetDecompressor.header(block);
    this._execute(representation);
  }

  callback();
};
// The HeaderSetCompressor class // -----------------------------
// A `HeaderSetCompressor` instance is a transform stream that can be used to *compress a single // header set*. Its input is a stream of `[name, value]` pairs and its output is a stream of // binary data chunks. // // It is a real streaming compressor, since it does not wait until the header set is complete. // // The compression algorithm is (intentionally) not specified by the spec. Therefore, the current // compression algorithm can probably be improved in the future.
// `send` serializes a header representation — caching the generated chunks on the
// representation itself so repeated sends do not re-encode — and pushes the chunks
// downstream.
// NOTE(review): the head of this function was missing from this copy and has been
// reconstructed; confirm the name and any logging against the upstream source. The
// surviving fragment also called `rep.chunks.forEach(this.push)` with an unbound
// `this.push`, which loses the stream's `this`; the `thisArg` is now passed explicitly.
HeaderSetCompressor.prototype.send = function send(rep) {
  if (!rep.chunks) {
    rep.chunks = HeaderSetCompressor.header(rep);
  }
  rep.chunks.forEach(this.push, this);
};
// `_transform` is the implementation of the [corresponding virtual function][_transform] of the // TransformStream class. It processes the input headers one by one: // [_transform]: https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
// `_transform` is the implementation of the [corresponding virtual function][_transform]
// of the TransformStream class. It processes one `[name, value]` pair: the pair is
// looked up in the header table and the cheapest available representation is emitted.
// [_transform]: https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
// NOTE(review): the tail of this method (representation choice and emission) was
// missing from this copy; it has been reconstructed with a simple, spec-valid encoder
// policy (indexed representation on full match, otherwise an incrementally-indexed
// literal). The compression algorithm is intentionally unspecified by RFC 7541, but
// confirm against the upstream source. The surviving fragment also had `elseif`, a
// syntax error — fixed.
HeaderSetCompressor.prototype._transform = function _transform(pair, encoding, callback) {
  var name = pair[0].toLowerCase();
  var value = pair[1];
  var entry, rep;

  // * tries to find full (name, value) or name match in the header table
  var nameMatch = -1, fullMatch = -1;
  for (var i = 0; i < this._table.length; i++) {
    entry = this._table[i];
    if (entry[0] === name) {
      if (entry[1] === value) {
        fullMatch = i;
        break;
      } else if (nameMatch === -1) {
        nameMatch = i;
      }
    }
  }

  // * a full match is emitted as an indexed representation
  if (fullMatch !== -1) {
    rep = { name: fullMatch, value: fullMatch, index: false };
  }

  // * otherwise a literal representation is emitted (with a name index when one is
  //   available) and the pair is added to the table, mirroring exactly what the
  //   decompressor will do when it sees `index: true`
  else {
    rep = { name: (nameMatch !== -1) ? nameMatch : name, value: value, index: true };
    this._table.add(entryFromPair([name, value]));
  }

  this.send(rep);
  callback();
};
// `_flush` is the implementation of the [corresponding virtual function][_flush] of the // TransformStream class. It gets called when there's no more header to compress. The final step: // [_flush]: https://nodejs.org/api/stream.html#stream_transform_flush_callback
// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
// TransformStream class. The compressor emits everything as it goes, so there is no
// buffered state left to write out — just signal completion.
// [_flush]: https://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetCompressor.prototype._flush = function _flush(callback) {
callback();
};
// ### Integer representation ### // // The algorithm to represent an integer I is as follows: // // 1. If I < 2^N - 1, encode I on N bits // 2. Else, encode 2^N - 1 on N bits and do the following steps: // 1. Set I to (I - (2^N - 1)) and Q to 1 // 2. While Q > 0 // 1. Compute Q and R, quotient and remainder of I divided by 2^7 // 2. If Q is strictly greater than 0, write one 1 bit; otherwise, write one 0 bit // 3. Encode R on the next 7 bits // 4. I = Q
// Encode the integer `I` with an `N`-bit prefix, as specified by RFC 7541 §5.1.
// Returns an array holding a single buffer with the encoded octets.
HeaderSetCompressor.integer = function writeInteger(I, N) {
  var prefixMax = Math.pow(2, N) - 1;

  // Small enough to fit directly in the N-bit prefix.
  if (I < prefixMax) {
    return [Buffer.from([I])];
  }

  var octets = [];
  if (N !== 0) {
    octets.push(prefixMax);
  }

  // Emit the remainder in 7-bit groups, least significant first; the high bit of each
  // octet flags whether more octets follow.
  var remainder = I - prefixMax;
  var more;
  do {
    more = Math.floor(remainder / 128);
    var low7 = remainder % 128;
    octets.push(more > 0 ? low7 + 128 : low7);
    remainder = more;
  } while (more > 0);

  return [Buffer.from(octets)];
};
// The inverse algorithm: // // 1. Set I to the number coded on the lower N bits of the first byte // 2. If I is smaller than 2^N - 1 then return I // 2. Else the number is encoded on more than one byte, so do the following steps: // 1. Set M to 0 // 2. While returning with I // 1. Let B be the next byte (the first byte if N is 0) // 2. Read out the lower 7 bits of B and multiply it with 2^M // 3. Increase I with this number // 4. Increase M by 7 // 5. Return I if the most significant bit of B is 0
// Decode an integer with an `N`-bit prefix from `buffer`, starting at — and
// advancing — `buffer.cursor`, as specified by RFC 7541 §5.1.
HeaderSetDecompressor.integer = function readInteger(buffer, N) {
  var prefixMax = Math.pow(2, N) - 1;

  var value = buffer[buffer.cursor] & prefixMax;
  if (N !== 0) {
    buffer.cursor += 1;
  }

  // A saturated prefix means the value continues in 7-bit groups, least significant
  // first; the high bit of each octet flags whether more octets follow.
  if (value === prefixMax) {
    var shift = 0;
    var octet;
    do {
      octet = buffer[buffer.cursor];
      buffer.cursor += 1;
      value += (octet & 127) << shift;
      shift += 7;
    } while (octet & 128);
  }

  return value;
};
// ### Huffman Encoding ###
// Builds a binary decode tree from a table of Huffman codes given as '0'/'1' strings.
// A leaf node is a one-element array holding the index of the decoded symbol in
// `table`; an internal node is a two-element array [zeroBranch, oneBranch].
// NOTE(review): the constructor body appears truncated in this copy — the closing
// brace of `HuffmanTable` and any further initialization that uses `createTree` are
// missing. TODO restore from the upstream source.
function HuffmanTable(table) { function createTree(codes, position) { if (codes.length === 1) { return [table.indexOf(codes[0])];
}
// More than one code left: split on the bit at `position` and recurse on both halves.
else {
position = position || 0; var zero = []; var one = []; for (var i = 0; i < codes.length; i++) { var string = codes[i]; if (string[position] === '0') {
zero.push(string);
} else {
one.push(string);
}
} return [createTree(zero, position + 1), createTree(one, position + 1)];
}
}
// The initializer arrays for the Huffman tables are generated by feeding the tables from the // spec to this sed command: // // sed -e "s/^.* [|]//g" -e "s/|//g" -e "s/ .*//g" -e "s/^/ '/g" -e "s/$/',/g"
// Integration with HTTP/2 // =======================
// This section describes the interaction between the compressor/decompressor and the rest of the // HTTP/2 implementation. The `Compressor` and the `Decompressor` makes up a layer between the // [framer](framer.html) and the [connection handling component](connection.html). They let most // frames pass through, except HEADERS and PUSH_PROMISE frames. They convert the frames between // these two representations: // // { { // type: 'HEADERS', type: 'HEADERS', // flags: {}, flags: {}, // stream: 1, <===> stream: 1, // headers: { data: Buffer // N1: 'V1', } // N2: ['V1', 'V2', ...], // // ... // } // } // // There are possibly several binary frame that belong to a single non-binary frame.
var MAX_HTTP_PAYLOAD_SIZE = 16384;
// The Compressor class // --------------------
// The Compressor transform stream is basically stateless.
// The Compressor is a Transform stream operating in object mode, sitting between the
// connection handling component and the framer.
// NOTE(review): the constructor body appears truncated in this copy — state used by
// the other Compressor methods (e.g. `this._log`, `this._table`, the pending
// table-size fields) is never initialized here, and the closing brace is missing.
// TODO restore from the upstream source.
util.inherits(Compressor, TransformStream); function Compressor(log, type) {
TransformStream.call(this, { objectMode: true });
// Changing the header table size
// Changing the header table size on the encoding side. The new limit is applied to the
// table immediately, and the change is recorded so that a table size update can be
// signaled in the next header block; only the lowest pending size needs to be tracked.
Compressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) {
  this._table.setSizeLimit(size);

  var nothingQueued = !this.tableSizeChangePending;
  if (nothingQueued || size < this.lowestTableSizePending) {
    this.lowestTableSizePending = size;
  }

  this.tableSizeChangePending = true;
  this.tableSizeSetting = size;
};
// `compress` takes a header set, and compresses it using a new `HeaderSetCompressor` stream // instance. This means that from now on, the advantages of streaming header encoding are lost, // but the API becomes simpler.
// `compress` takes a header set, and compresses it using a new `HeaderSetCompressor`
// stream instance. This means that from now on, the advantages of streaming header
// encoding are lost, but the API becomes simpler.
// NOTE(review): this copy was missing the `colonHeaders`/`nonColonHeaders`
// declarations and the calls that feed the headers into the compressor and end the
// stream; without them the method threw a ReferenceError and produced no output.
// They have been restored below — confirm against the upstream source.
Compressor.prototype.compress = function compress(headers) {
  var compressor = new HeaderSetCompressor(this._log, this._table);
  var colonHeaders = [];
  var nonColonHeaders = [];

  // To ensure we send colon (pseudo-)headers first
  for (var name in headers) {
    if (name.trim()[0] === ':') {
      colonHeaders.push(name);
    } else {
      nonColonHeaders.push(name);
    }
  }

  // Write one header (possibly multi-valued) into the compressor stream.
  function compressHeader(name) {
    var value = headers[name];
    name = String(name).toLowerCase();

    // * To allow for better compression efficiency, the Cookie header field MAY be
    //   split into separate header fields, each with one or more cookie-pairs.
    if (name === 'cookie') {
      if (!(value instanceof Array)) {
        value = [value];
      }
      value = Array.prototype.concat.apply([], value.map(function(cookie) {
        return String(cookie).split(';').map(trim);
      }));
    }

    if (value instanceof Array) {
      for (var i = 0; i < value.length; i++) {
        compressor.write([name, String(value[i])]);
      }
    } else {
      compressor.write([name, String(value)]);
    }
  }

  colonHeaders.forEach(compressHeader);
  nonColonHeaders.forEach(compressHeader);
  compressor.end();

  // Drain every chunk the compressor produced synchronously.
  var chunk, chunks = [];
  while (chunk = compressor.read()) {
    chunks.push(chunk);
  }

  // Test hook: append a literal `:illegal` pseudo-header after regular headers.
  function insertSoftIllegalHpack(originalCompressed) {
    var illegalLiteral = Buffer.from([
      0x00, // Literal, no index
      0x08, // Name: not huffman encoded, 8 bytes long
      0x3a,
      0x69,
      0x6c,
      0x6c,
      0x65,
      0x67,
      0x61,
      0x6c, // :illegal
      0x10, // Value: not huffman encoded, 16 bytes long
      // REALLY NOT LEGAL
      0x52,
      0x45,
      0x41,
      0x4c,
      0x4c,
      0x59,
      0x20,
      0x4e,
      0x4f,
      0x54,
      0x20,
      0x4c,
      0x45,
      0x47,
      0x41,
      0x4c,
    ]);
    var concatenated = Buffer.alloc(originalCompressed.length + illegalLiteral.length);
    originalCompressed.copy(concatenated, 0);
    illegalLiteral.copy(concatenated, originalCompressed.length);
    return concatenated;
  }

  // Test hook: append an indexed representation referencing a wildly out-of-range
  // table index (5000).
  function insertHardIllegalHpack(originalCompressed) {
    // HeaderSetCompressor.integer returns an array of buffers, but there's only one
    // buffer, so get rid of the array.
    var illegalIndexed = HeaderSetCompressor.integer(5000, 7)[0];
    // Set the first bit to 1 to signal this is an indexed representation
    illegalIndexed[0] |= 0x80;
    var concatenated = Buffer.alloc(originalCompressed.length + illegalIndexed.length);
    originalCompressed.copy(concatenated, 0);
    illegalIndexed.copy(concatenated, originalCompressed.length);
    return concatenated;
  }

  if ("x-softillegalhpack" in headers) {
    return insertSoftIllegalHpack(concat(chunks));
  }
  if ("x-hardillegalhpack" in headers) {
    return insertHardIllegalHpack(concat(chunks));
  }
  return concat(chunks);
};
// When a `frame` arrives
// NOTE(review): this method appears truncated in this copy — the body of the `for`
// loop below (which should build the per-chunk HEADERS/CONTINUATION frames and push
// them) and the closing braces of the loop and of the HEADERS/PUSH_PROMISE branch are
// missing, leaving the `else` branch orphaned. TODO restore from the upstream source.
Compressor.prototype._transform = function _transform(frame, encoding, done) { // * and it is a HEADERS or PUSH_PROMISE frame // * it generates a header block using the compress method // * cuts the header block into `chunks` that are not larger than `MAX_HTTP_PAYLOAD_SIZE` // * for each chunk, it pushes out a chunk frame that is identical to the original, except // the `data` property which holds the given chunk, the type of the frame which is always // CONTINUATION except for the first frame, and the END_HEADERS/END_PUSH_STREAM flag that // marks the last frame and the END_STREAM flag which is always false before the end if (frame.type === 'HEADERS' || frame.type === 'PUSH_PROMISE') { var buffer = this.compress(frame.headers);
// This will result in CONTINUATIONs from a PUSH_PROMISE being 4 bytes shorter than they could // be, but that's not the end of the world, and it prevents us from going over MAX_HTTP_PAYLOAD_SIZE // on the initial PUSH_PROMISE frame. var adjustment = frame.type === 'PUSH_PROMISE' ? 4 : 0; var chunks = cut(buffer, MAX_HTTP_PAYLOAD_SIZE - adjustment);
for (var i = 0; i < chunks.length; i++) { var chunkFrame; var first = (i === 0); var last = (i === chunks.length - 1);
// * otherwise, the frame is forwarded without taking any action else { this.push(frame);
}
done();
};
// The Decompressor class // ----------------------
// The Decompressor is a stateful transform stream, since it has to collect multiple frames first, // and the decoding comes after unifying the payload of those frames. // // If there's a frame in progress, `this._inProgress` is `true`. The frames are collected in // `this._frames`, and the type of the frame and the stream identifier is stored in `this._type` // and `this._stream` respectively.
// The Decompressor is a stateful Transform stream operating in object mode: it
// collects the frames of a header block before decoding them as one unit.
// NOTE(review): the constructor body appears truncated in this copy — fields used by
// the other Decompressor methods (e.g. `this._log`, `this._table`, `this._inProgress`)
// are never initialized here, and the closing brace is missing. TODO restore from the
// upstream source.
util.inherits(Decompressor, TransformStream); function Decompressor(log, type) {
TransformStream.call(this, { objectMode: true });
// Changing the header table size
// Changing the header table size on the decoding side: forward the new limit straight
// to the header table, which evicts entries as needed.
Decompressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) { this._table.setSizeLimit(size);
};
// `decompress` takes a full header block, and decompresses it using a new `HeaderSetDecompressor` // stream instance. This means that from now on, the advantages of streaming header decoding are // lost, but the API becomes simpler.
// `decompress` takes a full header block, and decompresses it using a new
// `HeaderSetDecompressor` stream instance. This means that from now on, the advantages
// of streaming header decoding are lost, but the API becomes simpler.
Decompressor.prototype.decompress = function decompress(block) {
  var decompressor = new HeaderSetDecompressor(this._log, this._table);
  decompressor.end(block);

  var sawRegularHeader = false;
  var headers = {};
  var pair;
  while (pair = decompressor.read()) {
    var name = pair[0];
    var value = pair[1];

    // Pseudo-headers (starting with ':') must all precede regular headers; a
    // pseudo-header after a regular one is a protocol error.
    var isPseudoHeader = (name.trim()[0] === ':');
    if (sawRegularHeader && isPseudoHeader) {
      this.emit('error', 'PROTOCOL_ERROR');
      return headers;
    }
    sawRegularHeader = !isPseudoHeader;

    // Repeated header names accumulate their values into an array.
    if (name in headers) {
      if (headers[name] instanceof Array) {
        headers[name].push(value);
      } else {
        headers[name] = [headers[name], value];
      }
    } else {
      headers[name] = value;
    }
  }

  // * If there are multiple Cookie header fields after decompression, these MUST be
  //   concatenated into a single octet string using the two octet delimiter of 0x3B,
  //   0x20 (the ASCII string "; ").
  if (('cookie' in headers) && (headers['cookie'] instanceof Array)) {
    headers['cookie'] = headers['cookie'].join('; ');
  }

  return headers;
};
// When a `frame` arrives
// When a `frame` arrives. (The original source had `elseif`, a JavaScript syntax
// error — fixed below.)
Decompressor.prototype._transform = function _transform(frame, encoding, done) {
  // * and the collection process is already `_inProgress`, the frame is simply stored,
  //   except if it's an illegal frame: only CONTINUATION frames on the same stream may
  //   follow until the header block ends
  if (this._inProgress) {
    if ((frame.type !== 'CONTINUATION') || (frame.stream !== this._base.stream)) {
      this._log.error('A series of HEADER frames were not continuous');
      this.emit('error', 'PROTOCOL_ERROR');
      return;
    }
    this._frames.push(frame);
  }

  // * and the collection process is not `_inProgress`, but the new frame's type is
  //   HEADERS or PUSH_PROMISE, a new collection process begins
  else if ((frame.type === 'HEADERS') || (frame.type === 'PUSH_PROMISE')) {
    this._inProgress = true;
    this._base = util._extend({}, frame);
    this._frames = [frame];
  }

  // * otherwise, the frame is forwarded without taking any action
  else {
    this.push(frame);
  }

  // * When the frame signals that it's the last in the series, the header block chunks
  //   are concatenated, the headers are decompressed, and a new frame gets pushed out
  //   with the decompressed headers.
  if (this._inProgress && (frame.flags.END_HEADERS || frame.flags.END_PUSH_PROMISE)) {
    var buffer = concat(this._frames.map(function(frame) {
      return frame.data;
    }));
    try {
      var headers = this.decompress(buffer);
    } catch(error) {
      this._log.error({ err: error }, 'Header decompression error');
      this.emit('error', 'COMPRESSION_ERROR');
      return;
    }
    this.push(util._extend(this._base, { headers: headers }));
    this._inProgress = false;
  }

  done();
};
// Helper functions // ================
// Concatenate an array of buffers into a new buffer.
// NOTE(review): the tail of this function was missing from this copy (it had been
// overwritten by unrelated pasted prose); the standard copy loop has been restored.
function concat(buffers) {
  var total = 0;
  for (var i = 0; i < buffers.length; i++) {
    total += buffers[i].length;
  }

  var concatenated = Buffer.alloc(total);
  var cursor = 0;
  for (var j = 0; j < buffers.length; j++) {
    buffers[j].copy(concatenated, cursor);
    cursor += buffers[j].length;
  }

  return concatenated;
}