/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set sw=2 ts=8 et tw=80 : */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// HttpLog.h should generally be included first #include"HttpLog.h"
// Log on level :5, instead of default :4. #undef LOG #define LOG(args) LOG5(args) #undef LOG_ENABLED #define LOG_ENABLED() LOG5_ENABLED()
// Tears down the static HPACK table and its memory reporter.
// In the mangled paste, "delete gStaticHeaders;" had been swallowed into the
// trailing comment, so the table was nulled without being freed (a leak).
void Http2CompressionCleanup() {
  // this happens after the socket thread has been destroyed
  delete gStaticHeaders;
  gStaticHeaders = nullptr;
  UnregisterStrongMemoryReporter(gStaticReporter);
  gStaticReporter = nullptr;
}
// Decodes a complete HPACK header block into |output|. Dispatches on the
// leading bits of each field per RFC 7541 section 6 (indexed, literal with
// incremental indexing, dynamic table size update, never-indexed literal,
// literal without indexing). Soft failures (stream-level errors) are recorded
// in |softfail_rv| and decoding continues so the dynamic table stays in sync
// with the peer; table-modifying failures are session-fatal.
nsresult Http2Decompressor::DecodeHeaderBlock(const uint8_t* data,
                                              uint32_t datalen,
                                              nsACString& output, bool isPush) {
  mSetInitialMaxBufferSizeAllowed = false;
  mOffset = 0;
  mData = data;
  mDataLen = datalen;
  mOutput = &output;
  // Add in some space to hopefully not have to reallocate while decompressing
  // the headers. 512 bytes seems like a good enough number.
  mOutput->Truncate();
  mOutput->SetCapacity(datalen + 512);
  mHeaderStatus.Truncate();
  mHeaderHost.Truncate();
  mHeaderScheme.Truncate();
  mHeaderPath.Truncate();
  mHeaderMethod.Truncate();
  mSeenNonColonHeader = false;
  mIsPush = isPush;

  nsresult rv = NS_OK;
  nsresult softfail_rv = NS_OK;
  while (NS_SUCCEEDED(rv) && (mOffset < mDataLen)) {
    bool modifiesTable = true;
    const char* preamble = "Decompressor state after ?";
    if (mData[mOffset] & 0x80) {
      rv = DoIndexed();
      preamble = "Decompressor state after indexed";
    } else if (mData[mOffset] & 0x40) {
      rv = DoLiteralWithIncremental();
      preamble = "Decompressor state after literal with incremental";
    } else if (mData[mOffset] & 0x20) {
      rv = DoContextUpdate();
      preamble = "Decompressor state after context update";
    } else if (mData[mOffset] & 0x10) {
      modifiesTable = false;
      rv = DoLiteralNeverIndexed();
      preamble = "Decompressor state after literal never index";
    } else {
      modifiesTable = false;
      rv = DoLiteralWithoutIndex();
      preamble = "Decompressor state after literal without index";
    }
    DumpState(preamble);
    if (rv == NS_ERROR_ILLEGAL_VALUE) {
      if (modifiesTable) {
        // Unfortunately, we can't count on our peer now having the same state
        // as us, so let's terminate the session and we can try again later.
        return NS_ERROR_FAILURE;
      }
      // This is an http-level error that we can handle by resetting the stream
      // in the upper layers. Let's note that we saw this, then continue
      // decompressing until we either hit the end of the header block or find a
      // hard failure. That way we won't get an inconsistent compression state
      // with the server.
      softfail_rv = rv;
      rv = NS_OK;
    } else if (rv == NS_ERROR_NET_RESET) {
      // This happens when we detect connection-based auth being requested in
      // the response headers. We'll paper over it for now, and the session will
      // handle this as if it received RST_STREAM with HTTP_1_1_REQUIRED.
      softfail_rv = rv;
      rv = NS_OK;
    }
  }
  // NOTE(review): the end of this function (presumably the final return that
  // prefers softfail_rv over rv, plus the closing brace) is missing from this
  // paste — confirm against the original file before landing.
if (accum != mask) { // the simple case for small values return NS_OK;
}
}
uint32_t factor = 1; // 128 ^ 0
// we need a series of bytes. The high bit signifies if we need another one. // The first one is a a factor of 128 ^ 0, the next 128 ^1, the next 128 ^2, // ..
if (mOffset >= mDataLen) {
NS_WARNING("Ran out of data to decode integer"); // This is session-fatal. return NS_ERROR_FAILURE;
} bool chainBit = mData[mOffset] & 0x80;
accum += (mData[mOffset] & 0x7f) * factor;
++mOffset;
factor = factor * 128;
while (chainBit) { // really big offsets are just trawling for overflows if (accum >= 0x800000) {
NS_WARNING("Decoding integer >= 0x800000"); // This is not strictly fatal to the session, but given the fact that // the value is way to large to be reasonable, let's just tell our peer // to go away. return NS_ERROR_FAILURE;
}
if (mOffset >= mDataLen) {
NS_WARNING("Ran out of data to decode integer"); // This is session-fatal. return NS_ERROR_FAILURE;
}
chainBit = mData[mOffset] & 0x80;
accum += (mData[mOffset] & 0x7f) * factor;
++mOffset;
factor = factor * 128;
} return NS_OK;
}
staticbool HasConnectionBasedAuth(const nsACString& headerValue) { for (const nsACString& authMethod :
nsCCharSeparatedTokenizer(headerValue, '\n').ToRange()) { if (authMethod.LowerCaseEqualsLiteral("ntlm")) { returntrue;
} if (authMethod.LowerCaseEqualsLiteral("negotiate")) { returntrue;
}
}
// Look for upper case characters in the name. for (constchar* cPtr = name.BeginReading(); cPtr && cPtr < name.EndReading();
++cPtr) { if (*cPtr <= 'Z' && *cPtr >= 'A') {
nsCString toLog(name);
LOG(("HTTP Decompressor upper case response header found. [%s]\n",
toLog.get())); return NS_ERROR_ILLEGAL_VALUE;
}
}
// Look for CR, LF or NUL in value - could be smuggling (RFC7540 Sec 10.3) // treat as malformed if (!nsHttp::IsReasonableHeaderValue(value)) { return NS_ERROR_ILLEGAL_VALUE;
}
// http/2 transport level headers shouldn't be gatewayed into http/1 bool isColonHeader = false; for (constchar* cPtr = name.BeginReading(); cPtr && cPtr < name.EndReading();
++cPtr) { if (*cPtr == ':') {
isColonHeader = true; break;
} if (*cPtr != ' ' && *cPtr != '\t') {
isColonHeader = false; break;
}
}
if (isColonHeader) { // :status is the only pseudo-header field allowed in received HEADERS // frames, PUSH_PROMISE allows the other pseudo-header fields if (!name.EqualsLiteral(":status") && !mIsPush) {
LOG(("HTTP Decompressor found illegal response pseudo-header %s",
name.BeginReading())); return NS_ERROR_ILLEGAL_VALUE;
} if (mSeenNonColonHeader) {
LOG(("HTTP Decompressor found illegal : header %s", name.BeginReading())); return NS_ERROR_ILLEGAL_VALUE;
}
LOG(("HTTP Decompressor not gatewaying %s into http/1",
name.BeginReading())); return NS_OK;
}
// Need to check if the server is going to try to speak connection-based auth // with us. If so, we need to kill this via h2, and dial back with http/1.1. // Technically speaking, the server should've just reset or goaway'd us with // HTTP_1_1_REQUIRED, but there are some busted servers out there, so we need // to check on our own to work around them. if (name.EqualsLiteral("www-authenticate") ||
name.EqualsLiteral("proxy-authenticate")) { if (HasConnectionBasedAuth(value)) {
LOG3(("Http2Decompressor %p connection-based auth found in %s", this,
name.BeginReading())); return NS_ERROR_NET_RESET;
}
} return NS_OK;
}
// Emits the header at table |index| (0-based internally). Only the bounds
// check survives in this paste.
nsresult Http2Decompressor::OutputHeader(uint32_t index) {
  // NWGH - make this < index
  // bounds check
  if (mHeaderTable.Length() <= index) {
    LOG(("Http2Decompressor::OutputHeader index too large %u", index));
    // This is session-fatal.
    return NS_ERROR_FAILURE;
  }
  // NOTE(review): the remainder of this overload (presumably delegating to
  // OutputHeader(name, value) with the table entry, plus the closing brace)
  // is missing from this paste — confirm against the original file.
// Copies the name of header-table entry |index| into |name|.
// In the mangled paste the bounds check had been swallowed into the comment,
// allowing an out-of-bounds table access.
nsresult Http2Decompressor::CopyHeaderString(uint32_t index, nsACString& name) {
  // NWGH - make this < index
  // bounds check
  if (mHeaderTable.Length() <= index) {
    // This is session-fatal.
    return NS_ERROR_FAILURE;
  }

  name = mHeaderTable[index]->mName;
  return NS_OK;
}
// Copies |bytes| raw (non-Huffman) octets from the input block into |val|.
nsresult Http2Decompressor::CopyStringFromInput(uint32_t bytes,
                                                nsACString& val) {
  if (mOffset + bytes > mDataLen) {
    // This is session-fatal.
    return NS_ERROR_FAILURE;
  }
  // NOTE(review): the remainder (presumably assigning the mData span into
  // |val|, advancing mOffset, returning NS_OK, and the closing brace) is
  // missing from this paste — confirm against the original file.
// Decodes the last partial-byte Huffman symbol of a string, using the
// |bitsLeft| trailing bits of the previous input byte.
nsresult Http2Decompressor::DecodeFinalHuffmanCharacter(
    const HuffmanIncomingTable* table, uint8_t& c, uint8_t& bitsLeft) {
  MOZ_ASSERT(mOffset <= mDataLen);
  if (mOffset > mDataLen) {
    NS_WARNING("DecodeFinalHuffmanCharacter would read beyond end of buffer");
    return NS_ERROR_FAILURE;
  }
  uint8_t mask = (1 << bitsLeft) - 1;
  uint8_t idx = mData[mOffset - 1] & mask;
  idx <<= (8 - bitsLeft);
  // Don't update bitsLeft yet, because we need to check that value against the
  // number of bits used by our encoding later on. We'll update when we are sure
  // how many bits we've actually used.

  if (table->IndexHasANextTable(idx)) {
    // Can't chain to another table when we're all out of bits in the encoding
    LOG(("DecodeFinalHuffmanCharacter trying to chain when we're out of bits"));
    return NS_ERROR_FAILURE;
  }

  // NOTE(review): the declaration of |entry| (presumably
  // "const HuffmanIncomingEntry* entry = table->Entry(idx);") appears to be
  // missing from this paste — confirm against the original file.

  if (bitsLeft < entry->mPrefixLen) {
    // We don't have enough bits to actually make a match, this is some sort of
    // invalid coding
    LOG(("DecodeFinalHuffmanCharacter does't have enough bits to match"));
    return NS_ERROR_FAILURE;
  }

  // This is a character!
  if (entry->mValue == 256) {
    // EOS
    LOG(("DecodeFinalHuffmanCharacter actually decoded an EOS"));
    return NS_ERROR_FAILURE;
  }

  c = static_cast<uint8_t>(entry->mValue & 0xFF);
  bitsLeft -= entry->mPrefixLen;
  // NOTE(review): the trailing "return NS_OK;" and closing brace appear to be
  // missing from this paste — confirm against the original file.
if (bitsLeft) { // Need to extract bitsLeft bits from the previous byte, and 8 - bitsLeft // bits from the current byte
uint8_t mask = (1 << bitsLeft) - 1;
rv = (mData[mOffset - 1] & mask) << (8 - bitsLeft);
rv |= (mData[mOffset] & ~mask) >> bitsLeft;
} else {
rv = mData[mOffset];
}
// We always update these here, under the assumption that all 8 bits we got // here will be used. These may be re-adjusted later in the case that we don't // use up all 8 bits of the byte.
++mOffset;
++bytesConsumed;
if (table->IndexHasANextTable(idx)) { if (mOffset >= mDataLen) { if (!bitsLeft || (mOffset > mDataLen)) { // TODO - does this get me into trouble in the new world? // No info left in input to try to consume, we're done
LOG(("DecodeHuffmanCharacter all out of bits to consume, can't chain")); return NS_ERROR_FAILURE;
}
// We might get lucky here! return DecodeFinalHuffmanCharacter(table->NextTable(idx), c, bitsLeft);
}
// We're sorry, Mario, but your princess is in another castle return DecodeHuffmanCharacter(table->NextTable(idx), c, bytesConsumed,
bitsLeft);
}
const HuffmanIncomingEntry* entry = table->Entry(idx); if (entry->mValue == 256) {
LOG(("DecodeHuffmanCharacter found an actual EOS")); return NS_ERROR_FAILURE;
}
c = static_cast<uint8_t>(entry->mValue & 0xFF);
// Need to adjust bitsLeft (and possibly other values) because we may not have // consumed all of the bits of the byte we extracted. if (entry->mPrefixLen <= bitsLeft) {
bitsLeft -= entry->mPrefixLen;
--mOffset;
--bytesConsumed;
} else {
bitsLeft = 8 - (entry->mPrefixLen - bitsLeft);
}
MOZ_ASSERT(bitsLeft < 8);
return NS_OK;
}
// Decodes |bytes| octets of Huffman-coded input into |val|, validating the
// EOS padding rules of RFC 7541 section 5.2.
nsresult Http2Decompressor::CopyHuffmanStringFromInput(uint32_t bytes,
                                                       nsACString& val) {
  if (mOffset + bytes > mDataLen) {
    LOG(("CopyHuffmanStringFromInput not enough data"));
    return NS_ERROR_FAILURE;
  }

  // NOTE(review): the declarations of the locals used below (presumably
  // |bytesRead|, |bitsLeft|, |buf|, |c| and |rv|) appear to be missing from
  // this paste — confirm against the original file.

  while (bytesRead < bytes) {
    uint32_t bytesConsumed = 0;
    rv = DecodeHuffmanCharacter(&HuffmanIncomingRoot, c, bytesConsumed,
                                bitsLeft);
    if (NS_FAILED(rv)) {
      LOG(("CopyHuffmanStringFromInput failed to decode a character"));
      return rv;
    }
    bytesRead += bytesConsumed;
    buf.Append(c);
  }

  if (bytesRead > bytes) {
    LOG(("CopyHuffmanStringFromInput read more bytes than was allowed!"));
    return NS_ERROR_FAILURE;
  }

  if (bitsLeft) {
    // The shortest valid code is 4 bits, so we know there can be at most one
    // character left that our loop didn't decode. Check to see if that's the
    // case, and if so, add it to our output.
    rv = DecodeFinalHuffmanCharacter(&HuffmanIncomingRoot, c, bitsLeft);
    if (NS_SUCCEEDED(rv)) {
      buf.Append(c);
    }
  }

  if (bitsLeft > 7) {
    LOG(("CopyHuffmanStringFromInput more than 7 bits of padding"));
    return NS_ERROR_FAILURE;
  }

  if (bitsLeft) {
    // Any bits left at this point must belong to the EOS symbol, so make sure
    // they make sense (ie, are all ones)
    uint8_t mask = (1 << bitsLeft) - 1;
    uint8_t bits = mData[mOffset - 1] & mask;
    if (bits != mask) {
      LOG(
          ("CopyHuffmanStringFromInput ran out of data but found possible "
           "non-EOS symbol"));
      return NS_ERROR_FAILURE;
    }
  }

  val = buf;
  LOG(("CopyHuffmanStringFromInput decoded a full string!"));
  return NS_OK;
}
// Handles an HPACK Indexed Header Field (leading bit pattern 1, RFC 7541
// section 6.1). Index 0 is invalid on the wire.
nsresult Http2Decompressor::DoIndexed() {
  // this starts with a 1 bit pattern
  MOZ_ASSERT(mData[mOffset] & 0x80);

  // NOTE(review): the integer decode that defines |index| (presumably
  // "uint32_t index;" plus a DecodeInteger(7, index) call and its error
  // check) appears to be missing from this paste — confirm against the
  // original file.

  if (index == 0) {
    return NS_ERROR_FAILURE;
  }
  // NWGH - remove this line, since we'll keep everything 1-indexed
  index--;  // Internally, we 0-index everything, since this is, y'know, C++
  // NOTE(review): the trailing call that emits the header (presumably
  // "return OutputHeader(index);") and the closing brace are missing from
  // this paste — confirm against the original file.
// first let's get the name
uint32_t index;
nsresult rv = DecodeInteger(namePrefixLen, index); if (NS_FAILED(rv)) { return rv;
}
// sanity check if (mOffset >= mDataLen) {
NS_WARNING("Http2 Decompressor ran out of data"); // This is session-fatal return NS_ERROR_FAILURE;
}
bool isHuffmanEncoded;
if (!index) { // name is embedded as a literal
uint32_t nameLen;
isHuffmanEncoded = mData[mOffset] & (1 << 7);
rv = DecodeInteger(7, nameLen); if (NS_SUCCEEDED(rv)) { if (isHuffmanEncoded) {
rv = CopyHuffmanStringFromInput(nameLen, name);
} else {
rv = CopyStringFromInput(nameLen, name);
}
}
LOG(("Http2Decompressor::DoLiteralInternal literal name %s",
name.BeginReading()));
} else { // NWGH - make this index, not index - 1 // name is from headertable
rv = CopyHeaderString(index - 1, name);
LOG(("Http2Decompressor::DoLiteralInternal indexed name %d %s", index,
name.BeginReading()));
} if (NS_FAILED(rv)) { return rv;
}
// sanity check if (mOffset >= mDataLen) {
NS_WARNING("Http2 Decompressor ran out of data"); // This is session-fatal return NS_ERROR_FAILURE;
}
// now the value
uint32_t valueLen;
isHuffmanEncoded = mData[mOffset] & (1 << 7);
rv = DecodeInteger(7, valueLen); if (NS_SUCCEEDED(rv)) { if (isHuffmanEncoded) {
rv = CopyHuffmanStringFromInput(valueLen, value);
} else {
rv = CopyStringFromInput(valueLen, value);
}
} if (NS_FAILED(rv)) { return rv;
}
int32_t newline = 0; while ((newline = value.FindChar('\n', newline)) != -1) { if (value[newline + 1] == ' ' || value[newline + 1] == '\t') {
LOG(("Http2Decompressor::Disallowing folded header value %s",
value.BeginReading())); return NS_ERROR_ILLEGAL_VALUE;
} // Increment this to avoid always finding the same newline and looping // forever
++newline;
}
LOG(("Http2Decompressor::DoLiteralInternal value %s", value.BeginReading())); return NS_OK;
}
// Handles a Literal Header Field without Indexing (0000 prefix, RFC 7541
// section 6.2.2).
nsresult Http2Decompressor::DoLiteralWithoutIndex() {
  // this starts with 0000 bit pattern
  MOZ_ASSERT((mData[mOffset] & 0xF0) == 0x00);
  // NOTE(review): the remainder of this method (presumably the
  // DoLiteralInternal call with a 4-bit prefix, the OutputHeader call, the
  // return, and the closing brace) is missing from this paste — confirm
  // against the original file.
// Handles a Literal Header Field with Incremental Indexing (01 prefix,
// RFC 7541 section 6.2.1): decode, emit, then insert into the dynamic table.
// In the mangled paste the NET_RESET pass-through check had been swallowed
// into a comment, so hard failures were never propagated here.
nsresult Http2Decompressor::DoLiteralWithIncremental() {
  // this starts with 01 bit pattern
  MOZ_ASSERT((mData[mOffset] & 0xC0) == 0x40);

  nsAutoCString name, value;
  nsresult rv = DoLiteralInternal(name, value, 6);
  if (NS_SUCCEEDED(rv)) {
    rv = OutputHeader(name, value);
  }
  // Let NET_RESET continue on so that we don't get out of sync, as it is just
  // used to kill the stream, not the session.
  if (NS_FAILED(rv) && rv != NS_ERROR_NET_RESET) {
    return rv;
  }

  uint32_t room = nvPair(name, value).Size();
  if (room > mMaxBuffer) {
    ClearHeaderTable();
    LOG(
        ("HTTP decompressor literal with index not inserted due to size %u %s "
         "%s\n",
         room, name.get(), value.get()));
    DumpState("Decompressor state after ClearHeaderTable");
    return rv;
  }

  MakeRoom(room, "decompressor");

  // Incremental Indexing implicitly adds a row to the header table.
  mHeaderTable.AddElement(name, value);
  // NOTE(review): the trailing return (presumably "return rv;") and closing
  // brace appear to be missing from this paste — confirm against the original.
// Handles a Dynamic Table Size Update (001 prefix, RFC 7541 section 6.3).
nsresult Http2Decompressor::DoContextUpdate() {
  // This starts with 001 bit pattern
  MOZ_ASSERT((mData[mOffset] & 0xE0) == 0x20);

  // Getting here means we have to adjust the max table size, because the
  // compressor on the other end has signaled to us through HPACK (not H2)
  // that it's using a size different from the currently-negotiated size.
  // This change could either come about because we've sent a
  // SETTINGS_HEADER_TABLE_SIZE, or because the encoder has decided that
  // the current negotiated size doesn't fit its needs (for whatever reason)
  // and so it needs to change it (either up to the max allowed by our SETTING,
  // or down to some value below that)
  uint32_t newMaxSize;
  nsresult rv = DecodeInteger(5, newMaxSize);
  LOG(("Http2Decompressor::DoContextUpdate new maximum size %u", newMaxSize));
  if (NS_FAILED(rv)) {
    return rv;
  }
  if (newMaxSize > mMaxBufferSetting) {
    // This is fatal to the session - peer is trying to use a table larger
    // than we have made available.
    return NS_ERROR_FAILURE;
  }
  // NOTE(review): the remainder of this method (presumably applying the new
  // maximum buffer size and the final return / closing brace) is missing from
  // this paste — confirm against the original file.
nsDependentCSubstring name =
Substring(beginBuffer + startIndex, beginBuffer + colonIndex); // all header names are lower case in http/2
ToLowerCase(name);
// colon headers are for http/2 and this is http/1 input, so that // is probably a smuggling attack of some kind bool isColonHeader = false; for (constchar* cPtr = name.BeginReading();
cPtr && cPtr < name.EndReading(); ++cPtr) { if (*cPtr == ':') {
isColonHeader = true; break;
} if (*cPtr != ' ' && *cPtr != '\t') {
isColonHeader = false; break;
}
} if (isColonHeader) { continue;
}
nsDependentCSubstring value =
Substring(beginBuffer + valueIndex, beginBuffer + crlfIndex);
if (name.EqualsLiteral("content-length")) {
int64_t len;
nsCString tmp(value); if (nsHttp::ParseInt64(tmp.get(), nullptr, &len)) {
mParsedContentLength = len;
}
}
if (name.EqualsLiteral("cookie")) { // cookie crumbling bool haveMoreCookies = true;
int32_t nextCookie = valueIndex; while (haveMoreCookies) {
int32_t semiSpaceIndex =
Substring(nvInput, 0, crlfIndex).Find("; ", nextCookie); if (semiSpaceIndex == -1) {
haveMoreCookies = false;
semiSpaceIndex = crlfIndex;
}
nsDependentCSubstring cookie =
Substring(beginBuffer + nextCookie, beginBuffer + semiSpaceIndex); // cookies less than 20 bytes are not indexed
ProcessHeader(nvPair(name, cookie), false, cookie.Length() < 20);
nextCookie = semiSpaceIndex + 2;
}
} else { // allow indexing of every non-cookie except authorization
ProcessHeader(nvPair(name, value), false,
name.EqualsLiteral("authorization"));
}
}
// NB: This is a *really* ugly hack, but to do this in the right place (the // transaction) would require totally reworking how/when the transaction // creates its request stream, which is not worth the effort and risk of // breakage just to add one header only to h2 connections. if (!simpleConnectForm && !isWebsocket) { // Add in TE: trailers for regular requests
nsAutoCString te("te");
nsAutoCString trailers("trailers");
ProcessHeader(nvPair(te, trailers), false, false);
}
mOutput = nullptr;
DumpState("Compressor state after EncodeHeaderBlock"); return NS_OK;
}
void Http2Compressor::DoOutput(Http2Compressor::outputCode code, constclass nvPair* pair, uint32_t index) { // start Byte needs to be calculated from the offset after // the opcode has been written out in case the output stream // buffer gets resized/relocated
uint32_t offset = mOutput->Length();
uint8_t* startByte;
switch (code) { case kNeverIndexedLiteral:
LOG(
("HTTP compressor %p neverindex literal with name reference %u %s " "%s\n", this, index, pair->mName.get(), pair->mValue.get()));
// In this case, the index will have already been adjusted to be 1-based // instead of 0-based.
EncodeInteger(4, index); // 0001 4 bit prefix
startByte = reinterpret_cast<unsignedchar*>(mOutput->BeginWriting()) + offset;
*startByte = (*startByte & 0x0f) | 0x10;
if (!index) {
HuffmanAppend(pair->mName);
}
HuffmanAppend(pair->mValue); break;
case kPlainLiteral:
LOG(("HTTP compressor %p noindex literal with name reference %u %s %s\n", this, index, pair->mName.get(), pair->mValue.get()));
// In this case, the index will have already been adjusted to be 1-based // instead of 0-based.
EncodeInteger(4, index); // 0000 4 bit prefix
startByte = reinterpret_cast<unsignedchar*>(mOutput->BeginWriting()) + offset;
*startByte = *startByte & 0x0f;
if (!index) {
HuffmanAppend(pair->mName);
}
HuffmanAppend(pair->mValue); break;
case kIndexedLiteral:
LOG(("HTTP compressor %p literal with name reference %u %s %s\n", this,
index, pair->mName.get(), pair->mValue.get()));
// In this case, the index will have already been adjusted to be 1-based // instead of 0-based.
EncodeInteger(6, index); // 01 2 bit prefix
startByte = reinterpret_cast<unsignedchar*>(mOutput->BeginWriting()) + offset;
*startByte = (*startByte & 0x3f) | 0x40;
if (!index) {
HuffmanAppend(pair->mName);
}
HuffmanAppend(pair->mValue); break;
case kIndex:
LOG(("HTTP compressor %p index %u %s %s\n", this, index,
pair->mName.get(), pair->mValue.get())); // NWGH - make this plain old index instead of index + 1 // In this case, we are passed the raw 0-based C index, and need to // increment to make it 1-based and comply with the spec
EncodeInteger(7, index + 1);
startByte = reinterpret_cast<unsignedchar*>(mOutput->BeginWriting()) + offset;
*startByte = *startByte | 0x80; // 1 1 bit prefix break;
}
}
if (mask) {
val -= mask;
tmp = mask;
mOutput->Append(reinterpret_cast<char*>(&tmp), 1);
}
uint32_t q, r; do {
q = val / 128;
r = val % 128;
tmp = r; if (q) {
tmp |= 0x80; // chain bit
}
val = q;
mOutput->Append(reinterpret_cast<char*>(&tmp), 1);
} while (q);
}
if (huffLength) { // Fill in the most significant <huffLength> bits of the next byte
bitsLeft = 8 - huffLength;
uint8_t val = (huffValue & ((1 << huffLength) - 1)) << bitsLeft;
buf.Append(reinterpret_cast<char*>(&val), 1);
}
}
if (bitsLeft != 8) { // Pad the last <bitsLeft> bits with ones, which corresponds to the EOS // encoding
uint8_t val = (1 << bitsLeft) - 1;
offset = buf.Length() - 1;
startByte = reinterpret_cast<unsignedchar*>(buf.BeginWriting()) + offset;
*startByte = *startByte | val;
}
// Now we know how long our encoded string is, we can fill in our length
uint32_t bufLength = buf.Length();
offset = mOutput->Length();
EncodeInteger(7, bufLength);
startByte = reinterpret_cast<unsignedchar*>(mOutput->BeginWriting()) + offset;
*startByte = *startByte | 0x80;
// Finally, we can add our REAL data!
mOutput->Append(buf);
LOG(
("Http2Compressor::HuffmanAppend %p encoded %d byte original on %d " "bytes.\n", this, length, bufLength));
}
// NWGH - make this index = 1; index <= headerTableSize; ++index for (uint32_t index = 0; index < headerTableSize; ++index) { if (mHeaderTable[index]->mName.Equals(inputPair.mName)) { // NWGH - make this nameReference = index
nameReference = index + 1; if (mHeaderTable[index]->mValue.Equals(inputPair.mValue)) {
match = true;
matchedIndex = index; break;
}
}
}
// We need to emit a new literal if (!match || noLocalIndex || neverIndex) { if (neverIndex) {
DoOutput(kNeverIndexedLiteral, &inputPair, nameReference);
DumpState("Compressor state after literal never index"); return;
}
if (noLocalIndex || (newSize > (mMaxBuffer / 2)) || (mMaxBuffer < 128)) {
DoOutput(kPlainLiteral, &inputPair, nameReference);
DumpState("Compressor state after literal without index"); return;
}
// make sure to makeroom() first so that any implied items // get preserved.
MakeRoom(newSize, "compressor");
DoOutput(kIndexedLiteral, &inputPair, nameReference);
mHeaderTable.AddElement(inputPair.mName, inputPair.mValue);
LOG(("HTTP compressor %p new literal placed at index 0\n", this));
DumpState("Compressor state after literal with index"); return;
}
// emit an index
DoOutput(kIndex, &inputPair, matchedIndex);
// NOTE(review): the following German website-disclaimer text was accidentally
// appended to this source file (likely pasted from a code-hosting web page)
// and is not part of the code. Preserved here, commented out, pending removal:
// "Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
// zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
// noch Qualität der bereit gestellten Informationen zugesichert.
// Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell."