/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */
// Earlier versions of CSS had "bad comment" tokens, but in level 3,
// unterminated comments are just comments.
[ "/* bad comment",
[{ tokenType: "Comment", text: "/* bad comment", value: " bad comment" }],
],
];
const test = (cssText, tokenTypes) => { const lexer = new InspectorCSSParserWrapper(cssText);
let reconstructed = "";
let lastTokenEnd = 0;
let i = 0;
let token; while ((token = lexer.nextToken())) { const expectedToken = tokenTypes[i]; Assert.deepEqual(
{
tokenType: token.tokenType,
text: token.text,
value: token.value,
number: token.number,
unit: token.unit,
},
{
tokenType: expectedToken.tokenType,
text: expectedToken.text,
value: expectedToken.value ?? null,
number: expectedToken.number ?? null,
unit: expectedToken.unit ?? null,
},
`Got expected token #${i} for"${cssText}"`
);
Assert.greater(token.endOffset, token.startOffset);
equal(token.startOffset, lastTokenEnd);
lastTokenEnd = token.endOffset;
reconstructed += cssText.substring(token.startOffset, token.endOffset);
++i;
} // Ensure that we saw the correct number of tokens.
equal(i, tokenTypes.length); // Ensure that the reported offsets cover all the text.
equal(reconstructed, cssText);
};
for (const [cssText, rustTokenTypes] of LEX_TESTS) {
info(`Test "${cssText}"`);
test(cssText, rustTokenTypes);
}
});
const test = (cssText, locations) => { const lexer = new InspectorCSSParserWrapper(cssText);
let i = 0;
let token; const testLocation = () => { const startLine = lexer.parser.lineNumber; const startColumn = lexer.parser.columnNumber;
// We do this in a bit of a funny way so that we can also test the // location of the EOF.
let combined = ":" + startLine + ":" + startColumn; if (token) {
combined = token.tokenType + combined;
}
equal(combined, locations[i]);
++i;
}; while ((token = lexer.nextToken())) {
testLocation();
} // Collect location after we consumed all the tokens
testLocation(); // Ensure that we saw the correct number of tokens.
equal(i, locations.length);
};
for (const [cssText, rustLocations] of LINECOL_TESTS) {
info(`Test "${cssText}"`);
test(cssText, rustLocations);
}
});
add_task(function test_lexer_eofchar() { const EOFCHAR_TESTS = [
["hello", "hello"],
["hello \\", "hello \\\\"],
["'hello", "'hello'"],
['"hello', '"hello"'],
["'hello\\", "'hello\\\\'"],
['"hello\\', '"hello\\\\"'],
["/*hello", "/*hello*/"],
["/*hello*", "/*hello*/"],
["/*hello\\", "/*hello\\*/"],
["url(hello", "url(hello)"],
["url('hello", "url('hello')"],
['url("hello', 'url("hello")'],
["url(hello\\", "url(hello\\\\)"],
["url('hello\\", "url('hello\\\\')"],
['url("hello\\', 'url("hello\\\\")'], // Ensure that passing a different inputString to performEOFFixup // doesn't cause an assertion trying to strip a backslash from the // end of an empty string.
["'\\", "\\'", ""], // Check single-char quotes
[`"`, `""`],
[`'`, `''`],
];
const test = (cssText, expectedAppend, argText) => { const lexer = new InspectorCSSParserWrapper(cssText, {
trackEOFChars: true,
}); while (lexer.nextToken()) { // We don't need to do anything with the tokens. We only want to consume the iterator // so we can safely call performEOFFixup.
}
// NOTE(review): the following stray text is not part of this test file — it
// appears to be a pasted German website disclaimer that replaced the missing
// tail of test_lexer_eofchar (the performEOFFixup call and closing braces).
// English translation of the stray text: "The information on this website has
// been compiled carefully to the best of our knowledge. However, neither
// completeness, nor correctness, nor quality of the provided information is
// guaranteed. Note: the colored syntax highlighting is still experimental."
// TODO: restore the original end of test_lexer_eofchar from upstream.