// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A simple interpreter for the Irregexp byte code.
#include "irregexp/imported/regexp-interpreter.h"

#include "irregexp/imported/regexp-bytecodes.h"
#include "irregexp/imported/regexp-macro-assembler.h"
#include "irregexp/imported/regexp-stack.h"  // For kMaximumStackSize.
#include "irregexp/imported/regexp.h"
// Case-insensitively compares the back-referenced text at [from, from + len)
// against the text at [current, current + len) in a one-byte (Latin-1)
// subject. Returns true when the ranges are equal up to case folding.
// `isolate` and `unicode` are accepted for signature parity with the two-byte
// variant; for Latin-1 characters the unicode flag makes no difference.
bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
                          base::Vector<const uint8_t> subject, bool unicode) {
  // For Latin1 characters the unicode flag makes no difference.
  for (int i = 0; i < len; i++) {
    unsigned int old_char = subject[from++];
    unsigned int new_char = subject[current++];
    if (old_char == new_char) continue;
    // Convert both characters to lower case by setting bit 0x20. This is only
    // a valid case-fold for letters; the range check below rejects everything
    // else.
    old_char |= 0x20;
    new_char |= 0x20;
    if (old_char != new_char) return false;
    // The folded characters are equal, but that only counts as a match if
    // both are letters in the ASCII range or the Latin-1 range. The unsigned
    // subtraction wraps for values below 'a' (resp. 224), so a single <=
    // comparison checks both bounds. 247 (0xF7) is the division sign, the one
    // non-letter in the 224..254 range.
    if (!(old_char - 'a' <= 'z' - 'a') &&
        !(old_char - 224 <= 254 - 224 && old_char != 247)) {
      return false;
    }
  }
  return true;
}
// NOTE(review): Debug-only helper that prints the interpreter state (pc,
// stack depth, current position and current character) when
// --trace-regexp-bytecodes is enabled. The text below is truncated by
// extraction: the function body ends mid-statement after the PrintF call —
// no closing braces, no use of `bytecode_length`/`bytecode_name` — and the
// tokens "constchar"/"constbool" are fused ("const char"/"const bool").
// Restore this block from upstream V8 rather than patching it in place.
#ifdef DEBUG void MaybeTraceInterpreter(const uint8_t* code_base, const uint8_t* pc, int stack_depth, int current_position,
uint32_t current_char, int bytecode_length, constchar* bytecode_name) { if (v8_flags.trace_regexp_bytecodes) { constbool printable = std::isprint(current_char); constchar* format =
printable
? "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = "
: "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = ";
PrintF(format, pc - code_base, stack_depth, current_position, current_char,
printable ? current_char : '.');
// Helpers to access the packed argument. Takes the 32 bits containing the
// current bytecode, where the 8 LSB contain the bytecode and the rest contains
// a packed 24-bit argument.
// TODO(jgruber): Specify signed-ness in bytecode signature declarations, and
// police restrictions during bytecode generation.
int32_t LoadPacked24Signed(int32_t bytecode_and_packed_arg) {
  // Arithmetic right shift of the signed operand sign-extends the 24-bit
  // payload into a full int32_t.
  return bytecode_and_packed_arg >> BYTECODE_SHIFT;
}
// Extracts the packed 24-bit argument as an unsigned value. Casting to
// uint32_t before shifting makes the shift logical (zero-filling), so no sign
// extension occurs.
uint32_t LoadPacked24Unsigned(int32_t bytecode_and_packed_arg) {
  return static_cast<uint32_t>(bytecode_and_packed_arg) >> BYTECODE_SHIFT;
}
// A simple abstraction over the backtracking stack used by the interpreter. // // Despite the name 'backtracking' stack, it's actually used as a generic stack // that stores both program counters (= offsets into the bytecode) and generic // integer values. class BacktrackStack { public:
BacktrackStack() = default;
BacktrackStack(const BacktrackStack&) = delete;
BacktrackStack& operator=(const BacktrackStack&) = delete;
V8_WARN_UNUSED_RESULT bool push(int v) {
data_.emplace_back(v); return (static_cast<int>(data_.size()) <= kMaxSize);
} int peek() const {
SBXCHECK(!data_.empty()); return data_.back();
} int pop() { int v = peek();
data_.pop_back(); return v;
}
// The 'sp' is the index of the first empty element in the stack. int sp() const { returnstatic_cast<int>(data_.size()); } void set_sp(uint32_t new_sp) {
DCHECK_LE(new_sp, sp());
data_.resize_no_init(new_sp);
}
private: // Semi-arbitrary. Should be large enough for common cases to remain in the // static stack-allocated backing store, but small enough not to waste space. static constexpr int kStaticCapacity = 64;
using ValueT = int;
base::SmallVector<ValueT, kStaticCapacity> data_;
static constexpr int kMaxSize =
RegExpStack::kMaximumStackSize / sizeof(ValueT);
};
// NOTE(review): This class is truncated by extraction — only the constructor
// is visible; the remaining members and the closing "};" are missing, so the
// block below does not parse on its own. Restore from upstream V8 rather
// than editing in place. The constructor validates register counts against
// sandbox-hardened bounds (SBXCHECK) and pre-fills the output-register
// region with -1 ('no match').
// Registers used during interpreter execution. These consist of output // registers in indices [0, output_register_count[ which will contain matcher // results as a {start,end} index tuple for each capture (where the whole match // counts as implicit capture 0); and internal registers in indices // [output_register_count, total_register_count[. class InterpreterRegisters { public: using RegisterT = int;
InterpreterRegisters(int total_register_count, RegisterT* output_registers, int output_register_count)
: registers_(total_register_count),
output_registers_(output_registers),
total_register_count_(total_register_count),
output_register_count_(output_register_count) { // TODO(jgruber): Use int32_t consistently for registers. Currently, CSA // uses int32_t while runtime uses int.
static_assert(sizeof(int) == sizeof(int32_t));
SBXCHECK_GE(output_register_count, 2); // At least 2 for the match itself.
SBXCHECK_GE(total_register_count, output_register_count);
SBXCHECK_LE(total_register_count, RegExpMacroAssembler::kMaxRegisterCount);
DCHECK_NOT_NULL(output_registers);
// Initialize the output register region to -1 signifying 'no match'.
std::memset(registers_.data(), -1,
output_register_count * sizeof(RegisterT));
USE(total_register_count_);
}
// Throws a JS stack-overflow exception on the isolate and reports the failure
// to the caller as an EXCEPTION result. Only valid for calls that originated
// in the runtime (CHECKed below).
IrregexpInterpreter::Result ThrowStackOverflow(Isolate* isolate,
                                               RegExp::CallOrigin call_origin) {
  CHECK(call_origin == RegExp::CallOrigin::kFromRuntime);
  // We abort interpreter execution after the stack overflow is thrown, and
  // thus allow allocation here despite the outer
  // DisallowGarbageCollectionScope.
  AllowGarbageCollection yes_gc;
  isolate->StackOverflow();
  return IrregexpInterpreter::EXCEPTION;
}
// Only throws if called from the runtime, otherwise just returns the
// EXCEPTION status code. JS-originated calls leave the actual throw to the
// caller.
IrregexpInterpreter::Result MaybeThrowStackOverflow(
    Isolate* isolate, RegExp::CallOrigin call_origin) {
  return call_origin == RegExp::CallOrigin::kFromRuntime
             ? ThrowStackOverflow(isolate, call_origin)
             : IrregexpInterpreter::EXCEPTION;
}
// NOTE(review): Mid-function fragment, truncated at both ends by extraction.
// It references locals (`js_has_overflowed`, `check`, `code_array_out`,
// `subject_string_out`) declared in lines not visible here — presumably this
// is the interrupt/stack-guard handling helper called from RawMatch; confirm
// against upstream V8. The tokens "elseif" (two occurrences) and "constbool"
// are fused and must be split to compile. Do not restyle in place.
if (call_origin == RegExp::CallOrigin::kFromJs) { // Direct calls from JavaScript can be interrupted in two ways: // 1. A real stack overflow, in which case we let the caller throw the // exception. // 2. The stack guard was used to interrupt execution for another purpose, // forcing the call through the runtime system. if (js_has_overflowed) { return IrregexpInterpreter::EXCEPTION;
} elseif (check.InterruptRequested()) { return IrregexpInterpreter::RETRY;
}
} else {
DCHECK(call_origin == RegExp::CallOrigin::kFromRuntime); // Prepare for possible GC.
// Handles keep the raw objects alive across a potential GC triggered by
// HandleInterrupts below.
HandleScope handles(isolate);
Handle<TrustedByteArray> code_handle(*code_array_out, isolate);
Handle<String> subject_handle(*subject_string_out, isolate);
if (js_has_overflowed) { return ThrowStackOverflow(isolate, call_origin);
} elseif (check.InterruptRequested()) { constbool was_one_byte =
(*subject_string_out)->IsOneByteRepresentation();
Tagged<Object> result;
{
// Allocation is allowed while running interrupts; see the rationale in
// MatchInternal's header comment.
AllowGarbageCollection yes_gc;
result = isolate->stack_guard()->HandleInterrupts();
} if (IsException(result, isolate)) { return IrregexpInterpreter::EXCEPTION;
}
// If we changed between a LATIN1 and a UC16 string, we need to // restart regexp matching with the appropriate template instantiation of // RawMatch. if (subject_handle->IsOneByteRepresentation() != was_one_byte) { return IrregexpInterpreter::RETRY;
}
// If computed gotos are supported by the compiler, we can get addresses to
// labels directly in C/C++. Every bytecode handler has its own label and we
// store the addresses in a dispatch table indexed by bytecode. To execute the
// next handler we simply jump (goto) directly to its address.
#if V8_USE_COMPUTED_GOTO

#define BC_LABEL(name) BC_##name:

#define DECODE()                                                   \
  do {                                                             \
    next_insn = Load32Aligned(next_pc);                            \
    next_handler_addr = dispatch_table[next_insn & BYTECODE_MASK]; \
  } while (false)

#define DISPATCH()  \
  pc = next_pc;     \
  insn = next_insn; \
  goto* next_handler_addr

// Without computed goto support, we fall back to a simple switch-based
// dispatch (A large switch statement inside a loop with a case for every
// bytecode).
#else  // V8_USE_COMPUTED_GOTO

#define BC_LABEL(name) case BC_##name:

#define DECODE() next_insn = Load32Aligned(next_pc)

#define DISPATCH()  \
  pc = next_pc;     \
  insn = next_insn; \
  goto switch_dispatch_continuation

#endif  // V8_USE_COMPUTED_GOTO

// ADVANCE/SET_PC_FROM_OFFSET are separated from DISPATCH, because ideally some
// instructions can be executed between ADVANCE/SET_PC_FROM_OFFSET and
// DISPATCH. We want those two macros as far apart as possible, because the
// goto in DISPATCH is dependent on a memory load in
// ADVANCE/SET_PC_FROM_OFFSET. If we don't hit the cache and have to fetch the
// next handler address from physical memory, instructions between
// ADVANCE/SET_PC_FROM_OFFSET and DISPATCH can potentially be executed
// unconditionally, reducing memory stall.
#define ADVANCE(name)                             \
  next_pc = pc + RegExpBytecodeLength(BC_##name); \
  DECODE()

#define SET_PC_FROM_OFFSET(offset) \
  next_pc = code_base + offset;    \
  DECODE()

// Current position mutations.
#define SET_CURRENT_POSITION(value)                        \
  do {                                                     \
    current = (value);                                     \
    DCHECK(base::IsInRange(current, 0, subject.length())); \
  } while (false)

#define ADVANCE_CURRENT_POSITION(by) SET_CURRENT_POSITION(current + (by))
// NOTE(review): Truncated fragment of the templated RawMatch interpreter
// loop — only the signature, the dispatch-table safety rationale and the
// dispatch-table filler setup are visible; the actual bytecode loop is
// missing. Fused tokens to repair when restoring from upstream V8:
// "typenameChar" -> "typename Char", "constChar" -> "const Char", and on
// the "// Fill dispatch table" line a "#define BYTECODE_FILLER_ITERATOR"
// directive has been swallowed into the preceding comment.
template <typenameChar>
IrregexpInterpreter::Result RawMatch(
Isolate* isolate, Tagged<TrustedByteArray>* code_array,
Tagged<String>* subject_string, base::Vector<constChar> subject, int* output_registers, int output_register_count, int total_register_count, int current, uint32_t current_char, RegExp::CallOrigin call_origin, const uint32_t backtrack_limit) {
DisallowGarbageCollection no_gc;
#if V8_USE_COMPUTED_GOTO
// We have to make sure that no OOB access to the dispatch table is possible and // all values are valid label addresses. // Otherwise jumps to arbitrary addresses could potentially happen. // This is ensured as follows: // Every index to the dispatch table gets masked using BYTECODE_MASK in // DECODE(). This way we can only get values between 0 (only the least // significant byte of an integer is used) and kRegExpPaddedBytecodeCount - 1 // (BYTECODE_MASK is defined to be exactly this value). // All entries from kRegExpBytecodeCount to kRegExpPaddedBytecodeCount have to // be filled with BREAKs (invalid operation).
// Fill dispatch table from last defined bytecode up to the next power of two // with BREAK (invalid operation). // TODO(pthier): Find a way to fill up automatically (at compile time) // 59 real bytecodes -> 5 fillers #define BYTECODE_FILLER_ITERATOR(V) \
V(BREAK) /* 1 */ \
V(BREAK) /* 2 */ \
V(BREAK) /* 3 */ \
V(BREAK) /* 4 */ \
V(BREAK) /* 5 */
// Make sure kRegExpPaddedBytecodeCount is actually the closest possible power // of two.
DCHECK_EQ(kRegExpPaddedBytecodeCount,
base::bits::RoundUpToPowerOfTwo32(kRegExpBytecodeCount));
// Make sure every bytecode we get by using BYTECODE_MASK is well defined.
static_assert(kRegExpBytecodeCount <= kRegExpPaddedBytecodeCount);
static_assert(kRegExpBytecodeCount + kRegExpBytecodeFillerCount ==
kRegExpPaddedBytecodeCount);
// NOTE(review): Mid-function fragment, truncated at both ends. It references
// `regexp_data`, `code_array`, `subject_string` and `start_position` that are
// declared in lines not visible here — presumably this is the global-mode
// match loop of IrregexpInterpreter::Match; confirm against upstream V8.
// Note also that the local `backtrack_limit` declared here would shadow any
// same-named parameter of the enclosing function.
// MatchInternal only supports returning a single match per call. In global // mode, i.e. when output_registers has space for more than one match, we // need to keep running until all matches are filled in. int registers_per_match =
JSRegExp::RegistersForCaptureCount(regexp_data->capture_count());
DCHECK_LE(registers_per_match, output_register_count); int number_of_matches_in_output_registers =
output_register_count / registers_per_match;
int backtrack_limit = regexp_data->backtrack_limit();
int num_matches = 0; int* current_output_registers = output_registers; for (int i = 0; i < number_of_matches_in_output_registers; i++) { auto current_result = MatchInternal(
isolate, &code_array, &subject_string, current_output_registers,
registers_per_match, total_register_count, start_position, call_origin,
backtrack_limit);
// Flattens access to the subject string and dispatches to the one-byte or
// two-byte instantiation of RawMatch.
IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
    Isolate* isolate, Tagged<TrustedByteArray>* code_array,
    Tagged<String>* subject_string, int* output_registers,
    int output_register_count, int total_register_count, int start_position,
    RegExp::CallOrigin call_origin, uint32_t backtrack_limit) {
  DCHECK((*subject_string)->IsFlat());

  // Note: Heap allocation *is* allowed in two situations if calling from
  // Runtime:
  // 1. When creating & throwing a stack overflow exception. The interpreter
  //    aborts afterwards, and thus possible-moved objects are never used.
  // 2. When handling interrupts. We manually relocate unhandlified references
  //    after interrupts have run.
  DisallowGarbageCollection no_gc;

  // The character preceding the start position; defaults to '\n' when
  // matching starts at position 0.
  base::uc16 previous_char = '\n';
  String::FlatContent subject_content =
      (*subject_string)->GetFlatContent(no_gc);
  // Because interrupts can result in GC and string content relocation, the
  // checksum verification in FlatContent may fail even though this code is
  // safe. See (2) above.
  subject_content.UnsafeDisableChecksumVerification();
  if (subject_content.IsOneByte()) {
    base::Vector<const uint8_t> subject_vector =
        subject_content.ToOneByteVector();
    if (start_position != 0) {
      previous_char = subject_vector[start_position - 1];
    }
    return RawMatch(isolate, code_array, subject_string, subject_vector,
                    output_registers, output_register_count,
                    total_register_count, start_position, previous_char,
                    call_origin, backtrack_limit);
  } else {
    DCHECK(subject_content.IsTwoByte());
    base::Vector<const base::uc16> subject_vector =
        subject_content.ToUC16Vector();
    if (start_position != 0) {
      previous_char = subject_vector[start_position - 1];
    }
    return RawMatch(isolate, code_array, subject_string, subject_vector,
                    output_registers, output_register_count,
                    total_register_count, start_position, previous_char,
                    call_origin, backtrack_limit);
  }
}
// NOTE(review): Truncated fragment — the function body is cut off after the
// tier-up check (no closing brace, and `regexp_data_obj` is derived from
// `regexp_data` in lines not visible here). Restore from upstream V8 rather
// than editing in place.
#ifndef COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER
// This method is called through an external reference from RegExpExecInternal // builtin. int IrregexpInterpreter::MatchForCallFromJs(
Address subject, int32_t start_position, Address, Address, int* output_registers, int32_t output_register_count,
RegExp::CallOrigin call_origin, Isolate* isolate, Address regexp_data) {
DCHECK_NOT_NULL(isolate);
DCHECK_NOT_NULL(output_registers);
DCHECK(call_origin == RegExp::CallOrigin::kFromJs);
if (regexp_data_obj->MarkedForTierUp()) { // Returning RETRY will re-enter through runtime, where actual recompilation // for tier-up takes place. return IrregexpInterpreter::RETRY;
}
// NOTE(review): The following German text is extraction residue (a generic
// website disclaimer, not part of the V8 source) and should be removed.
// Translation: "The information on this web page has been carefully compiled
// to the best of our knowledge. However, neither completeness, correctness,
// nor quality of the provided information is guaranteed. Remark: the colored
// syntax rendering and the measurement are still experimental."